From f221132d02bb040ecbb2cb499319484456e5771b Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 12 Dec 2023 18:51:12 -0500
Subject: [PATCH 01/26] run 2to3

---
 python/ccdb/__init__.py            |    8 +-
 python/ccdb/authentication.py      |    2 +-
 python/ccdb/cmd/console_context.py |   12 +-
 python/ccdb/cmd/console_util.py    |    6 +-
 python/ccdb/cmd/utils/add.py       |  534 ++++++------
 python/ccdb/cmd/utils/cat.py       | 1038 ++++++++++++-------
 python/ccdb/cmd/utils/dump.py      |  152 ++--
 python/ccdb/cmd/utils/help.py      |   18 +-
 python/ccdb/cmd/utils/info.py      |  484 ++++++------
 python/ccdb/cmd/utils/log.py       |  252 +++---
 python/ccdb/cmd/utils/ls.py        |   12 +-
 python/ccdb/cmd/utils/mktbl.py     |   90 +--
 python/ccdb/cmd/utils/mkvar.py     |  166 ++--
 python/ccdb/cmd/utils/pwd.py       |   68 +-
 python/ccdb/cmd/utils/rm.py        |  364 ++++-----
 python/ccdb/cmd/utils/run.py       |  130 ++--
 python/ccdb/cmd/utils/usage.py     |   64 +-
 python/ccdb/cmd/utils/user.py      |   10 +-
 python/ccdb/cmd/utils/var.py       |  134 ++--
 python/ccdb/cmd/utils/vers.py      |  234 +++---
 python/ccdb/model.py               | 1136 ++++++++++++++----------
 python/ccdb/provider.py            |   14 +-
 python/ccdb/table_file.py          |  342 ++++-----
 23 files changed, 2635 insertions(+), 2635 deletions(-)

diff --git a/python/ccdb/__init__.py b/python/ccdb/__init__.py
index 4282f5be..5210d57d 100644
--- a/python/ccdb/__init__.py
+++ b/python/ccdb/__init__.py
@@ -45,7 +45,7 @@ def get_ccdb_home_path():
 
 def init_ccdb_console():
     from .cmd import ConsoleContext
-    import cmd.colorama
+    from .cmd import colorama
 
 
 # TODO move ccdb to pure logging. NO print command at all
@@ -97,12 +97,12 @@ def init_ccdb_console():
     context.connection_string = "mysql://ccdb_user@localhost/ccdb"
 
     # connection string
-    if "CCDB_CONNECTION" in os.environ.keys():
+    if "CCDB_CONNECTION" in list(os.environ.keys()):
         context.connection_string = os.environ["CCDB_CONNECTION"]
         logger.debug("Set connection string from $CCDB_CONNECTION :" + context.connection_string)
     else:
         # fallback to jana calib url
-        if "JANA_CALIB_URL" in os.environ.keys():
+        if "JANA_CALIB_URL" in list(os.environ.keys()):
             jana_url = os.environ["JANA_CALIB_URL"]
             logger.debug("$CCDB_CONNECTION was not found. Found JANA_CALIB_URL ('"+jana_url+"'). Try use it")
@@ -115,7 +115,7 @@ def init_ccdb_console():
 
     # connection string in in command line arguments ( by -c or --connection) is processed by context.process(sys.argv)
-    if "CCDB_USER" in os.environ.keys():
+    if "CCDB_USER" in list(os.environ.keys()):
         context.user_name = os.environ["CCDB_USER"]
         logger.debug("Set user name from $CCDB_USER :" + context.user_name)
     # elif "USER" in os.environ.keys():
diff --git a/python/ccdb/authentication.py b/python/ccdb/authentication.py
index 11ee1653..b5621cae 100644
--- a/python/ccdb/authentication.py
+++ b/python/ccdb/authentication.py
@@ -86,7 +86,7 @@ def current_user_name(self):
     @current_user_name.setter
     def current_user_name(self, username):
         try:
-            assert isinstance(username, basestring)
+            assert isinstance(username, str)
         except NameError:
             assert isinstance(username, str)
         self._username = username
diff --git a/python/ccdb/cmd/console_context.py b/python/ccdb/cmd/console_context.py
index c85bc958..2496d720 100644
--- a/python/ccdb/cmd/console_context.py
+++ b/python/ccdb/cmd/console_context.py
@@ -1,4 +1,4 @@
-from __future__ import print_function
+
 import os
 import re
 import imp
@@ -136,7 +136,7 @@ def theme(self, value):
         :type value: themes.NoColorTheme
         """
         assert (isinstance(value, themes.NoColorTheme))
-        for key in self._utils.keys():
+        for key in list(self._utils.keys()):
             self._utils[key].theme = value
         log.debug(lfm(" |- theme(value) {0} | \\{0} | |- theme switched to : '{1}'", os.linesep, value))
         self._theme = value
@@ -178,7 +178,7 @@ def register_utilities(self, path=""):
         log.debug("{0:<10} {1:<15} {2}:".format("(command)", "(name)", "(description)"))
         log.debug("\n".join(["{0:<10} {1:<15} {2}:".format(command, util.name, util.short_descr)
                              for command, util
-                             in self._utils.items()]))
+                             in list(self._utils.items())]))
 
     # --------------------------------
     # search_utils
@@ -197,7 +197,7 @@ def search_utils(self, path):
         # get list of files and module names
         files = os.listdir(path)
         test = re.compile(".py$", re.IGNORECASE)
-        files = filter(test.search, files)
+        files = list(filter(test.search, files))
         filenameToModuleName = lambda f: os.path.splitext(f)[0]
         moduleNames = sorted(map(filenameToModuleName, files))
         log.debug(lfm(" |- found '{0}' modules.{1} |- proceed loading each module:{1}", len(moduleNames), os.linesep))
@@ -442,7 +442,7 @@ def check_connection(self, util):
     def interactive_loop(self):
         self.print_interactive_intro()
         # initialise autocomplete
-        self.words = self._utils.keys()
+        self.words = list(self._utils.keys())
         # completer = Completer(words)
         colorama.deinit()  # make colorama to release stderr and stdout
         readline.parse_and_bind("tab: complete")
@@ -466,7 +466,7 @@ def interactive_loop(self):
 
             # read command from user
             try:
-                user_input = raw_input(self.current_path + "> ")
+                user_input = input(self.current_path + "> ")
             except EOFError:
                 log.debug("EOF sequence received. 
Ending interactive loop") break diff --git a/python/ccdb/cmd/console_util.py b/python/ccdb/cmd/console_util.py index 42eda04b..34b1d0cb 100644 --- a/python/ccdb/cmd/console_util.py +++ b/python/ccdb/cmd/console_util.py @@ -1,4 +1,4 @@ -from __future__ import print_function + from .console_context import ConsoleContext from .themes import NoColorTheme @@ -43,10 +43,10 @@ def __init__(self): def read_multiline(self): user_input = [] - entry = raw_input("Enter comment text, put 'EOF' on its own line to quit: \n") + entry = input("Enter comment text, put 'EOF' on its own line to quit: \n") while entry != "EOF": user_input.append(entry) - entry = raw_input("") + entry = input("") return user_input diff --git a/python/ccdb/cmd/utils/add.py b/python/ccdb/cmd/utils/add.py index 5d7ac8a1..8332fb33 100644 --- a/python/ccdb/cmd/utils/add.py +++ b/python/ccdb/cmd/utils/add.py @@ -1,267 +1,267 @@ -import os -import logging - -import ccdb -from ccdb import TextFileDOM -from ccdb import AlchemyProvider -from ccdb.cmd import ConsoleUtilBase -from ccdb import BraceMessage as LogFmt - -log = logging.getLogger("ccdb.cmd.utils.add") - - -# ccdbcmd module interface -def create_util_instance(): - """ - This function is a module interface - - :return: new AddData util - :rtype: AddData - """ - log.debug(" registering AddData") - return AddData() - - -# ********************************************************************* -# Class AddData - Add data constants * -# * -# ********************************************************************* -class AddData(ConsoleUtilBase): - """ Add data constants according given type table""" - - # ccdb utility class descr part - # ------------------------------ - command = "add" - name = "AddData" - short_descr = "Add data constants" - uses_db = True - - def __init(self): - self.reset() - - # ---------------------------------------- - # process - # ---------------------------------------- - def reset(self): - """ - Resets for new command - :return: None - """ - - # set arguments to default - self.raw_table_path = "" - self.table_path = "" - self.raw_file_path = "" - self.file_path = "" - self.run_min = 0 - self.run_max = ccdb.INFINITE_RUN - if self.context is None: - self.variation = "default" - else: - self.variation = self.context.current_variation - self.comment = "" - self.is_namevalue_format = False - self.no_comments = False - self.c_comments = False # file has '//'-style comments - self.raw_entry = "/" # object path with possible pattern, like /mole/* - self.path = "/" # parent path - - # ---------------------------------------- - # process - # ---------------------------------------- - def process(self, args): - if log.isEnabledFor(logging.DEBUG): - log.debug(LogFmt("{0}AddData is in charge{0}\\".format(os.linesep))) - log.debug(LogFmt(" |- arguments : '" + "' '".join(args) + "'")) - - self.reset() - - provider = self.context.provider - assert isinstance(provider, AlchemyProvider) - - # process arguments - if not self.process_arguments(args): - log.debug(LogFmt(" |- process arguments {0}{1}{2}", self.theme.Fail, "failed", self.theme.Reset)) - raise ValueError("Problem parsing arguments") - - # by "" user means default variation - # self.variation = "default" if not bool(self.variation) else self.variation - # TODO commented as self.variation is set in self.reset() need to be tested - - # validate what we've got - if not self.validate(): - log.debug(LogFmt(" |- arguments validation {0}{1}{2}", self.theme.Fail, "failed", self.theme.Reset)) - raise ValueError("Arguments 
validation failed") - - # correct paths - self.table_path = self.context.prepare_path(self.raw_table_path) - self.file_path = self.raw_file_path - - # reading file - try: - if not self.is_namevalue_format: - dom = ccdb.read_ccdb_text_file(self.file_path) - else: - dom = ccdb.read_namevalue_text_file(self.file_path, self.c_comments) - except IOError as error: - log.warning(LogFmt("Unable to read file '{0}'. The error message is: '{1}'", self.file_path, error)) - raise - - # check what we've got - assert isinstance(dom, TextFileDOM) - if not dom.data_is_consistent: - message = "Inconsistency error. " + dom.inconsistent_reason - log.warning(message) - raise ValueError(message=message) - - if len(dom.comment_lines): - self.comment += "\n".join(dom.comment_lines) - - # >oO debug record - log.debug(" |- adding constants") - log.debug(LogFmt(" |- columns: '{0}' rows: '{1}' comment lines: '{2}' metas: '{3}'", - len(dom.rows[0]), len(dom.rows), len(dom.comment_lines), len(dom.metas))) - - try: - table = provider.get_type_table(self.table_path) - except Exception as ex: - if 'No table found by exact path' in ex.message: # TODO replace with good exception type - # it is safe to use len(dom.rows[0]) because dom.data_is_consistant checked that - print(self._get_notable_instruction(self.table_path, len(dom.rows[0]), len(dom.rows))) - - # try to create - assignment = provider.create_assignment(dom, - self.table_path, - self.run_min, - self.run_max, - self.variation, - self.comment) - log.info(assignment.request) - return 0 - - # ---------------------------------------- - - # process_arguments - # ---------------------------------------- - def process_arguments(self, args): - - # parse loop - i = 0 - - while i < len(args): - token = args[i].strip() - i += 1 - if token.startswith('-'): - # it is some command, lets parse what is the command - - # variation - if token == "-v" or token.startswith("--variation"): - if i < len(args): - self.variation = args[i] - i += 1 - - # runrange - if token == "-r" or token == "--runrange": - result = self.context.parse_run_range(args[i]) - i += 1 - if not result: - log.warning("Run range should be in form of: min-max, or min- , or -max") - return False - - # there is a result - (self.run_min, self.run_max, run_min_set, run_max_set) = result - - # check how the bounds were set - if not run_min_set: - log.warning("Min run bound was set as 0 by default. ") - if not run_max_set: - log.warning("Max run bound was set as INFINITE_RUN by default. 
") - - # file - if token == "-f" or token == "--file": - self.raw_entry = args[i] - self.object_type = "directory" - i += 1 - - # skip comments 'no-comments' value - if token == "-n" or token == "--no-comments": - self.no_comments = True - - # name-value file mode - if token == "--name-value": - self.is_namevalue_format = True - - # c style comments - if token == "--c-comments": - self.c_comments = True - - else: - if token.startswith("#"): - # everething next are comments - self.comment += " ".join(args[i - 1:]) - self.comment.strip() - self.comment = self.comment[1:] - break # break the loop since everything further are comments - - # it probably must be a type table path - if self.raw_table_path == "": - self.raw_table_path = token - elif self.raw_file_path == "": - self.raw_file_path = token - - return True - - # ---------------------------------------- - - # validate - # ---------------------------------------- - def validate(self): - if not self.raw_file_path or not self.raw_table_path: - return False - return True - - # ---------------------------------------- - - # print instruction if no table found - # ---------------------------------------- - def _get_notable_instruction(self, path, cols, rows): - """print instruction if no table found""" - msg = """ -There is no table with path '{0}' found in the database. -A table should be created prior adding the constants (see help mktbl) -The command to create the table for the data is: - -mktbl {0} -c {1} -r {2} # -""".format(path, cols, rows) - return msg - - # ---------------------------------------- - # print_help - # ---------------------------------------- - def print_help(self): - """Prints help of the command""" - - print ("""Add data constants according given type table - add -v -r - file_to_import - -Required parameters: - - must be /absolute/path/ in command line mode - might be also relative/path in interactive mode - - - variation name - - - - run range. - if one inputs '-' this means - - if one inputs '-' this means <0>- - if one omits runrange at all. The data will be put as - - file_to_import - file to import. It should be ccdb file format (see documentation or file format section) - if file format is column of names and column of values add --name-value flag - -Additional flags: - - --name-value - indicates that the input file is in name-value format (column of names and column of values) - -n or --no-comments - do not add all "#..." comments that is found in file to ccdb database - --c-comments - for files that contains '//' - C style comments. The add replaces simply // to #. 
- - """) +import os +import logging + +import ccdb +from ccdb import TextFileDOM +from ccdb import AlchemyProvider +from ccdb.cmd import ConsoleUtilBase +from ccdb import BraceMessage as LogFmt + +log = logging.getLogger("ccdb.cmd.utils.add") + + +# ccdbcmd module interface +def create_util_instance(): + """ + This function is a module interface + + :return: new AddData util + :rtype: AddData + """ + log.debug(" registering AddData") + return AddData() + + +# ********************************************************************* +# Class AddData - Add data constants * +# * +# ********************************************************************* +class AddData(ConsoleUtilBase): + """ Add data constants according given type table""" + + # ccdb utility class descr part + # ------------------------------ + command = "add" + name = "AddData" + short_descr = "Add data constants" + uses_db = True + + def __init(self): + self.reset() + + # ---------------------------------------- + # process + # ---------------------------------------- + def reset(self): + """ + Resets for new command + :return: None + """ + + # set arguments to default + self.raw_table_path = "" + self.table_path = "" + self.raw_file_path = "" + self.file_path = "" + self.run_min = 0 + self.run_max = ccdb.INFINITE_RUN + if self.context is None: + self.variation = "default" + else: + self.variation = self.context.current_variation + self.comment = "" + self.is_namevalue_format = False + self.no_comments = False + self.c_comments = False # file has '//'-style comments + self.raw_entry = "/" # object path with possible pattern, like /mole/* + self.path = "/" # parent path + + # ---------------------------------------- + # process + # ---------------------------------------- + def process(self, args): + if log.isEnabledFor(logging.DEBUG): + log.debug(LogFmt("{0}AddData is in charge{0}\\".format(os.linesep))) + log.debug(LogFmt(" |- arguments : '" + "' '".join(args) + "'")) + + self.reset() + + provider = self.context.provider + assert isinstance(provider, AlchemyProvider) + + # process arguments + if not self.process_arguments(args): + log.debug(LogFmt(" |- process arguments {0}{1}{2}", self.theme.Fail, "failed", self.theme.Reset)) + raise ValueError("Problem parsing arguments") + + # by "" user means default variation + # self.variation = "default" if not bool(self.variation) else self.variation + # TODO commented as self.variation is set in self.reset() need to be tested + + # validate what we've got + if not self.validate(): + log.debug(LogFmt(" |- arguments validation {0}{1}{2}", self.theme.Fail, "failed", self.theme.Reset)) + raise ValueError("Arguments validation failed") + + # correct paths + self.table_path = self.context.prepare_path(self.raw_table_path) + self.file_path = self.raw_file_path + + # reading file + try: + if not self.is_namevalue_format: + dom = ccdb.read_ccdb_text_file(self.file_path) + else: + dom = ccdb.read_namevalue_text_file(self.file_path, self.c_comments) + except IOError as error: + log.warning(LogFmt("Unable to read file '{0}'. The error message is: '{1}'", self.file_path, error)) + raise + + # check what we've got + assert isinstance(dom, TextFileDOM) + if not dom.data_is_consistent: + message = "Inconsistency error. 
" + dom.inconsistent_reason + log.warning(message) + raise ValueError(message=message) + + if len(dom.comment_lines): + self.comment += "\n".join(dom.comment_lines) + + # >oO debug record + log.debug(" |- adding constants") + log.debug(LogFmt(" |- columns: '{0}' rows: '{1}' comment lines: '{2}' metas: '{3}'", + len(dom.rows[0]), len(dom.rows), len(dom.comment_lines), len(dom.metas))) + + try: + table = provider.get_type_table(self.table_path) + except Exception as ex: + if 'No table found by exact path' in ex.message: # TODO replace with good exception type + # it is safe to use len(dom.rows[0]) because dom.data_is_consistant checked that + print((self._get_notable_instruction(self.table_path, len(dom.rows[0]), len(dom.rows)))) + + # try to create + assignment = provider.create_assignment(dom, + self.table_path, + self.run_min, + self.run_max, + self.variation, + self.comment) + log.info(assignment.request) + return 0 + + # ---------------------------------------- + + # process_arguments + # ---------------------------------------- + def process_arguments(self, args): + + # parse loop + i = 0 + + while i < len(args): + token = args[i].strip() + i += 1 + if token.startswith('-'): + # it is some command, lets parse what is the command + + # variation + if token == "-v" or token.startswith("--variation"): + if i < len(args): + self.variation = args[i] + i += 1 + + # runrange + if token == "-r" or token == "--runrange": + result = self.context.parse_run_range(args[i]) + i += 1 + if not result: + log.warning("Run range should be in form of: min-max, or min- , or -max") + return False + + # there is a result + (self.run_min, self.run_max, run_min_set, run_max_set) = result + + # check how the bounds were set + if not run_min_set: + log.warning("Min run bound was set as 0 by default. ") + if not run_max_set: + log.warning("Max run bound was set as INFINITE_RUN by default. ") + + # file + if token == "-f" or token == "--file": + self.raw_entry = args[i] + self.object_type = "directory" + i += 1 + + # skip comments 'no-comments' value + if token == "-n" or token == "--no-comments": + self.no_comments = True + + # name-value file mode + if token == "--name-value": + self.is_namevalue_format = True + + # c style comments + if token == "--c-comments": + self.c_comments = True + + else: + if token.startswith("#"): + # everething next are comments + self.comment += " ".join(args[i - 1:]) + self.comment.strip() + self.comment = self.comment[1:] + break # break the loop since everything further are comments + + # it probably must be a type table path + if self.raw_table_path == "": + self.raw_table_path = token + elif self.raw_file_path == "": + self.raw_file_path = token + + return True + + # ---------------------------------------- + + # validate + # ---------------------------------------- + def validate(self): + if not self.raw_file_path or not self.raw_table_path: + return False + return True + + # ---------------------------------------- + + # print instruction if no table found + # ---------------------------------------- + def _get_notable_instruction(self, path, cols, rows): + """print instruction if no table found""" + msg = """ +There is no table with path '{0}' found in the database. 
+A table should be created prior adding the constants (see help mktbl) +The command to create the table for the data is: + +mktbl {0} -c {1} -r {2} # +""".format(path, cols, rows) + return msg + + # ---------------------------------------- + # print_help + # ---------------------------------------- + def print_help(self): + """Prints help of the command""" + + print ("""Add data constants according given type table + add -v -r - file_to_import + +Required parameters: + - must be /absolute/path/ in command line mode + might be also relative/path in interactive mode + + - variation name + + - - run range. + if one inputs '-' this means - + if one inputs '-' this means <0>- + if one omits runrange at all. The data will be put as + + file_to_import - file to import. It should be ccdb file format (see documentation or file format section) + if file format is column of names and column of values add --name-value flag + +Additional flags: + + --name-value - indicates that the input file is in name-value format (column of names and column of values) + -n or --no-comments - do not add all "#..." comments that is found in file to ccdb database + --c-comments - for files that contains '//' - C style comments. The add replaces simply // to #. + + """) diff --git a/python/ccdb/cmd/utils/cat.py b/python/ccdb/cmd/utils/cat.py index ef5d648f..4549c222 100644 --- a/python/ccdb/cmd/utils/cat.py +++ b/python/ccdb/cmd/utils/cat.py @@ -1,519 +1,519 @@ -import logging -import sys -import os - -from ccdb import TypeTable, Assignment -from ccdb import AlchemyProvider -from ccdb.cmd import ConsoleUtilBase -from ccdb.path_utils import ParseRequestResult, parse_request -from ccdb import BraceMessage as Lfm # lfm is aka log format message. See BraceMessage desc about -from sqlalchemy.orm.exc import NoResultFound - -log = logging.getLogger("ccdb.cmd.utils.cat") - - -# ccdb cmd module interface -def create_util_instance(): - log.debug(Lfm(" registering Cat")) - return Cat() - - -# ********************************************************************* -# Class Cat - Show assignment data by ID * -# ********************************************************************* -class Cat(ConsoleUtilBase): - """Show assignment data by ID""" - - # ccdb utility class descr part - # ------------------------------ - command = "cat" - name = "Cat" - short_descr = "Show assignment data by ID" - uses_db = True - - # specific values - show_borders = True - show_header = True - show_comments = False - show_date = False - - def __init__(self): - ConsoleUtilBase.__init__(self) - self.raw_entry = "/" # object path with possible pattern, like /mole/* - self.path = "/" # parent path - self.raw_table_path = "" - self.use_ass_id = False - self.ass_id = 0 - self.print_horizontal = True - self.user_request_print_horizontal = False - self.user_request_print_vertical = False - self.request = ParseRequestResult() - - # ---------------------------------------- - # process - # ---------------------------------------- - def process(self, args): - """ - Process this command - :param args: - :return: 0 if command was successful, value!=0 means command was not successful - :rtype: int - """ - if log.isEnabledFor(logging.DEBUG): - log.debug(Lfm("{0}Cat command is in charge {0}\\", os.linesep)) - log.debug(Lfm(" |- arguments : '" + "' '".join(args) + "'")) - - assert self.context is not None - - # reset arguments on each process - self.raw_table_path = "" - self.show_borders = True - self.show_header = True - self.show_comments = False - self.show_date = False 
- self.request = ParseRequestResult() - self.ass_id = 0 - self.user_request_print_horizontal = False - self.user_request_print_vertical = False - - if not len(args): - print ("Please provide ID for assignment. Use 'help cat' to get more information") - return 1 - - if not self.process_arguments(args): - return 1 - - if self.use_ass_id: - assignment = self.get_assignment_by_id(self.ass_id) - else: - assignment = self.get_assignment_by_request(self.request) - - if assignment: - # now we have to know, how to print an assignment - data = assignment.constant_set.data_table - - if len(data) and len(data[0]): - if self.user_request_print_horizontal: - self.print_assignment_horizontal(assignment, self.show_header, self.show_borders, - self.show_comments) - elif self.user_request_print_vertical: - self.print_assignment_vertical(assignment, self.show_header, self.show_borders, self.show_comments) - else: - if len(data) == 1 and len(data[0]) > 3: - self.print_assignment_vertical(assignment, self.show_header, self.show_borders, - self.show_comments) - else: - self.print_assignment_horizontal(assignment, self.show_header, self.show_borders, - self.show_comments) - else: - log.warning("Assignment contains no data") - else: - print ("Cannot fill data for assignment with this ID") - return 1 - - return 0 - - # ---------------------------------------- - # gets assignment by database id - # ---------------------------------------- - def get_assignment_by_id(self, assignment_id): - """gets assignment by database id""" - - provider = self.context.provider - assert isinstance(provider, AlchemyProvider) - return self.context.provider.get_assignment_by_id(assignment_id) - - # ---------------------------------------- - # gets assignment by parsed request - # ---------------------------------------- - def get_assignment_by_request(self, request): - """gets assignment by parsed request - @param request: Parsed request - @type request: ParseRequestResult - """ - return self._get_assignment_by_request(request) - - # ---------------------------------------- - # gets assignment by parsed request - # ---------------------------------------- - def _get_assignment_by_request(self, request): - - provider = self.context.provider - assert isinstance(provider, AlchemyProvider) - assert isinstance(request, ParseRequestResult) - - if not request.variation_is_parsed: - request.variation = self.context.current_variation - - if not request.run_is_parsed: - request.run = self.context.current_run - - # correct path - table_path = self.context.prepare_path(request.path) - time = request.time if request.time_is_parsed else None - - # check such table really exists (otherwise exception will be thrown) - # noinspection PyBroadException - try: - provider.get_type_table(table_path) - except: - log.error("Cant load: " + table_path) - - log.debug(Lfm(" |- getting assignments for path : '{0}', run: '{1}', var: '{2}', time: '{3}'" - "", table_path, request.run, request.variation, time)) - try: - assignment = provider.get_assignment(table_path, request.run, request.variation, time) - log.debug(Lfm(" |- found assignment: {0}", assignment)) - return assignment - - except NoResultFound: - # if we here there were no assignments selected - log.warning(Lfm("There is no data for table {}, run {}, variation '{}'", - table_path, request.run, request.variation)) - if request.time_is_parsed: - log.warning(" on ".format(request.time_str)) - - return None - - # ---------------------------------------- - # process_arguments - # 
---------------------------------------- - def process_arguments(self, args): - # solo arguments - if ("-b" in args) or ("--borders" in args): - self.show_borders = True - if ("-nb" in args) or ("--no-borders" in args): - self.show_borders = False - if ("-h" in args) or "--header": - self.show_header = True - if ("-nh" in args) or ("--no-header" in args): - self.show_header = False - if ("-c" in args) or ("--comments" in args): - self.show_comments = True - if ("-nc" in args) or ("--no-comments" in args): - self.show_comments = False - if ("-t" in args) or ("--time" in args): - self.show_date = True - if ("-nt" in args) or ("--no-time" in args): - self.show_date = False - if ("-ph" in args) or ("--horizontal" in args): - self.user_request_print_horizontal = True - if ("-pa" in args) or ("--vertical" in args): - self.user_request_print_vertical = True - - # parse loop - i = 0 - while i < len(args): - token = args[i].strip() - i += 1 - if token.startswith('-'): - # it is some command, lets parse what is the command - - # variation - if token == "-v" or token.startswith("--variation"): - if i < len(args): - self.request.variation = args[i].strip() - self.request.variation_is_parsed = True - i += 1 - - # runrange - if token == "-r" or token == "--run": - try: - self.request.run = int(args[i].strip()) - self.request.run_is_parsed = True - i += 1 - except ValueError: - log.warning("Cannot read run from '{}' command", token) - return False - - # get assignment by id - if token == "--id" and i < len(args): - - token = args[i].strip() - i += 1 - try: - self.ass_id = int(token) - self.use_ass_id = True - log.debug(Lfm(" |- parsed DB id : '{}' ", self.ass_id)) - except ValueError: - log.warning("Cannot parse assignment DB id: '{}'", token) - return False - - else: # !token.startswith('-') - # it probably must be a request or just a table name - log.debug(Lfm(" |- parsing request : '{0}'", token)) - self.request = parse_request(token) - - return True - - # ---------------------------------------- - # validate - # ---------------------------------------- - def validate(self): - if not self.raw_table_path: - return False - return True - - # -------------------------------------------------------------------------------- - # print_assignment_vertical - # -------------------------------------------------------------------------------- - def print_assignment_horizontal(self, assignment, print_header=True, display_borders=True, comments=False): - """ - print table with assignment data horizontally - - :param assignment : Assignment object ot print - :type assignment: Assignment - - :param print_header: print header with column information or not - :type print_header: bool - - :param comments: print comments - - :param display_borders: print '|' borders or not - :type display_borders: bool - - """ - log.debug(Lfm(" |- print asgmnt horizontally: header {0}, borders {1}, comments {2}" - "", print_header, display_borders, comments)) - - border = "|" if display_borders else " " - - assert isinstance(assignment, Assignment) - table = assignment.constant_set.type_table - assert isinstance(table, TypeTable) - - # PRINT COMMENTS - if comments: - print ("#" + str(assignment.comment).replace(os.linesep, os.linesep + "#")) - - column_names = [column.name for column in table.columns] - column_types = [column.type for column in table.columns] - data = assignment.constant_set.data_list - - columns_count = len(column_names) - - assert len(column_names) == len(column_types) - assert (len(data) % columns_count) == 0 - - 
min_width = 10 - column_width = [10 for _ in range(columns_count)] - total_data_width = 0 - - # determine column length - for i in range(0, columns_count): - if len(column_names[i]) > min_width: - column_width[i] = len(column_names[i]) - else: - column_width[i] = min_width - - total_data_width += column_width[i] - - # this is our cap, if we need it.... - cap = "+" + (total_data_width + 3 * columns_count - 1) * "-" + "+" - - # print header if needed - if print_header: - - # cap? - if display_borders: - print (self.theme.AsgmtBorder + cap) - - # names line - for i in range(0, columns_count): - sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) - col_format = " %%-%is " % column_width[i] - sys.stdout.write(self.theme.AsgmtHead + col_format % column_names[i] + self.theme.Reset) - - print (self.theme.AsgmtBorder + border + self.theme.Reset) # last border - - # types line - for i in range(0, columns_count): - sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) - col_format = " %%-%is " % column_width[i] - sys.stdout.write(self.theme.AsgmtType + col_format % column_types[i] + self.theme.Reset) - print (self.theme.AsgmtBorder + border + self.theme.Reset) # last border - - # cap? - if display_borders: - print (self.theme.AsgmtBorder + cap) - - # data line by line - column_iter = 0 - for dataItem in data: - # place data - sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) - col_format = " %%-%is " % column_width[column_iter] - sys.stdout.write(self.theme.AsgmtValue + col_format % dataItem + self.theme.Reset) - column_iter += 1 - - # new line? - if column_iter == columns_count: - column_iter = 0 - print (self.theme.AsgmtBorder + border + self.theme.Reset) - - # final cap? - if display_borders: - print (self.theme.AsgmtBorder + cap) - - # -------------------------------------------------------------------------------- - # print_assignment_horizontal - # -------------------------------------------------------------------------------- - def print_assignment_vertical(self, assignment, print_header=True, display_borders=True, comments=False): - """ - print columns vertically and rows horizontally - - :param assignment : Assignment object ot print - :type assignment: Assignment - - :param print_header: print header with column information or not - :type print_header: bool - - :param display_borders: print '|' borders or not - :type display_borders: bool - - :param comments: print comments - """ - log.debug(Lfm(" |- print asgmnt vertically: header {0}, borders {1}, comments {2}", - print_header, display_borders, comments)) - - assert isinstance(assignment, Assignment) - - border = " " - if display_borders: - border = "|" - - table = assignment.constant_set.type_table - isinstance(table, TypeTable) - - # PRINT COMMENTS - if comments: - print ("#" + str(assignment.comment).replace(os.linesep, "#" + os.linesep)) - - column_names = [column.name for column in table.columns] - column_types = [column.type for column in table.columns] - data = assignment.constant_set.data_table - - if not data: # no rows - return - if not data[0]: # no columns - return - assert len(column_names) == len(column_types) - assert len(data[0]) == len(column_names) - - # present data as columns, each column has cells - columns = [] - header_columns_added = 0 - if print_header: - columns.append(column_names) - columns.append(column_types) - header_columns_added = 2 - - for _ in data: - columns.append([]) - - # fill data to columns - for rowI in range(0, len(data)): - for colI in range(0, 
len(data[rowI])): - columns[rowI + header_columns_added].append(data[rowI][colI]) - - column_widths = [len(max(column, key=len)) for column in columns] - total_width = 0 - for length in column_widths: - total_width += length - - # totalDataLength = 0 - - # #determine column length - # for i in range(0, columnsNum): - # if len(columnNames[i]) > minLength: - # columnLengths[i] = len(columnNames[i]) - # else: - # columnLengths[i] = minLength - - # totalDataLength += columnLengths[i]; - - # this is our cap, if we need it.... - cap = "+" + (total_width + 3 * len(columns) - 2) * "-" + "+" - - # print header if needed - # names line - # for i in range(0, columnsNum): - # sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) - # frmt = " %%-%is "%columnLengths[i] - # sys.stdout.write(self.theme.AsgmtHead + frmt%columnNames[i] + self.theme.Reset) - # - # print self.theme.AsgmtBorder + border + self.theme.Reset #last border - # #types line - # for i in range(0, columnsNum): - # sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) - # frmt = " %%-%is "%columnLengths[i] - # sys.stdout.write(self.theme.AsgmtType + frmt%columnTypes[i] + self.theme.Reset) - # print self.theme.AsgmtBorder + border + self.theme.Reset #last border - - # cap? - if display_borders: - print (self.theme.AsgmtBorder + cap + self.theme.Reset) - - # #data line by line - # columnIter = 0 - - for rowI in range(0, len(columns[0])): - sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) - - for colI in range(0, len(columns)): - # place data - data_item = columns[colI][rowI] - frmt = " %%-%is " % column_widths[colI] - if colI == 0 and print_header: - sys.stdout.write(self.theme.AsgmtHead + frmt % data_item + self.theme.Reset) - elif colI == 1 and print_header: - sys.stdout.write(self.theme.AsgmtType + '(' + (frmt % data_item).strip() + ')' + self.theme.Reset) - sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) - else: - sys.stdout.write(self.theme.AsgmtValue + frmt % data_item + self.theme.Reset) - - sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset + os.linesep) - - # #final cap? - if display_borders: - print (self.theme.AsgmtBorder + cap + self.theme.Reset) - - # ---------------------------------------- - # print_help - # ---------------------------------------- - def print_help(self): - """Prints help of the command""" - - print ("""Show data values for assignment. - -Usage: - cat - cat --id #Where assignment_id provided by 'vers ' command - -Formatting flags: - - -c or --comments - Show comments on/off - -nc or --no-comments - - -ph or --horizontal - Print table horizontally - -pa or --vertical - Print table vertically - (If no '--horizontal' or '--vertical' flag is given, the layout of table is determined automatically: - vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise) - - -b or --borders - Switch show borders on of off - -nb or --no-borders - - -h or --header - Show header on/off - -nh or --no-header - - -t or --time - Show time - -nt or --no-time - -Examples: - > cat /test/test_vars/test_table #print latest data for test_table - > cat /test/test_vars/test_table::subtest #print latest data in subtest variation - > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012 - -See also 'dump' command which is 'cat' formatted to save data to files. 
'help dump' - - """) +import logging +import sys +import os + +from ccdb import TypeTable, Assignment +from ccdb import AlchemyProvider +from ccdb.cmd import ConsoleUtilBase +from ccdb.path_utils import ParseRequestResult, parse_request +from ccdb import BraceMessage as Lfm # lfm is aka log format message. See BraceMessage desc about +from sqlalchemy.orm.exc import NoResultFound + +log = logging.getLogger("ccdb.cmd.utils.cat") + + +# ccdb cmd module interface +def create_util_instance(): + log.debug(Lfm(" registering Cat")) + return Cat() + + +# ********************************************************************* +# Class Cat - Show assignment data by ID * +# ********************************************************************* +class Cat(ConsoleUtilBase): + """Show assignment data by ID""" + + # ccdb utility class descr part + # ------------------------------ + command = "cat" + name = "Cat" + short_descr = "Show assignment data by ID" + uses_db = True + + # specific values + show_borders = True + show_header = True + show_comments = False + show_date = False + + def __init__(self): + ConsoleUtilBase.__init__(self) + self.raw_entry = "/" # object path with possible pattern, like /mole/* + self.path = "/" # parent path + self.raw_table_path = "" + self.use_ass_id = False + self.ass_id = 0 + self.print_horizontal = True + self.user_request_print_horizontal = False + self.user_request_print_vertical = False + self.request = ParseRequestResult() + + # ---------------------------------------- + # process + # ---------------------------------------- + def process(self, args): + """ + Process this command + :param args: + :return: 0 if command was successful, value!=0 means command was not successful + :rtype: int + """ + if log.isEnabledFor(logging.DEBUG): + log.debug(Lfm("{0}Cat command is in charge {0}\\", os.linesep)) + log.debug(Lfm(" |- arguments : '" + "' '".join(args) + "'")) + + assert self.context is not None + + # reset arguments on each process + self.raw_table_path = "" + self.show_borders = True + self.show_header = True + self.show_comments = False + self.show_date = False + self.request = ParseRequestResult() + self.ass_id = 0 + self.user_request_print_horizontal = False + self.user_request_print_vertical = False + + if not len(args): + print ("Please provide ID for assignment. 
Use 'help cat' to get more information") + return 1 + + if not self.process_arguments(args): + return 1 + + if self.use_ass_id: + assignment = self.get_assignment_by_id(self.ass_id) + else: + assignment = self.get_assignment_by_request(self.request) + + if assignment: + # now we have to know, how to print an assignment + data = assignment.constant_set.data_table + + if len(data) and len(data[0]): + if self.user_request_print_horizontal: + self.print_assignment_horizontal(assignment, self.show_header, self.show_borders, + self.show_comments) + elif self.user_request_print_vertical: + self.print_assignment_vertical(assignment, self.show_header, self.show_borders, self.show_comments) + else: + if len(data) == 1 and len(data[0]) > 3: + self.print_assignment_vertical(assignment, self.show_header, self.show_borders, + self.show_comments) + else: + self.print_assignment_horizontal(assignment, self.show_header, self.show_borders, + self.show_comments) + else: + log.warning("Assignment contains no data") + else: + print ("Cannot fill data for assignment with this ID") + return 1 + + return 0 + + # ---------------------------------------- + # gets assignment by database id + # ---------------------------------------- + def get_assignment_by_id(self, assignment_id): + """gets assignment by database id""" + + provider = self.context.provider + assert isinstance(provider, AlchemyProvider) + return self.context.provider.get_assignment_by_id(assignment_id) + + # ---------------------------------------- + # gets assignment by parsed request + # ---------------------------------------- + def get_assignment_by_request(self, request): + """gets assignment by parsed request + @param request: Parsed request + @type request: ParseRequestResult + """ + return self._get_assignment_by_request(request) + + # ---------------------------------------- + # gets assignment by parsed request + # ---------------------------------------- + def _get_assignment_by_request(self, request): + + provider = self.context.provider + assert isinstance(provider, AlchemyProvider) + assert isinstance(request, ParseRequestResult) + + if not request.variation_is_parsed: + request.variation = self.context.current_variation + + if not request.run_is_parsed: + request.run = self.context.current_run + + # correct path + table_path = self.context.prepare_path(request.path) + time = request.time if request.time_is_parsed else None + + # check such table really exists (otherwise exception will be thrown) + # noinspection PyBroadException + try: + provider.get_type_table(table_path) + except: + log.error("Cant load: " + table_path) + + log.debug(Lfm(" |- getting assignments for path : '{0}', run: '{1}', var: '{2}', time: '{3}'" + "", table_path, request.run, request.variation, time)) + try: + assignment = provider.get_assignment(table_path, request.run, request.variation, time) + log.debug(Lfm(" |- found assignment: {0}", assignment)) + return assignment + + except NoResultFound: + # if we here there were no assignments selected + log.warning(Lfm("There is no data for table {}, run {}, variation '{}'", + table_path, request.run, request.variation)) + if request.time_is_parsed: + log.warning(" on ".format(request.time_str)) + + return None + + # ---------------------------------------- + # process_arguments + # ---------------------------------------- + def process_arguments(self, args): + # solo arguments + if ("-b" in args) or ("--borders" in args): + self.show_borders = True + if ("-nb" in args) or ("--no-borders" in args): + self.show_borders 
= False + if ("-h" in args) or "--header": + self.show_header = True + if ("-nh" in args) or ("--no-header" in args): + self.show_header = False + if ("-c" in args) or ("--comments" in args): + self.show_comments = True + if ("-nc" in args) or ("--no-comments" in args): + self.show_comments = False + if ("-t" in args) or ("--time" in args): + self.show_date = True + if ("-nt" in args) or ("--no-time" in args): + self.show_date = False + if ("-ph" in args) or ("--horizontal" in args): + self.user_request_print_horizontal = True + if ("-pa" in args) or ("--vertical" in args): + self.user_request_print_vertical = True + + # parse loop + i = 0 + while i < len(args): + token = args[i].strip() + i += 1 + if token.startswith('-'): + # it is some command, lets parse what is the command + + # variation + if token == "-v" or token.startswith("--variation"): + if i < len(args): + self.request.variation = args[i].strip() + self.request.variation_is_parsed = True + i += 1 + + # runrange + if token == "-r" or token == "--run": + try: + self.request.run = int(args[i].strip()) + self.request.run_is_parsed = True + i += 1 + except ValueError: + log.warning("Cannot read run from '{}' command", token) + return False + + # get assignment by id + if token == "--id" and i < len(args): + + token = args[i].strip() + i += 1 + try: + self.ass_id = int(token) + self.use_ass_id = True + log.debug(Lfm(" |- parsed DB id : '{}' ", self.ass_id)) + except ValueError: + log.warning("Cannot parse assignment DB id: '{}'", token) + return False + + else: # !token.startswith('-') + # it probably must be a request or just a table name + log.debug(Lfm(" |- parsing request : '{0}'", token)) + self.request = parse_request(token) + + return True + + # ---------------------------------------- + # validate + # ---------------------------------------- + def validate(self): + if not self.raw_table_path: + return False + return True + + # -------------------------------------------------------------------------------- + # print_assignment_vertical + # -------------------------------------------------------------------------------- + def print_assignment_horizontal(self, assignment, print_header=True, display_borders=True, comments=False): + """ + print table with assignment data horizontally + + :param assignment : Assignment object ot print + :type assignment: Assignment + + :param print_header: print header with column information or not + :type print_header: bool + + :param comments: print comments + + :param display_borders: print '|' borders or not + :type display_borders: bool + + """ + log.debug(Lfm(" |- print asgmnt horizontally: header {0}, borders {1}, comments {2}" + "", print_header, display_borders, comments)) + + border = "|" if display_borders else " " + + assert isinstance(assignment, Assignment) + table = assignment.constant_set.type_table + assert isinstance(table, TypeTable) + + # PRINT COMMENTS + if comments: + print(("#" + str(assignment.comment).replace(os.linesep, os.linesep + "#"))) + + column_names = [column.name for column in table.columns] + column_types = [column.type for column in table.columns] + data = assignment.constant_set.data_list + + columns_count = len(column_names) + + assert len(column_names) == len(column_types) + assert (len(data) % columns_count) == 0 + + min_width = 10 + column_width = [10 for _ in range(columns_count)] + total_data_width = 0 + + # determine column length + for i in range(0, columns_count): + if len(column_names[i]) > min_width: + column_width[i] = len(column_names[i]) + else: 
+ column_width[i] = min_width + + total_data_width += column_width[i] + + # this is our cap, if we need it.... + cap = "+" + (total_data_width + 3 * columns_count - 1) * "-" + "+" + + # print header if needed + if print_header: + + # cap? + if display_borders: + print((self.theme.AsgmtBorder + cap)) + + # names line + for i in range(0, columns_count): + sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) + col_format = " %%-%is " % column_width[i] + sys.stdout.write(self.theme.AsgmtHead + col_format % column_names[i] + self.theme.Reset) + + print((self.theme.AsgmtBorder + border + self.theme.Reset)) # last border + + # types line + for i in range(0, columns_count): + sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) + col_format = " %%-%is " % column_width[i] + sys.stdout.write(self.theme.AsgmtType + col_format % column_types[i] + self.theme.Reset) + print((self.theme.AsgmtBorder + border + self.theme.Reset)) # last border + + # cap? + if display_borders: + print((self.theme.AsgmtBorder + cap)) + + # data line by line + column_iter = 0 + for dataItem in data: + # place data + sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) + col_format = " %%-%is " % column_width[column_iter] + sys.stdout.write(self.theme.AsgmtValue + col_format % dataItem + self.theme.Reset) + column_iter += 1 + + # new line? + if column_iter == columns_count: + column_iter = 0 + print((self.theme.AsgmtBorder + border + self.theme.Reset)) + + # final cap? + if display_borders: + print((self.theme.AsgmtBorder + cap)) + + # -------------------------------------------------------------------------------- + # print_assignment_horizontal + # -------------------------------------------------------------------------------- + def print_assignment_vertical(self, assignment, print_header=True, display_borders=True, comments=False): + """ + print columns vertically and rows horizontally + + :param assignment : Assignment object ot print + :type assignment: Assignment + + :param print_header: print header with column information or not + :type print_header: bool + + :param display_borders: print '|' borders or not + :type display_borders: bool + + :param comments: print comments + """ + log.debug(Lfm(" |- print asgmnt vertically: header {0}, borders {1}, comments {2}", + print_header, display_borders, comments)) + + assert isinstance(assignment, Assignment) + + border = " " + if display_borders: + border = "|" + + table = assignment.constant_set.type_table + isinstance(table, TypeTable) + + # PRINT COMMENTS + if comments: + print(("#" + str(assignment.comment).replace(os.linesep, "#" + os.linesep))) + + column_names = [column.name for column in table.columns] + column_types = [column.type for column in table.columns] + data = assignment.constant_set.data_table + + if not data: # no rows + return + if not data[0]: # no columns + return + assert len(column_names) == len(column_types) + assert len(data[0]) == len(column_names) + + # present data as columns, each column has cells + columns = [] + header_columns_added = 0 + if print_header: + columns.append(column_names) + columns.append(column_types) + header_columns_added = 2 + + for _ in data: + columns.append([]) + + # fill data to columns + for rowI in range(0, len(data)): + for colI in range(0, len(data[rowI])): + columns[rowI + header_columns_added].append(data[rowI][colI]) + + column_widths = [len(max(column, key=len)) for column in columns] + total_width = 0 + for length in column_widths: + total_width += length + + # 
totalDataLength = 0 + + # #determine column length + # for i in range(0, columnsNum): + # if len(columnNames[i]) > minLength: + # columnLengths[i] = len(columnNames[i]) + # else: + # columnLengths[i] = minLength + + # totalDataLength += columnLengths[i]; + + # this is our cap, if we need it.... + cap = "+" + (total_width + 3 * len(columns) - 2) * "-" + "+" + + # print header if needed + # names line + # for i in range(0, columnsNum): + # sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) + # frmt = " %%-%is "%columnLengths[i] + # sys.stdout.write(self.theme.AsgmtHead + frmt%columnNames[i] + self.theme.Reset) + # + # print self.theme.AsgmtBorder + border + self.theme.Reset #last border + # #types line + # for i in range(0, columnsNum): + # sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) + # frmt = " %%-%is "%columnLengths[i] + # sys.stdout.write(self.theme.AsgmtType + frmt%columnTypes[i] + self.theme.Reset) + # print self.theme.AsgmtBorder + border + self.theme.Reset #last border + + # cap? + if display_borders: + print((self.theme.AsgmtBorder + cap + self.theme.Reset)) + + # #data line by line + # columnIter = 0 + + for rowI in range(0, len(columns[0])): + sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) + + for colI in range(0, len(columns)): + # place data + data_item = columns[colI][rowI] + frmt = " %%-%is " % column_widths[colI] + if colI == 0 and print_header: + sys.stdout.write(self.theme.AsgmtHead + frmt % data_item + self.theme.Reset) + elif colI == 1 and print_header: + sys.stdout.write(self.theme.AsgmtType + '(' + (frmt % data_item).strip() + ')' + self.theme.Reset) + sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset) + else: + sys.stdout.write(self.theme.AsgmtValue + frmt % data_item + self.theme.Reset) + + sys.stdout.write(self.theme.AsgmtBorder + border + self.theme.Reset + os.linesep) + + # #final cap? + if display_borders: + print((self.theme.AsgmtBorder + cap + self.theme.Reset)) + + # ---------------------------------------- + # print_help + # ---------------------------------------- + def print_help(self): + """Prints help of the command""" + + print ("""Show data values for assignment. + +Usage: + cat + cat --id #Where assignment_id provided by 'vers
' command + +Formatting flags: + + -c or --comments - Show comments on/off + -nc or --no-comments + + -ph or --horizontal - Print table horizontally + -pa or --vertical - Print table vertically + (If no '--horizontal' or '--vertical' flag is given, the layout of table is determined automatically: + vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise) + + -b or --borders - Switch show borders on of off + -nb or --no-borders + + -h or --header - Show header on/off + -nh or --no-header + + -t or --time - Show time + -nt or --no-time + +Examples: + > cat /test/test_vars/test_table #print latest data for test_table + > cat /test/test_vars/test_table::subtest #print latest data in subtest variation + > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012 + +See also 'dump' command which is 'cat' formatted to save data to files. 'help dump' + + """) diff --git a/python/ccdb/cmd/utils/dump.py b/python/ccdb/cmd/utils/dump.py index 2a781b6b..7d759533 100644 --- a/python/ccdb/cmd/utils/dump.py +++ b/python/ccdb/cmd/utils/dump.py @@ -1,76 +1,76 @@ -import os -import logging - -from ccdb.cmd import ConsoleUtilBase -from ccdb import NoColorTheme - -log = logging.getLogger("ccdb.cmd.utils.ls") - - -# ccdbcmd module interface -def create_util_instance(): - log.debug(" registering Dump") - return Dump() - - -# ********************************************************************* -# Class List - Dumps datat table to a file * -# * -# ********************************************************************* -class Dump(ConsoleUtilBase): - """ Dumps datat table to a file """ - - # ccdb utility class descr part - # ------------------------------ - command = "dump" - name = "Dump" - short_descr = "Dumps data table" - uses_db = True - - # variables for each process - - def process(self, args): - log.debug("{0}Dump is gained a control {0}\\".format(os.linesep)) - log.debug(" |- arguments: " + " ".join(args)) - - theme_backup = self.context.theme - - self.context.theme = NoColorTheme() - # self.context.utils["cat"].theme = NoColorTheme() - try: - command = "cat --no-borders --no-header --comments --time --horizontal " + " ".join(args) - self.context.process_command_line(command) - finally: - self.context.theme = theme_backup - # self.context.utils["cat"].theme = theme_backup - - def print_directory_tree(self, directory, print_full_path, level): - """prints a full tree of directories - This is recursive function""" - - # print this directory - if not print_full_path: - # noinspection PyUnusedLocal - print ("".join([" " for i in range(0, level)]) + directory.name) - else: - print (directory.full_path) - - # print subdirectories recursively - sub_dirs = directory.subdirs - if len(sub_dirs) > 0: - for subDir in sub_dirs: - self.print_directory_tree(subDir, print_full_path, level + 1) - - def print_help(self): - """Prints help of the command""" - print (""" Dumps data table to a file -Usage: - dump - -Example: - > cat /test/test_vars/test_table > file.txt # latest data for test_table - > cat /test/test_vars/test_table::subtest > file.txt # latest data in subtest variation - > cat /test/test_vars/test_table:::2012-08 > file.txt # data latest for august 2012 - -Dump accepts the same flags as 'cat' command. 
See 'help cat' for flags - """) +import os +import logging + +from ccdb.cmd import ConsoleUtilBase +from ccdb import NoColorTheme + +log = logging.getLogger("ccdb.cmd.utils.ls") + + +# ccdbcmd module interface +def create_util_instance(): + log.debug(" registering Dump") + return Dump() + + +# ********************************************************************* +# Class List - Dumps datat table to a file * +# * +# ********************************************************************* +class Dump(ConsoleUtilBase): + """ Dumps datat table to a file """ + + # ccdb utility class descr part + # ------------------------------ + command = "dump" + name = "Dump" + short_descr = "Dumps data table" + uses_db = True + + # variables for each process + + def process(self, args): + log.debug("{0}Dump is gained a control {0}\\".format(os.linesep)) + log.debug(" |- arguments: " + " ".join(args)) + + theme_backup = self.context.theme + + self.context.theme = NoColorTheme() + # self.context.utils["cat"].theme = NoColorTheme() + try: + command = "cat --no-borders --no-header --comments --time --horizontal " + " ".join(args) + self.context.process_command_line(command) + finally: + self.context.theme = theme_backup + # self.context.utils["cat"].theme = theme_backup + + def print_directory_tree(self, directory, print_full_path, level): + """prints a full tree of directories + This is recursive function""" + + # print this directory + if not print_full_path: + # noinspection PyUnusedLocal + print(("".join([" " for i in range(0, level)]) + directory.name)) + else: + print((directory.full_path)) + + # print subdirectories recursively + sub_dirs = directory.subdirs + if len(sub_dirs) > 0: + for subDir in sub_dirs: + self.print_directory_tree(subDir, print_full_path, level + 1) + + def print_help(self): + """Prints help of the command""" + print (""" Dumps data table to a file +Usage: + dump + +Example: + > cat /test/test_vars/test_table > file.txt # latest data for test_table + > cat /test/test_vars/test_table::subtest > file.txt # latest data in subtest variation + > cat /test/test_vars/test_table:::2012-08 > file.txt # data latest for august 2012 + +Dump accepts the same flags as 'cat' command. See 'help cat' for flags + """) diff --git a/python/ccdb/cmd/utils/help.py b/python/ccdb/cmd/utils/help.py index e6a0c7a0..fa9a3ae3 100644 --- a/python/ccdb/cmd/utils/help.py +++ b/python/ccdb/cmd/utils/help.py @@ -27,18 +27,18 @@ class HelpUtil(ConsoleUtilBase): # print_help #---------------------------------------- def print_help(self): - print self.help_text + print(self.help_text) if self.context: print ("Available commands:") - print (" %-10s %-15s %s:"%("(command)", "(name)", "(description)")) - print (" " + "\n ".join( + print((" %-10s %-15s %s:"%("(command)", "(name)", "(description)"))) + print((" " + "\n ".join( ["%-10s %-15s %s" % (command, util.name, util.short_descr) for command, util - in self.context.utils.items() - if not util.help_util])) - print self.flags_text - print self.enveron_text + in list(self.context.utils.items()) + if not util.help_util]))) + print(self.flags_text) + print(self.enveron_text) #---------------------------------------- @@ -46,12 +46,12 @@ def print_help(self): #---------------------------------------- def process(self, args): if self.context: - commands = self.context.utils.keys() + commands = list(self.context.utils.keys()) if len(args)>0: if args[0] in commands: self.context.utils[args[0]].print_help() else: - print ("Command %s not found. 
Available commands are: "%args[0]) + print(("Command %s not found. Available commands are: "%args[0])) else: self.print_help() diff --git a/python/ccdb/cmd/utils/info.py b/python/ccdb/cmd/utils/info.py index 837d4791..76510844 100644 --- a/python/ccdb/cmd/utils/info.py +++ b/python/ccdb/cmd/utils/info.py @@ -1,242 +1,242 @@ -import logging -import os - -from ccdb import Directory, TypeTable, Variation, read_ccdb_text_file, TextFileDOM -from ccdb.cmd import ConsoleUtilBase, UtilityArgumentParser -from ccdb import AlchemyProvider -from ccdb import BraceMessage as LogFmt - - -log = logging.getLogger("ccdb.cmd.utils.info") - - -#ccdbcmd module interface -def create_util_instance(): - log.debug(" registering Info") - return Info() - - -#********************************************************************* -# Class Info - Prints extended information of object by the path * -# * -#********************************************************************* -class Info(ConsoleUtilBase): - """ Prints extended information of object by the path """ - - # ccdb utility class descr part - #------------------------------ - command = "info" - name = "Info" - short_descr = "Prints extended information of object by the path" - uses_db = True - - #---------------------------------------- - # process - #---------------------------------------- - def process(self, args): - if log.isEnabledFor(logging.DEBUG): - log.debug(LogFmt("{0}Info is in charge{0}\\".format(os.linesep))) - log.debug(LogFmt(" |- arguments : '" + "' '".join(args) + "'")) - - assert self.context - provider = self.context.provider - assert isinstance(provider, AlchemyProvider) - - #process arguments - obj_name, obj_type = self._process_arguments(args) - path = self.context.prepare_path(obj_name) # more likely obj_name is path to dir or table - - if not obj_name: - log.warning("No path or name is given. Use 'help info' for getting help.") - - #it is a type table - if obj_type == InfoTypes.type_table: - self.print_type_table(provider.get_type_table(path)) - - #it is a directory - if obj_type == InfoTypes.directory: - self.print_directory(provider.get_directory(path)) - - #it is a variation - if obj_type == InfoTypes.variation: - self.print_variation(provider.get_variation(obj_name)) - - #it is a file!!! - if obj_type == InfoTypes.file: - self.print_file(obj_name) - - #everything is fine! 
- return 0 - - #---------------------------------------- - # process_arguments - #---------------------------------------- - - @staticmethod - def _process_arguments(args): - #solo arguments - - #utility argument parser is argparse which raises errors instead of exiting app - parser = UtilityArgumentParser() - parser.add_argument("obj_name", default="") - parser.add_argument("-v", "--variation", action="store_true") - parser.add_argument("-d", "--directory", action="store_true") - parser.add_argument("-f", "--file", action="store_true") - - result = parser.parse_args(args) - - if result.variation: - obj_type = InfoTypes.variation - elif result.directory: - obj_type = InfoTypes.directory - elif result.file: - obj_type = InfoTypes.file - else: - obj_type = InfoTypes.type_table - - log.debug(LogFmt(" |- parsed as (obj: '{0}', type: '{1}')", result.obj_name, obj_type)) - - return result.obj_name, obj_type - - # ---------------------------------------- - # print_directory - # ---------------------------------------- - def print_directory(self, directory): - assert isinstance(directory, Directory) - print " Name : " + self.theme.Success + directory.name - print " Full path : " + directory.path - try: - print " Created : " + directory.created.strftime("%Y-%m-%d %H-%M-%S") - except Exception as ex: - log.warning("Directory created time getting error: " + str(ex)) - try: - print " Modified : " + directory.modified.strftime("%Y-%m-%d %H-%M-%S") - except Exception as ex: - log.warning("Directory modify time getting error: " + str(ex)) - - #comment - print " Comment: " - print directory.comment - print - - #---------------------------------------- - # print_type_table - #---------------------------------------- - def print_type_table(self, table): - #basic values: name rows columns path - assert isinstance(table, TypeTable) - print "+------------------------------------------+" - print "| Type table information |" - print "+------------------------------------------+" - print " Name : " + self.theme.Success + table.name - print " Full path : " + table.path - print " Rows : " + self.theme.Accent + repr(int(table.rows_count)) - print " Columns : " + self.theme.Accent + repr(int(table.columns_count)) - print " Created : " + table.created.strftime("%Y-%m-%d %H-%M-%S") - print " Modified : " + table.modified.strftime("%Y-%m-%d %H-%M-%S") - print " DB Id : " + repr(int(table.id)) - print "+------------------------------------------+" - print "| Columns info |" - print "+------------------------------------------+" - #columns info - print - print "Columns info " - print " N. 
(type) : (name)" - for column in table.columns: - print " " + repr(int(column.order)).ljust(4) \ - + " " + self.theme.Type + "%-10s" % column.type + self.theme.Reset + ": " + column.name - - print - print "+------------------------------------------+" - #comment - print "Comment: " - print table.comment - print - - #---------------------------------------- - # print_variation - #---------------------------------------- - def print_variation(self, variation): - #basic values: name rows columns path - assert isinstance(variation, Variation) - print "+------------------------------------------+" - print "| Variation information |" - print "+------------------------------------------+" - print " Name : " + self.theme.Success + variation.name - print " Created : " + variation.created.strftime("%Y-%m-%d %H-%M-%S") - print " DB Id : " + repr(int(variation.id)) - print " Parent : " + (variation.parent.name if variation.parent else "--") - print " Comment: " - print variation.comment - print - - #---------------------------------------- - # print_help - #---------------------------------------- - def print_help(self): - """Prints help of the command""" - - print """Prints extended info about the object - info - info about type table with given path - info -d - info about directory with given path - info -v - info about variation with given name - info -f - info about text file (col. names), rows, etc. - - """ - - def print_file(self, file_path): - #reading file - try: - dom = read_ccdb_text_file(file_path) - except IOError as error: - log.warning(LogFmt("Unable to read file '{0}'. The error message is: '{1}'", file_path, error)) - raise - - #Is there data at all? - if not dom.has_data: - message = "Seems like file has no data" - log.warning(message) - raise ValueError(message=message) - - #check what we've got - assert isinstance(dom, TextFileDOM) - if not dom.data_is_consistent: - message = "Inconsistency error. " + dom.inconsistent_reason - log.warning(message) - raise ValueError(message=message) - - log.info(LogFmt("Rows: {}{}{}", self.theme.Accent, len(dom.rows), self.theme.Reset)) - log.info(LogFmt("Columns: {}{}{}", self.theme.Accent, len(dom.rows[0]), self.theme.Reset)) - - - #column names - if dom.column_names: - log.info("Column names:") - log.info(" " + (os.linesep + " ").join( - [self.theme.Accent + col_name + self.theme.Reset for col_name in dom.column_names])) - else: - log.info("No column names found (column name string starts with #&)") - - #meta data - if dom.metas: - log.info("Meta data:") - log.info((os.linesep + " ").join([key + " = " + val for key, val in dom.metas])) - - #comments - if dom.comment_lines: - log.info(LogFmt("{0}Comments in file: {0}{1}", os.linesep, os.linesep.join(ln for ln in dom.comment_lines))) - else: - log.info("No comments in file found") - - ccdb_prefix = "ccdb " if not self.context.is_interactive else "" - log.info("") - log.info(LogFmt("Type '{1}mktbl -f {0}' to see how to create a table for the file", file_path, ccdb_prefix)) - log.info(LogFmt("Type '{1}add
{0} #' to add the file to existing table" - " (rows and columns must consist)", file_path, ccdb_prefix)) - - -class InfoTypes(object): - variation = "variation" - type_table = "type_table" - directory = "directory" - file = "file" +import logging +import os + +from ccdb import Directory, TypeTable, Variation, read_ccdb_text_file, TextFileDOM +from ccdb.cmd import ConsoleUtilBase, UtilityArgumentParser +from ccdb import AlchemyProvider +from ccdb import BraceMessage as LogFmt + + +log = logging.getLogger("ccdb.cmd.utils.info") + + +#ccdbcmd module interface +def create_util_instance(): + log.debug(" registering Info") + return Info() + + +#********************************************************************* +# Class Info - Prints extended information of object by the path * +# * +#********************************************************************* +class Info(ConsoleUtilBase): + """ Prints extended information of object by the path """ + + # ccdb utility class descr part + #------------------------------ + command = "info" + name = "Info" + short_descr = "Prints extended information of object by the path" + uses_db = True + + #---------------------------------------- + # process + #---------------------------------------- + def process(self, args): + if log.isEnabledFor(logging.DEBUG): + log.debug(LogFmt("{0}Info is in charge{0}\\".format(os.linesep))) + log.debug(LogFmt(" |- arguments : '" + "' '".join(args) + "'")) + + assert self.context + provider = self.context.provider + assert isinstance(provider, AlchemyProvider) + + #process arguments + obj_name, obj_type = self._process_arguments(args) + path = self.context.prepare_path(obj_name) # more likely obj_name is path to dir or table + + if not obj_name: + log.warning("No path or name is given. Use 'help info' for getting help.") + + #it is a type table + if obj_type == InfoTypes.type_table: + self.print_type_table(provider.get_type_table(path)) + + #it is a directory + if obj_type == InfoTypes.directory: + self.print_directory(provider.get_directory(path)) + + #it is a variation + if obj_type == InfoTypes.variation: + self.print_variation(provider.get_variation(obj_name)) + + #it is a file!!! + if obj_type == InfoTypes.file: + self.print_file(obj_name) + + #everything is fine! 
+ return 0 + + #---------------------------------------- + # process_arguments + #---------------------------------------- + + @staticmethod + def _process_arguments(args): + #solo arguments + + #utility argument parser is argparse which raises errors instead of exiting app + parser = UtilityArgumentParser() + parser.add_argument("obj_name", default="") + parser.add_argument("-v", "--variation", action="store_true") + parser.add_argument("-d", "--directory", action="store_true") + parser.add_argument("-f", "--file", action="store_true") + + result = parser.parse_args(args) + + if result.variation: + obj_type = InfoTypes.variation + elif result.directory: + obj_type = InfoTypes.directory + elif result.file: + obj_type = InfoTypes.file + else: + obj_type = InfoTypes.type_table + + log.debug(LogFmt(" |- parsed as (obj: '{0}', type: '{1}')", result.obj_name, obj_type)) + + return result.obj_name, obj_type + + # ---------------------------------------- + # print_directory + # ---------------------------------------- + def print_directory(self, directory): + assert isinstance(directory, Directory) + print(" Name : " + self.theme.Success + directory.name) + print(" Full path : " + directory.path) + try: + print(" Created : " + directory.created.strftime("%Y-%m-%d %H-%M-%S")) + except Exception as ex: + log.warning("Directory created time getting error: " + str(ex)) + try: + print(" Modified : " + directory.modified.strftime("%Y-%m-%d %H-%M-%S")) + except Exception as ex: + log.warning("Directory modify time getting error: " + str(ex)) + + #comment + print(" Comment: ") + print(directory.comment) + print() + + #---------------------------------------- + # print_type_table + #---------------------------------------- + def print_type_table(self, table): + #basic values: name rows columns path + assert isinstance(table, TypeTable) + print("+------------------------------------------+") + print("| Type table information |") + print("+------------------------------------------+") + print(" Name : " + self.theme.Success + table.name) + print(" Full path : " + table.path) + print(" Rows : " + self.theme.Accent + repr(int(table.rows_count))) + print(" Columns : " + self.theme.Accent + repr(int(table.columns_count))) + print(" Created : " + table.created.strftime("%Y-%m-%d %H-%M-%S")) + print(" Modified : " + table.modified.strftime("%Y-%m-%d %H-%M-%S")) + print(" DB Id : " + repr(int(table.id))) + print("+------------------------------------------+") + print("| Columns info |") + print("+------------------------------------------+") + #columns info + print() + print("Columns info ") + print(" N. 
(type) : (name)") + for column in table.columns: + print(" " + repr(int(column.order)).ljust(4) \ + + " " + self.theme.Type + "%-10s" % column.type + self.theme.Reset + ": " + column.name) + + print() + print("+------------------------------------------+") + #comment + print("Comment: ") + print(table.comment) + print() + + #---------------------------------------- + # print_variation + #---------------------------------------- + def print_variation(self, variation): + #basic values: name rows columns path + assert isinstance(variation, Variation) + print("+------------------------------------------+") + print("| Variation information |") + print("+------------------------------------------+") + print(" Name : " + self.theme.Success + variation.name) + print(" Created : " + variation.created.strftime("%Y-%m-%d %H-%M-%S")) + print(" DB Id : " + repr(int(variation.id))) + print(" Parent : " + (variation.parent.name if variation.parent else "--")) + print(" Comment: ") + print(variation.comment) + print() + + #---------------------------------------- + # print_help + #---------------------------------------- + def print_help(self): + """Prints help of the command""" + + print("""Prints extended info about the object + info - info about type table with given path + info -d - info about directory with given path + info -v - info about variation with given name + info -f - info about text file (col. names), rows, etc. + + """) + + def print_file(self, file_path): + #reading file + try: + dom = read_ccdb_text_file(file_path) + except IOError as error: + log.warning(LogFmt("Unable to read file '{0}'. The error message is: '{1}'", file_path, error)) + raise + + #Is there data at all? + if not dom.has_data: + message = "Seems like file has no data" + log.warning(message) + raise ValueError(message=message) + + #check what we've got + assert isinstance(dom, TextFileDOM) + if not dom.data_is_consistent: + message = "Inconsistency error. " + dom.inconsistent_reason + log.warning(message) + raise ValueError(message=message) + + log.info(LogFmt("Rows: {}{}{}", self.theme.Accent, len(dom.rows), self.theme.Reset)) + log.info(LogFmt("Columns: {}{}{}", self.theme.Accent, len(dom.rows[0]), self.theme.Reset)) + + + #column names + if dom.column_names: + log.info("Column names:") + log.info(" " + (os.linesep + " ").join( + [self.theme.Accent + col_name + self.theme.Reset for col_name in dom.column_names])) + else: + log.info("No column names found (column name string starts with #&)") + + #meta data + if dom.metas: + log.info("Meta data:") + log.info((os.linesep + " ").join([key + " = " + val for key, val in dom.metas])) + + #comments + if dom.comment_lines: + log.info(LogFmt("{0}Comments in file: {0}{1}", os.linesep, os.linesep.join(ln for ln in dom.comment_lines))) + else: + log.info("No comments in file found") + + ccdb_prefix = "ccdb " if not self.context.is_interactive else "" + log.info("") + log.info(LogFmt("Type '{1}mktbl -f {0}' to see how to create a table for the file", file_path, ccdb_prefix)) + log.info(LogFmt("Type '{1}add
{0} #' to add the file to existing table" + " (rows and columns must consist)", file_path, ccdb_prefix)) + + +class InfoTypes(object): + variation = "variation" + type_table = "type_table" + directory = "directory" + file = "file" diff --git a/python/ccdb/cmd/utils/log.py b/python/ccdb/cmd/utils/log.py index bf50df51..4a2bd81d 100644 --- a/python/ccdb/cmd/utils/log.py +++ b/python/ccdb/cmd/utils/log.py @@ -1,126 +1,126 @@ -import os -import logging -from sqlalchemy import desc - -import ccdb -from ccdb import TextFileDOM -from ccdb import AlchemyProvider -from ccdb.cmd import ConsoleUtilBase, UtilityArgumentParser -from ccdb.model import LogRecord -from ccdb import BraceMessage as LogFmt - -log = logging.getLogger("ccdb.cmd.utils.log") - -#ccdbcmd module interface -def create_util_instance(): - """ - This function is a module interface - - :return: new ShowLog util - :rtype: ShowLog - """ - log.debug(" registering ShowLog") - return ShowLog() - - -#********************************************************************* -# Class ShowLog - Add data constants * -# * -#********************************************************************* -class ShowLog(ConsoleUtilBase): - """ Show log record""" - - # ccdb utility class descr part - #------------------------------ - command = "log" - name = "ShowLog" - short_descr = "Shows log records" - uses_db = True - - def __init(self): - pass - - #---------------------------------------- - # process - #---------------------------------------- - def process(self, args): - if log.isEnabledFor(logging.DEBUG): - log.debug(LogFmt("{0}ShowLog is in charge{0}\\".format(os.linesep))) - log.debug(LogFmt(" |- arguments : '" + "' '".join(args)+"'")) - - provider = self.context.provider - assert isinstance(provider, AlchemyProvider) - - result = self.process_arguments(args) - log_records = self.filter(result) - - if len(log_records) == 0: - print("No entries matching the filter criteria.") - else: - self.print_logs(log_records) - - def process_arguments(self, args): - # utility argument parser is argparse which raises errors instead of exiting app - parser = UtilityArgumentParser() - parser.add_argument("obj_name", nargs="?", default="") - parser.add_argument("-t", "--table", default="") - parser.add_argument("-v", "--variation", default="") - parser.add_argument("-u", "--user", default="") - parser.add_argument("-d", "--date", default="") # TODO: Implement filter by date - result = parser.parse_args(args) - return result - -# ---------------------------------------- -# filtering -# ---------------------------------------- - def filter(self, result): - - query = self.context.provider.session.query(LogRecord) - - if result.user: # filter by user - user_id = self.context.provider.get_user(result.user).id - query = query.filter(LogRecord.author_id.like("{0}".format(user_id))) - - if result.table or result.obj_name: # filter by table - - user_input = result.obj_name if result.obj_name else result.table - - user_input = user_input.replace("_", "\\_").replace("*", "%").replace("?", "_") - full_table_path = self.context.prepare_path(user_input) - query = query.filter(LogRecord.description.like("%'{0}%:%:%:%".format(full_table_path))) - - if result.variation: # filter by variation - query = query.filter(LogRecord.description.like("%:%:{0}:%".format(result.variation))) - - query.order_by(desc(LogRecord.id)) - - return query.all() - - -#---------------------------------------- -# print logs -#---------------------------------------- - def print_logs(self, log_records): - - print 
self.theme.Directories + "(action) (author) (date) (description)" - - for log_record in log_records: - print " %-13s "%log_record.action +\ - " %-16s"%log_record.author.name + \ - " %-18s"%log_record.created.strftime("%Y-%m-%d %H-%M-%S ") + " " +\ - log_record.description - - -#---------------------------------------- -# print_help -#---------------------------------------- - def print_help(self): - """Prints help of the command""" - - print """Shows log records - log - -Example: - > log - > log 50 - """ +import os +import logging +from sqlalchemy import desc + +import ccdb +from ccdb import TextFileDOM +from ccdb import AlchemyProvider +from ccdb.cmd import ConsoleUtilBase, UtilityArgumentParser +from ccdb.model import LogRecord +from ccdb import BraceMessage as LogFmt + +log = logging.getLogger("ccdb.cmd.utils.log") + +#ccdbcmd module interface +def create_util_instance(): + """ + This function is a module interface + + :return: new ShowLog util + :rtype: ShowLog + """ + log.debug(" registering ShowLog") + return ShowLog() + + +#********************************************************************* +# Class ShowLog - Add data constants * +# * +#********************************************************************* +class ShowLog(ConsoleUtilBase): + """ Show log record""" + + # ccdb utility class descr part + #------------------------------ + command = "log" + name = "ShowLog" + short_descr = "Shows log records" + uses_db = True + + def __init(self): + pass + + #---------------------------------------- + # process + #---------------------------------------- + def process(self, args): + if log.isEnabledFor(logging.DEBUG): + log.debug(LogFmt("{0}ShowLog is in charge{0}\\".format(os.linesep))) + log.debug(LogFmt(" |- arguments : '" + "' '".join(args)+"'")) + + provider = self.context.provider + assert isinstance(provider, AlchemyProvider) + + result = self.process_arguments(args) + log_records = self.filter(result) + + if len(log_records) == 0: + print("No entries matching the filter criteria.") + else: + self.print_logs(log_records) + + def process_arguments(self, args): + # utility argument parser is argparse which raises errors instead of exiting app + parser = UtilityArgumentParser() + parser.add_argument("obj_name", nargs="?", default="") + parser.add_argument("-t", "--table", default="") + parser.add_argument("-v", "--variation", default="") + parser.add_argument("-u", "--user", default="") + parser.add_argument("-d", "--date", default="") # TODO: Implement filter by date + result = parser.parse_args(args) + return result + +# ---------------------------------------- +# filtering +# ---------------------------------------- + def filter(self, result): + + query = self.context.provider.session.query(LogRecord) + + if result.user: # filter by user + user_id = self.context.provider.get_user(result.user).id + query = query.filter(LogRecord.author_id.like("{0}".format(user_id))) + + if result.table or result.obj_name: # filter by table + + user_input = result.obj_name if result.obj_name else result.table + + user_input = user_input.replace("_", "\\_").replace("*", "%").replace("?", "_") + full_table_path = self.context.prepare_path(user_input) + query = query.filter(LogRecord.description.like("%'{0}%:%:%:%".format(full_table_path))) + + if result.variation: # filter by variation + query = query.filter(LogRecord.description.like("%:%:{0}:%".format(result.variation))) + + query.order_by(desc(LogRecord.id)) + + return query.all() + + +#---------------------------------------- +# print logs 
+#---------------------------------------- + def print_logs(self, log_records): + + print(self.theme.Directories + "(action) (author) (date) (description)") + + for log_record in log_records: + print(" %-13s "%log_record.action +\ + " %-16s"%log_record.author.name + \ + " %-18s"%log_record.created.strftime("%Y-%m-%d %H-%M-%S ") + " " +\ + log_record.description) + + +#---------------------------------------- +# print_help +#---------------------------------------- + def print_help(self): + """Prints help of the command""" + + print("""Shows log records + log + +Example: + > log + > log 50 + """) diff --git a/python/ccdb/cmd/utils/ls.py b/python/ccdb/cmd/utils/ls.py index 327a3158..51aca306 100644 --- a/python/ccdb/cmd/utils/ls.py +++ b/python/ccdb/cmd/utils/ls.py @@ -239,9 +239,9 @@ def print_directory_tree(self, directory, printFullPath, level): #print this directory if not printFullPath: - print "".join([" " for i in range(0, level)]) + directory.name + print("".join([" " for i in range(0, level)]) + directory.name) else: - print directory.path + print(directory.path) #print subdirectories recursively sub_dirs = directory.sub_dirs @@ -251,7 +251,7 @@ def print_directory_tree(self, directory, printFullPath, level): def print_variations(self): default_variation = self.context.provider.get_variation("default") - print( self._get_variation_tree_str(default_variation) ) + print(( self._get_variation_tree_str(default_variation) )) def _get_variation_tree_str(self, variation, level=0): ret = " "*level + str(variation.name)+"\n" @@ -263,7 +263,7 @@ def print_tables(self): tables = self.context.provider.search_type_tables("*") for table in tables: assert (isinstance(table, TypeTable)) - print(table.path) + print((table.path)) def table_info(self, table, is_extended): log.info(table.path) @@ -274,7 +274,7 @@ def table_info(self, table, is_extended): def print_help(self): """Prints help of the command""" - print """ + print(""" Lists directories and tables for current directory - Accepts wildcards symbols '*', and '?' @@ -289,7 +289,7 @@ def print_help(self): -x or --dtree - draws directory tree -l or --extended - shows extended info when is used on table -""" +""") class ListTasks(object): diff --git a/python/ccdb/cmd/utils/mktbl.py b/python/ccdb/cmd/utils/mktbl.py index 1d3b2601..9c6d7dc4 100644 --- a/python/ccdb/cmd/utils/mktbl.py +++ b/python/ccdb/cmd/utils/mktbl.py @@ -120,11 +120,11 @@ def interactive_mode(self): """asks data in interactive mode""" if not self.table_path_set: - self.table_name = raw_input("Enter table name :") - self.table_parent_path = raw_input("Enter table parent path: ") + self.table_name = input("Enter table name :") + self.table_parent_path = input("Enter table parent path: ") self.table_path = posixpath.join(self.table_parent_path, self.table_name) if not self.comment_set: - self.comment = raw_input("Enter comment :") + self.comment = input("Enter comment :") #---------------------------------------------- # do_create_type - creates table @@ -135,7 +135,7 @@ def do_create_type(self): log.debug(" write table to database...") self.context.provider.create_type_table(self.table_name, self.table_parent_path, self.rows, self.columns, self.comment) - print "saving table to database... " + self.theme.Success + " completed" + self.theme.Reset + print("saving table to database... 
" + self.theme.Success + " completed" + self.theme.Reset) #---------------------------------------------- # process_arguments - process input arguments @@ -323,7 +323,7 @@ def analyse_file(self): def print_help(self): """prints help for MakeTable""" - print """ + print(""" MakeTable or mktbl - create type table with the specified namepath and parameters usage: @@ -387,67 +387,67 @@ def print_help(self): mktbl -nq ... 10val - creates 1 column named '10val' -f or --file Infer type table from text table file.(Hint: column names row should start with #&) - """ + """) #---------------------------------------------- # print_validation - PRINTS VALIDATION TABLE #---------------------------------------------- def print_validation(self): #basic values: name rows columns path - print + print() if not len(self.table_name): - print "Table: " + self.theme.Fail + "Name is not set" + print("Table: " + self.theme.Fail + "Name is not set") else: - print "Table: " + self.theme.Success + self.table_name + print("Table: " + self.theme.Success + self.table_name) - print "Rows num: " + repr(self.rows) + self.theme.Reset + \ - " Columns num: " + repr(len(self.columns)) - print "Full path: " + self.table_path + print("Rows num: " + repr(self.rows) + self.theme.Reset + \ + " Columns num: " + repr(len(self.columns))) + print("Full path: " + self.table_path) #columns info - print - print "Columns: " - print " (type) : (name)" + print() + print("Columns: ") + print(" (type) : (name)") for (colname, coltype) in self.columns: - print " " + self.theme.Type + "%-10s" % coltype + self.theme.Reset + ": " + colname - print + print(" " + self.theme.Type + "%-10s" % coltype + self.theme.Reset + ": " + colname) + print() #comment - print "Comment: " + print("Comment: ") if len(self.comment): - print self.comment + print(self.comment) else: - print self.theme.Fail + "Comment is empty" + print(self.theme.Fail + "Comment is empty") #additional info print - print - print "Additional info: " + print() + print("Additional info: ") if self.rows_set: - print " Rows number is set by " + self.theme.Success + "User" + print(" Rows number is set by " + self.theme.Success + "User") else: - print " Rows number is set by " + self.theme.Accent + "Default" + print(" Rows number is set by " + self.theme.Accent + "Default") if self.comment_set: - print " Comments added by " + self.theme.Success + "User" + print(" Comments added by " + self.theme.Success + "User") else: - print " No comments are set" + print(" No comments are set") def print_settings_summary(self): - print self.theme.Success + " Summary: " - print " columns: ", self.columns - print " unparsed_columns: ", self.unparsed_columns - print - print " rows : ", self.rows - print " rows_set : ", repr(self.rows_set) - print - print " interactive : ", repr(self.interactive) - print " interactive_set : ", repr(self.interactive_set) - print - print " comment : ", self.comment - print " comment_set : ", repr(self.comment_set) - print - print " table_name : ", self.table_name - print - print " table_path : ", self.table_path - print " table_path_set : ", repr(self.table_path_set) - print - print " table_parent_path : ", self.table_parent_path \ No newline at end of file + print(self.theme.Success + " Summary: ") + print(" columns: ", self.columns) + print(" unparsed_columns: ", self.unparsed_columns) + print() + print(" rows : ", self.rows) + print(" rows_set : ", repr(self.rows_set)) + print() + print(" interactive : ", repr(self.interactive)) + print(" interactive_set : ", 
repr(self.interactive_set))
+    print()
+    print("    comment          : ", self.comment)
+    print("    comment_set      : ", repr(self.comment_set))
+    print()
+    print("    table_name       : ", self.table_name)
+    print()
+    print("    table_path       : ", self.table_path)
+    print("    table_path_set   : ", repr(self.table_path_set))
+    print()
+    print("    table_parent_path : ", self.table_parent_path)
\ No newline at end of file
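A note on the print_validation hunk in mktbl.py above: one bare `print` appears to survive the conversion as an unchanged context line. That spelling changes meaning between the two Pythons, so it looks like a leftover rather than an intentional keep. A minimal sketch of the pitfall, not part of the patch:

    # Python 2: a bare print statement emits a newline
    #   print          # -> prints an empty line
    # Python 3: the same token is just an expression referencing the builtin
    print              # no-op: the function object is evaluated and discarded
    print()            # the Python 3 spelling that actually prints an empty line

Leftovers like this are why a manual review pass is still worthwhile after running 2to3.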
diff --git a/python/ccdb/cmd/utils/mkvar.py b/python/ccdb/cmd/utils/mkvar.py
index ce0ace61..4dd1150d 100644
--- a/python/ccdb/cmd/utils/mkvar.py
+++ b/python/ccdb/cmd/utils/mkvar.py
@@ -1,83 +1,83 @@
-from ccdb.cmd import ConsoleUtilBase, UtilityArgumentParser
-from ccdb.path_utils import validate_name
-import logging
-
-log = logging.getLogger("ccdb.cmd.utils.mkvar")
-
-
-#ccdbcmd module interface
-def create_util_instance():
-    log.debug(" registering MakeVariation")
-    return MakeVariation()
-
-
-#*********************************************************************
-#   Class MakeVariation - Create variation                           *
-#                                                                    *
-#*********************************************************************
-class MakeVariation(ConsoleUtilBase):
-    """ Create variation """
-
-    # ccdb utility class descr part
-    #------------------------------
-    command = "mkvar"
-    name = "MakeVariation"
-    short_descr = "Create variation"
-    uses_db = True
-
-
-    def process(self, args):
-        log.debug("MakeVariation module gained control")
-        log.debug("Arguments: \n " + " ".join(args))
-
-        if not len(args):
-            return
-
-        #find #comment
-        comment = ""
-        for i in range(len(args)):
-            arg = args[i]
-            if arg.startswith("#"):
-                comment = (" ".join(args[i:]))[1:]  # [1:] to remove #
-                args = args[:i]
-                break
-
-        #utility argument parser is argparse which raises errors instead of exiting app
-        parser = UtilityArgumentParser()
-        parser.add_argument("name", default="")
-        parser.add_argument("-p", "--parent", default="")
-        result = parser.parse_args(args)
-
-        parser.add_argument("--verbose", help="increase output verbosity",
-                            action="store_true")
-
-        #in case there is a space between comments and name
-
-        if not validate_name(result.name):
-            raise ValueError("Invalid variation name. Only [a-z A-Z 0-9 _] symbols are allowed for variation name")
-
-        #try to create directory
-        log.debug(" creating variation. Name: {0}, comment: {1}".format(result.name, comment))
-
-        self.context.provider.create_variation(result.name, comment, result.parent)
-
-        log.info("Variation " + result.name + self.theme.Success + " created" + self.theme.Reset)
-
-    #----------------------------------------------
-    #  print_help - prints help
-    #----------------------------------------------
-    def print_help(self):
-        """prints help to user"""
-
-        print """
-MakeVariation or mkvar - create variation with specified name
-
-usage:
-
-    mkvar  #
-    mkvar  -p  #
-
-    name - is a variation name. [a-z A-Z 0-9 _] are allowed symbols
-    comments - are comments... don't forget space before #
-    -p or --parent - name of parent variation. If no name provided, "default" variation is the parent
-    """
+from ccdb.cmd import ConsoleUtilBase, UtilityArgumentParser
+from ccdb.path_utils import validate_name
+import logging
+
+log = logging.getLogger("ccdb.cmd.utils.mkvar")
+
+
+#ccdbcmd module interface
+def create_util_instance():
+    log.debug(" registering MakeVariation")
+    return MakeVariation()
+
+
+#*********************************************************************
+#   Class MakeVariation - Create variation                           *
+#                                                                    *
+#*********************************************************************
+class MakeVariation(ConsoleUtilBase):
+    """ Create variation """
+
+    # ccdb utility class descr part
+    #------------------------------
+    command = "mkvar"
+    name = "MakeVariation"
+    short_descr = "Create variation"
+    uses_db = True
+
+
+    def process(self, args):
+        log.debug("MakeVariation module gained control")
+        log.debug("Arguments: \n " + " ".join(args))
+
+        if not len(args):
+            return
+
+        #find #comment
+        comment = ""
+        for i in range(len(args)):
+            arg = args[i]
+            if arg.startswith("#"):
+                comment = (" ".join(args[i:]))[1:]  # [1:] to remove #
+                args = args[:i]
+                break
+
+        #utility argument parser is argparse which raises errors instead of exiting app
+        parser = UtilityArgumentParser()
+        parser.add_argument("name", default="")
+        parser.add_argument("-p", "--parent", default="")
+        result = parser.parse_args(args)
+
+        parser.add_argument("--verbose", help="increase output verbosity",
+                            action="store_true")
+
+        #in case there is a space between comments and name
+
+        if not validate_name(result.name):
+            raise ValueError("Invalid variation name. Only [a-z A-Z 0-9 _] symbols are allowed for variation name")
+
+        #try to create directory
+        log.debug(" creating variation. Name: {0}, comment: {1}".format(result.name, comment))
+
+        self.context.provider.create_variation(result.name, comment, result.parent)
+
+        log.info("Variation " + result.name + self.theme.Success + " created" + self.theme.Reset)
+
+    #----------------------------------------------
+    #  print_help - prints help
+    #----------------------------------------------
+    def print_help(self):
+        """prints help to user"""
+
+        print("""
+MakeVariation or mkvar - create variation with specified name
+
+usage:
+
+    mkvar  #
+    mkvar  -p  #
+
+    name - is a variation name. [a-z A-Z 0-9 _] are allowed symbols
+    comments - are comments... don't forget space before #
+    -p or --parent - name of parent variation. 
If no name provided, "default" variation is the parent + """) diff --git a/python/ccdb/cmd/utils/pwd.py b/python/ccdb/cmd/utils/pwd.py index 48ae6035..d9aa7c7e 100644 --- a/python/ccdb/cmd/utils/pwd.py +++ b/python/ccdb/cmd/utils/pwd.py @@ -1,34 +1,34 @@ -from ccdb.cmd import ConsoleUtilBase - -import logging -log = logging.getLogger("ccdb.cmd.utils.pwd") - - -#ccdbcmd module interface -def create_util_instance(): - log.debug(" registering PrintWorkDir") - return PrintWorkDir() - - -#********************************************************************* -# Class PrintWorkDir - Prints working directory * -# * -#********************************************************************* -class PrintWorkDir(ConsoleUtilBase): - """ Prints working directory """ - - # ccdb utility class descr part - #------------------------------ - command = "pwd" - name = "PrintWorkDir" - short_descr = "Prints working directory" - - def print_help(self): - print """ Prints working directory """ - - - def process(self, args): - log.debug(" PrintWorkDir is gained a control over the process.") - log.debug(" ".join(args)) - assert self.context is not None - print self.context.current_path \ No newline at end of file +from ccdb.cmd import ConsoleUtilBase + +import logging +log = logging.getLogger("ccdb.cmd.utils.pwd") + + +#ccdbcmd module interface +def create_util_instance(): + log.debug(" registering PrintWorkDir") + return PrintWorkDir() + + +#********************************************************************* +# Class PrintWorkDir - Prints working directory * +# * +#********************************************************************* +class PrintWorkDir(ConsoleUtilBase): + """ Prints working directory """ + + # ccdb utility class descr part + #------------------------------ + command = "pwd" + name = "PrintWorkDir" + short_descr = "Prints working directory" + + def print_help(self): + print(""" Prints working directory """) + + + def process(self, args): + log.debug(" PrintWorkDir is gained a control over the process.") + log.debug(" ".join(args)) + assert self.context is not None + print(self.context.current_path) \ No newline at end of file diff --git a/python/ccdb/cmd/utils/rm.py b/python/ccdb/cmd/utils/rm.py index d478f41f..0a19a10d 100644 --- a/python/ccdb/cmd/utils/rm.py +++ b/python/ccdb/cmd/utils/rm.py @@ -1,182 +1,182 @@ -import logging -import os -from ccdb import AlchemyProvider -from ccdb.cmd import ConsoleUtilBase -from sqlalchemy.orm.exc import NoResultFound - -log = logging.getLogger("ccdb.cmd.utils.rm") - -#ccdbcmd module interface -def create_util_instance(): - log.debug(" registering Remove") - return Remove() - - -#********************************************************************* -# Class Info - Prints extended information of object by the path * -# * -#********************************************************************* -class Remove(ConsoleUtilBase): - """ Removes directory or type table """ - - # ccdb utility class descr part - #------------------------------ - command = "rm" - name = "Remove" - short_descr = "Removes directory or type table" - uses_db = True - - #variables for each process - - raw_entry = "/" #object path with possible pattern, like /mole/* - path = "/" #parent path - -#---------------------------------------- -# process -#---------------------------------------- - def process(self, args): - log.debug("{0}Remove is gained a control{0}\\".format(os.linesep)) - log.debug(" |- arguments: " + " ".join(["'"+arg+"'" for arg in args])) - - assert self.context is not None - 
provider = self.context.provider - isinstance(provider, AlchemyProvider) - - #process arguments - self.raw_entry = "" - self.object_type = "type_table" - self.ask_confirm = True - self.process_arguments(args) - log.debug(" |- object_type: '{0}'".format(self.object_type)) - log.debug(" |- ask confirm: '{0}'".format(self.ask_confirm)) - log.debug(" |- raw entry: '{0}'".format(self.raw_entry)) - - #correct ending / - self.path = self.context.prepare_path(self.raw_entry) - - if not self.raw_entry: - log.warning("No path is given. Use 'help info' or 'usage info' for getting help.") - - self.print_warning() - - #ask confirmation - if self.ask_confirm: - result = raw_input("To confirm delete type 'yes': ") - if result != 'yes': - return 0 - - #it is a type table - if self.object_type == "type_table": - - try: - self.type_table = provider.get_type_table(self.path) - provider.delete_type_table(self.type_table) - except NoResultFound: - log.warning("No type table with this path: '{0}'".format(self.path)) - raise - - #it is a directory - if self.object_type == "directory": - try: - parent_dir = provider.get_directory(self.path) - provider.delete_directory(parent_dir) - except KeyError: - log.warning("No directory with this path: '{0}'".format(self.path)) - raise - - #it is a variation - if self.object_type == "variation": - try: - variation = provider.get_variation(self.raw_entry) - provider.delete_variation(variation) - except NoResultFound: - log.warning("Unable to delete variation '{0}'".format(self.raw_entry)) - raise - - if self.object_type == "assignment": - assignment = provider.get_assignment_by_id(int(self.raw_entry)) - provider.delete_assignment(assignment) - #TODO use request instead of id - - #everything is fine! - return 0 - -#---------------------------------------- -# process_arguments -#---------------------------------------- - def process_arguments(self, args): - - #parse loop - i=0 - while i < len(args): - token = args[i].strip() - i+=1 - if token.startswith('-'): - #it is some command, lets parse what is the command - - #variation - if token == "-v" or token.startswith("--variation"): - if i - removes type table with given path - rm -d - removes directory with given path - rm -v - removes variation with given name - rm -a - Assignment db id from 'vers' command* - - Flags: - - -f or --force - removes object without question - - """ +import logging +import os +from ccdb import AlchemyProvider +from ccdb.cmd import ConsoleUtilBase +from sqlalchemy.orm.exc import NoResultFound + +log = logging.getLogger("ccdb.cmd.utils.rm") + +#ccdbcmd module interface +def create_util_instance(): + log.debug(" registering Remove") + return Remove() + + +#********************************************************************* +# Class Info - Prints extended information of object by the path * +# * +#********************************************************************* +class Remove(ConsoleUtilBase): + """ Removes directory or type table """ + + # ccdb utility class descr part + #------------------------------ + command = "rm" + name = "Remove" + short_descr = "Removes directory or type table" + uses_db = True + + #variables for each process + + raw_entry = "/" #object path with possible pattern, like /mole/* + path = "/" #parent path + +#---------------------------------------- +# process +#---------------------------------------- + def process(self, args): + log.debug("{0}Remove is gained a control{0}\\".format(os.linesep)) + log.debug(" |- arguments: " + " ".join(["'"+arg+"'" for arg in args])) + + 
assert self.context is not None + provider = self.context.provider + isinstance(provider, AlchemyProvider) + + #process arguments + self.raw_entry = "" + self.object_type = "type_table" + self.ask_confirm = True + self.process_arguments(args) + log.debug(" |- object_type: '{0}'".format(self.object_type)) + log.debug(" |- ask confirm: '{0}'".format(self.ask_confirm)) + log.debug(" |- raw entry: '{0}'".format(self.raw_entry)) + + #correct ending / + self.path = self.context.prepare_path(self.raw_entry) + + if not self.raw_entry: + log.warning("No path is given. Use 'help info' or 'usage info' for getting help.") + + self.print_warning() + + #ask confirmation + if self.ask_confirm: + result = input("To confirm delete type 'yes': ") + if result != 'yes': + return 0 + + #it is a type table + if self.object_type == "type_table": + + try: + self.type_table = provider.get_type_table(self.path) + provider.delete_type_table(self.type_table) + except NoResultFound: + log.warning("No type table with this path: '{0}'".format(self.path)) + raise + + #it is a directory + if self.object_type == "directory": + try: + parent_dir = provider.get_directory(self.path) + provider.delete_directory(parent_dir) + except KeyError: + log.warning("No directory with this path: '{0}'".format(self.path)) + raise + + #it is a variation + if self.object_type == "variation": + try: + variation = provider.get_variation(self.raw_entry) + provider.delete_variation(variation) + except NoResultFound: + log.warning("Unable to delete variation '{0}'".format(self.raw_entry)) + raise + + if self.object_type == "assignment": + assignment = provider.get_assignment_by_id(int(self.raw_entry)) + provider.delete_assignment(assignment) + #TODO use request instead of id + + #everything is fine! + return 0 + +#---------------------------------------- +# process_arguments +#---------------------------------------- + def process_arguments(self, args): + + #parse loop + i=0 + while i < len(args): + token = args[i].strip() + i+=1 + if token.startswith('-'): + #it is some command, lets parse what is the command + + #variation + if token == "-v" or token.startswith("--variation"): + if i - removes type table with given path + rm -d - removes directory with given path + rm -v - removes variation with given name + rm -a - Assignment db id from 'vers' command* + + Flags: + + -f or --force - removes object without question + + """) diff --git a/python/ccdb/cmd/utils/run.py b/python/ccdb/cmd/utils/run.py index 858dde01..a80a1cae 100644 --- a/python/ccdb/cmd/utils/run.py +++ b/python/ccdb/cmd/utils/run.py @@ -1,66 +1,66 @@ -import logging -from ccdb.cmd import ConsoleUtilBase -from ccdb.brace_log_message import BraceMessage as Lfm - -log = logging.getLogger("ccdb.cmd.utils.run") - -#ccdbcmd module interface -def create_util_instance(): - log.debug(" registering CurrentRun") - return CurrentRun() - - -#********************************************************************* -# Class CurrentRun - gets or sets current working run * -# * -#********************************************************************* -class CurrentRun(ConsoleUtilBase): - """ gets or sets current working run """ - - # ccdb utility class descr part - #------------------------------ - command = "run" - name = "CurrentRun" - short_descr = "gets or sets current working run" - - def print_help(self): - print """ gets or sets current working run - run (with no arguments) will display current working run - run will set current working run to this number - - -Current working run is the run that 
is used no run specified explicitly. -Example: - >> run 1000 #set run 1000 as current working run - >> cat some_constants #prints constants for run 1000 - >> cat some_constants:500 #prints values for run 500 - -Current working run can be set on ccdb start by '-r' flag. -Example: - > ccdb -r 1000 cat /test/some_constants - -If no run specified at ccdb start, current working run is 0 - """ - # ---- end of print_help() ---- - - - def process(self, args): - log.debug("CurrentRun is gained a control over the process.") - log.debug(" " + " ".join(args)) - - assert self.context is not None - - if len(args): - #set working run? - try: - self.context.current_run = int(args[0]) - log.info("Working run is %i", self.context.current_run) - except ValueError: - log.warning("cannot read run number") - return 1 - else: - #get working run - print self.context.current_run - - # all is fine +import logging +from ccdb.cmd import ConsoleUtilBase +from ccdb.brace_log_message import BraceMessage as Lfm + +log = logging.getLogger("ccdb.cmd.utils.run") + +#ccdbcmd module interface +def create_util_instance(): + log.debug(" registering CurrentRun") + return CurrentRun() + + +#********************************************************************* +# Class CurrentRun - gets or sets current working run * +# * +#********************************************************************* +class CurrentRun(ConsoleUtilBase): + """ gets or sets current working run """ + + # ccdb utility class descr part + #------------------------------ + command = "run" + name = "CurrentRun" + short_descr = "gets or sets current working run" + + def print_help(self): + print(""" gets or sets current working run + run (with no arguments) will display current working run + run will set current working run to this number + + +Current working run is the run that is used no run specified explicitly. +Example: + >> run 1000 #set run 1000 as current working run + >> cat some_constants #prints constants for run 1000 + >> cat some_constants:500 #prints values for run 500 + +Current working run can be set on ccdb start by '-r' flag. +Example: + > ccdb -r 1000 cat /test/some_constants + +If no run specified at ccdb start, current working run is 0 + """) + # ---- end of print_help() ---- + + + def process(self, args): + log.debug("CurrentRun is gained a control over the process.") + log.debug(" " + " ".join(args)) + + assert self.context is not None + + if len(args): + #set working run? 
+            try:
+                self.context.current_run = int(args[0])
+                log.info("Working run is %i", self.context.current_run)
+            except ValueError:
+                log.warning("cannot read run number")
+                return 1
+        else:
+            #get working run
+            print(self.context.current_run)
+
+        # all is fine
         return 0
\ No newline at end of file
diff --git a/python/ccdb/cmd/utils/usage.py b/python/ccdb/cmd/utils/usage.py
index 94a61720..eab38fbb 100644
--- a/python/ccdb/cmd/utils/usage.py
+++ b/python/ccdb/cmd/utils/usage.py
@@ -1,33 +1,33 @@
-from ccdb.cmd import ConsoleUtilBase
-import logging
-
-log = logging.getLogger("ccdb.cmd.utils.usage")
-
-#ccdbcmd module interface
-def create_util_instance():
-    log.debug(" registering Usage")
-    return Usage()
-
-
-#*********************************************************************
-#   Class Usage - Prints usage for each util                         *
-#                                                                    *
-#*********************************************************************
-class Usage(ConsoleUtilBase):
-    """ Prints usage for each util """
-
-    # ccdb utility class descr part
-    #------------------------------
-    command = "usage"
-    name = "Usage"
-    short_descr = "Prints usage for each util"
-    help_util = True
-
-    def print_help(self):
-        print """ Prints the usage of the command """
-    # ---- end of print_help() ----
-
-
-    def process(self, args):
-        log.debug(" Usage is gained a control over the process")
+from ccdb.cmd import ConsoleUtilBase
+import logging
+
+log = logging.getLogger("ccdb.cmd.utils.usage")
+
+#ccdbcmd module interface
+def create_util_instance():
+    log.debug(" registering Usage")
+    return Usage()
+
+
+#*********************************************************************
+#   Class Usage - Prints usage for each util                         *
+#                                                                    *
+#*********************************************************************
+class Usage(ConsoleUtilBase):
+    """ Prints usage for each util """
+
+    # ccdb utility class descr part
+    #------------------------------
+    command = "usage"
+    name = "Usage"
+    short_descr = "Prints usage for each util"
+    help_util = True
+
+    def print_help(self):
+        print(""" Prints the usage of the command """)
+    # ---- end of print_help() ----
+
+
+    def process(self, args):
+        log.debug(" Usage is gained a control over the process")
         log.debug(" ".join(args))
\ No newline at end of file
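A remark on the user.py hunks just below: where the Python 2 source already parenthesized its print arguments, the 2to3 print fixer adds its own call parentheses around the whole expression, which is how lines such as print((user.name)) arise. The transformation is semantics-preserving: in Python 2, print (x) printed the object x, and print ("a", "b") printed a tuple, so the doubled form keeps both behaviors. A sketch, not part of the patch:

    # Python 2 source       2to3 output               printed in Python 3
    # print (user.name)  -> print((user.name))        # value of user.name
    # print ("a", "b")   -> print(("a", "b"))         # the tuple ('a', 'b')
    print(("a", "b"))    # -> ('a', 'b')

The extra parentheses around single values are purely cosmetic and can be stripped in a follow-up cleanup.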
diff --git a/python/ccdb/cmd/utils/user.py b/python/ccdb/cmd/utils/user.py
index d9b63a4a..a23d8757 100644
--- a/python/ccdb/cmd/utils/user.py
+++ b/python/ccdb/cmd/utils/user.py
@@ -29,7 +29,7 @@ def process(self, args):
 
         if not len(args):
             # print current user
-            print (self.context.provider.authentication.current_user_name)
+            print((self.context.provider.authentication.current_user_name))
             return
 
 
@@ -42,14 +42,14 @@ def process(self, args):
         if result.list:
             users = self.context.provider.get_users()
             for user in users:
-                print(user.name)
+                print((user.name))
             return
 
         if result.create:
             if not validate_name(result.create):
                 raise ValueError("Invalid user name. Only [a-z A-Z 0-9 _] symbols are allowed for user name")
             self.context.provider.create_user(result.create)
-            print("{0} was created.".format(result.create))
+            print(("{0} was created.".format(result.create)))
             return
 
     #----------------------------------------------
@@ -58,7 +58,7 @@ def process(self, args):
     def print_help(self):
         """prints help to user"""
 
-        print """
+        print("""
 User or user - manages users
 
 usage:
 
     user               - prints the current user of the database.
     name - is the username of the new user. [a-z A-Z 0-9 _] are allowed symbols
-    """
+    """)
diff --git a/python/ccdb/cmd/utils/var.py b/python/ccdb/cmd/utils/var.py
index 7a00188a..6cb29f4c 100644
--- a/python/ccdb/cmd/utils/var.py
+++ b/python/ccdb/cmd/utils/var.py
@@ -1,68 +1,68 @@
-import logging
-import os
-
-from ccdb.cmd import ConsoleUtilBase
-from ccdb.path_utils import validate_name
-from ccdb.brace_log_message import BraceMessage as Lfm
-
-
-log = logging.getLogger("ccdb.cmd.utils.var")
-
-
-#ccdbcmd module interface
-def create_util_instance():
-    log.debug(" registering CurrentVariation")
-    return CurrentVariation()
-
-
-#*********************************************************************
-#   Class CurrentVariation - gets or sets current working variation  *
-#                                                                    *
-#*********************************************************************
-class CurrentVariation(ConsoleUtilBase):
-    """ gets or sets current working run """
-
-    # ccdb utility class descr part
-    #------------------------------
-    command = "var"
-    name = "CurrentVariation"
-    short_descr = "gets or sets current working variation"
-
-    def print_help(self):
-        print """Gets or sets current working variation
-    var       (no arguments) displays current variation
-    var  sets current variation
-
-Current working variation is the variation that is used by default if no variation specified.
-Example:
-    >> var smith    #set 'smith' variation as current working variation
-    >> cat some_constants   #prints values for latest constants of smith variation
-    >> cat some_constants::john  #prints values for latest constants of john variation
-
-Current working variation can be set on ccdb start by '-v' flag.
- Example:
-    \\> ccdb -v smith cat /test/some_constants   #set variation at ccdb start
-
-If no variation specified at ccdb start, current working variation is 'default'
-    """
-    # ---- end of print_help() ----
-
-
-    def process(self, args):
-        if log.isEnabledFor(logging.DEBUG):
-            log.debug(Lfm("{0}CurrentVariation is in charge{0}\\".format(os.linesep)))
-            log.debug(Lfm(" |- arguments : '" + "' '".join(args)+"'"))
-
-        assert self.context is not None
-
-        if len(args):
-            if not validate_name(args[0]):
-                raise ValueError("Error. Variation name should consist of A-Z, a-z, 0-9, _")
-            self.context.current_variation = args[0]
-            log.info(Lfm("Working variation is set to '{}'", self.context.current_variation))
-        else:
-            #get working run
-            print self.context.current_variation
-
-        # all is fine
+import logging
+import os
+
+from ccdb.cmd import ConsoleUtilBase
+from ccdb.path_utils import validate_name
+from ccdb.brace_log_message import BraceMessage as Lfm
+
+
+log = logging.getLogger("ccdb.cmd.utils.var")
+
+
+#ccdbcmd module interface
+def create_util_instance():
+    log.debug(" registering CurrentVariation")
+    return CurrentVariation()
+
+
+#*********************************************************************
+#   Class CurrentVariation - gets or sets current working variation  *
+#                                                                    *
+#*********************************************************************
+class CurrentVariation(ConsoleUtilBase):
+    """ gets or sets current working run """
+
+    # ccdb utility class descr part
+    #------------------------------
+    command = "var"
+    name = "CurrentVariation"
+    short_descr = "gets or sets current working variation"
+
+    def print_help(self):
+        print("""Gets or sets current working variation
+    var       (no arguments) displays current variation
+    var  sets current variation
+
+Current working variation is the variation that is used by default if no variation specified.
+Example: + >> var smith #set 'smith' variation as current working variation + >> cat some_constants #prints values for latest constants of smith variation + >> cat some_constants::john #prints values for latest constants of john variation + +Current working variation can be set on ccdb start by '-v' flag. + Example: + \\> ccdb -v smith cat /test/some_constants #set variation at ccdb start + +If no variation specified at ccdb start, current working variation is 'default' + """) + # ---- end of print_help() ---- + + + def process(self, args): + if log.isEnabledFor(logging.DEBUG): + log.debug(Lfm("{0}CurrentVariation is in charge{0}\\".format(os.linesep))) + log.debug(Lfm(" |- arguments : '" + "' '".join(args)+"'")) + + assert self.context is not None + + if len(args): + if not validate_name(args[0]): + raise ValueError("Error. Variation name should consist of A-Z, a-z, 0-9, _") + self.context.current_variation = args[0] + log.info(Lfm("Working variation is set to '{}'", self.context.current_variation)) + else: + #get working run + print(self.context.current_variation) + + # all is fine return 0 \ No newline at end of file diff --git a/python/ccdb/cmd/utils/vers.py b/python/ccdb/cmd/utils/vers.py index 88b7da41..30e75b31 100644 --- a/python/ccdb/cmd/utils/vers.py +++ b/python/ccdb/cmd/utils/vers.py @@ -1,117 +1,117 @@ -import logging -import os - - -from ccdb import AlchemyProvider -import ccdb -from ccdb.cmd import ConsoleUtilBase, UtilityArgumentParser -from ccdb.brace_log_message import BraceMessage as LogFmt - - -log = logging.getLogger("ccdb.cmd.utils.vers") - - -# ccdbcmd module interface -def create_util_instance(): - log.debug(" registering Versions") - return Versions() - - -#********************************************************************* -# Class Versions - Show versions of data for specified type table * -# * -#********************************************************************* -class Versions(ConsoleUtilBase): - """" Show versions of data. Assignments in terms of CCDB """ - - # ccdb utility class descr part - # ------------------------------ - command = "vers" - name = "Versions" - short_descr = "Show versions of data for specified type table" - uses_db = True - - # - - - - - - - - - - - - - - - - - - - - - - def process(self, args): - """Main function that do the job""" - - if log.isEnabledFor(logging.DEBUG): - log.debug(LogFmt("{0}Versions is in charge{0}\\".format(os.linesep))) - log.debug(LogFmt(" |- arguments : '" + "' '".join(args)+"'")) - - # preparations - assert self.context is not None - provider = self.context.provider - isinstance(provider, AlchemyProvider) - - # process arguments - (raw_table_path, variation, run) = self._process_arguments(args) - - if not raw_table_path: - log.info("Table name (path) is required. 
See 'help vers'") - return 0 # OK return - - # correct path - table_path = self.context.prepare_path(raw_table_path) - - # get and print assignments - assignments = provider.get_assignments(table_path, run, variation) - self.print_assignments(assignments, variation, run) - return 0 - - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @staticmethod - def _process_arguments(args): - parser = UtilityArgumentParser() - parser.add_argument("raw_path", nargs='?', default="") - parser.add_argument("-v", "--variation", default="") - parser.add_argument("-r", "--run", type=int, default=-1) - - # parse - result = parser.parse_args(args) - - return result.raw_path, result.variation, result.run - - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - def print_assignments(self, assignments, variation, run): - - # if user specifies run, show it! - if run != -1: - print("For run: {}".format(run)) - - # if user specified variation, show it! - if variation: - print("For variation: {}".format(variation)) - - # table header... and table =) - print self.theme.Directories + "(ID) (Created) (Modified) (variation) (run range) (comments)" - for asgmnt in assignments: - assert isinstance(asgmnt, ccdb.Assignment) - max_str = repr(asgmnt.run_range.max) - if asgmnt.run_range.max == ccdb.INFINITE_RUN: - max_str="inf" - print " %-5i "%asgmnt.id +\ - " %-20s"%asgmnt.created.strftime("%Y-%m-%d %H-%M-%S ") +\ - " %-20s"%asgmnt.modified.strftime("%Y-%m-%d %H-%M-%S ") + " " +\ - " %-14s "%asgmnt.variation.name +\ - " %-15s "%(repr(asgmnt.run_range.min) + "-" + max_str) +\ - asgmnt.comment[0:20].replace("\n", " ") - - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - def print_help(self): - """Prints help of the command""" - - print """Show versions of data for specified type table - -Flags: - -v or --variation - filters output by variation - -r or --run - filters output by run - -Remark: Current working variation which is set by 'var' command or '-v' ccdb flag -is not propagated to 'vers' command. - -Example: - >> vers /test/test_vars/test_table #get all data versions - >> cd /test/test_vars #navigate to directory - >> vers -v default test_table #shows only data versions in default variation - """ \ No newline at end of file +import logging +import os + + +from ccdb import AlchemyProvider +import ccdb +from ccdb.cmd import ConsoleUtilBase, UtilityArgumentParser +from ccdb.brace_log_message import BraceMessage as LogFmt + + +log = logging.getLogger("ccdb.cmd.utils.vers") + + +# ccdbcmd module interface +def create_util_instance(): + log.debug(" registering Versions") + return Versions() + + +#********************************************************************* +# Class Versions - Show versions of data for specified type table * +# * +#********************************************************************* +class Versions(ConsoleUtilBase): + """" Show versions of data. 
Assignments in terms of CCDB """
+
+    # ccdb utility class descr part
+    # ------------------------------
+    command = "vers"
+    name = "Versions"
+    short_descr = "Show versions of data for specified type table"
+    uses_db = True
+
+    # - - - - - - - - - - - - - - - - - - - - -
+    def process(self, args):
+        """Main function that does the job"""
+
+        if log.isEnabledFor(logging.DEBUG):
+            log.debug(LogFmt("{0}Versions is in charge{0}\\".format(os.linesep)))
+            log.debug(LogFmt(" |- arguments : '" + "' '".join(args)+"'"))
+
+        # preparations
+        assert self.context is not None
+        provider = self.context.provider
+        isinstance(provider, AlchemyProvider)
+
+        # process arguments
+        (raw_table_path, variation, run) = self._process_arguments(args)
+
+        if not raw_table_path:
+            log.info("Table name (path) is required. See 'help vers'")
+            return 0   # OK return
+
+        # correct path
+        table_path = self.context.prepare_path(raw_table_path)
+
+        # get and print assignments
+        assignments = provider.get_assignments(table_path, run, variation)
+        self.print_assignments(assignments, variation, run)
+        return 0
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    @staticmethod
+    def _process_arguments(args):
+        parser = UtilityArgumentParser()
+        parser.add_argument("raw_path", nargs='?', default="")
+        parser.add_argument("-v", "--variation", default="")
+        parser.add_argument("-r", "--run", type=int, default=-1)
+
+        # parse
+        result = parser.parse_args(args)
+
+        return result.raw_path, result.variation, result.run
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    def print_assignments(self, assignments, variation, run):
+
+        # if user specifies run, show it!
+        if run != -1:
+            print("For run: {}".format(run))
+
+        # if user specified variation, show it!
+        if variation:
+            print("For variation: {}".format(variation))
+
+        # table header... and table =)
+        print(self.theme.Directories + "(ID) (Created) (Modified) (variation) (run range) (comments)")
+        for asgmnt in assignments:
+            assert isinstance(asgmnt, ccdb.Assignment)
+            max_str = repr(asgmnt.run_range.max)
+            if asgmnt.run_range.max == ccdb.INFINITE_RUN:
+                max_str = "inf"
+            print(" %-5i " % asgmnt.id +
+                  " %-20s" % asgmnt.created.strftime("%Y-%m-%d %H-%M-%S ") +
+                  " %-20s" % asgmnt.modified.strftime("%Y-%m-%d %H-%M-%S ") + " " +
+                  " %-14s " % asgmnt.variation.name +
+                  " %-15s " % (repr(asgmnt.run_range.min) + "-" + max_str) +
+                  asgmnt.comment[0:20].replace("\n", " "))
+
+    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+    def print_help(self):
+        """Prints help of the command"""
+
+        print("""Show versions of data for specified type table
+
+Flags:
+    -v or --variation  - filters output by variation
+    -r or --run        - filters output by run
+
+Remark: Current working variation which is set by 'var' command or '-v' ccdb flag
+is not propagated to 'vers' command.
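For reference, the flag layout that ``_process_arguments`` builds above can be
reproduced with the stdlib parser (a sketch only; the real ``UtilityArgumentParser``
adds ccdb-specific error handling)::

    import argparse

    parser = argparse.ArgumentParser(prog="vers")
    parser.add_argument("raw_path", nargs='?', default="")
    parser.add_argument("-v", "--variation", default="")
    parser.add_argument("-r", "--run", type=int, default=-1)

    args = parser.parse_args(["-v", "default", "-r", "100", "/test/test_vars/test_table"])
    assert (args.raw_path, args.variation, args.run) == \
           ("/test/test_vars/test_table", "default", 100)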
+ +Example: + >> vers /test/test_vars/test_table #get all data versions + >> cd /test/test_vars #navigate to directory + >> vers -v default test_table #shows only data versions in default variation + """) \ No newline at end of file diff --git a/python/ccdb/model.py b/python/ccdb/model.py index 922a6adc..91b42f87 100644 --- a/python/ccdb/model.py +++ b/python/ccdb/model.py @@ -1,568 +1,568 @@ -from __future__ import print_function - -import collections -import datetime -import posixpath - -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.schema import Column, ForeignKey -from sqlalchemy.types import Integer, String, Text, DateTime, Enum, Boolean -from sqlalchemy.orm import reconstructor, relation -from sqlalchemy.orm import relationship, backref - -Base = declarative_base() - -#This thing separates cells in data blob -blob_delimiter = "|" - -# if cell of data table is a string and the string already contains blob_delimiter -# we have to encode blob_delimiter to blob_delimiter_replace on data write and decode it bach on data read -blob_delimiter_replacement = "&delimiter;" - - -#-------------------------------------------- -# class CcdbSchemaVersion -#-------------------------------------------- -class CcdbSchemaVersion(Base): - """ - Represents CCDB directory object. - Directories may contain other directories or TypeTable objects - """ - __tablename__ = 'schemaVersions' - id = Column(Integer, primary_key=True) - version = Column("schemaVersion", Integer) - - def __repr__(self): - return "".format(self.id, self.version) - - -# -------------------------------------------- -# class Directory -# -------------------------------------------- -class Directory(Base): - """ - Represents CCDB directory object. - Directories may contain other directories or TypeTable objects - """ - __tablename__ = 'directories' - id = Column(Integer, primary_key=True) - name = Column(String(255)) - comment = Column(Text) - created = Column(DateTime, default=datetime.datetime.now) - modified = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now) - parent_id = Column('parentId', Integer) - author_id = Column('authorId', Integer, default=1) - - def __init__(self): - self.path = "" - self.parent_dir = None - self.sub_dirs = [] - - @reconstructor - def on_load_init(self): - self.path = "" - self.parent_dir = None - self.sub_dirs = [] - - def __repr__(self): - return "".format(self.id, self.name) - - -# -------------------------------------------- -# class TypeTable -# -------------------------------------------- -class TypeTable(Base): - __tablename__ = 'typeTables' - id = Column(Integer, primary_key=True) - name = Column(String(255)) - comment = Column(Text) - created = Column(DateTime, default=datetime.datetime.now) - modified = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now) - parent_dir_id = Column('directoryId', Integer, ForeignKey('directories.id')) - parent_dir = relationship("Directory", backref=backref('type_tables', order_by=id)) - constant_sets = relationship("ConstantSet", backref=backref('type_table')) - columns = relationship("TypeTableColumn", - order_by="TypeTableColumn.order", - cascade="all, delete, delete-orphan", - backref=backref("type_table")) - rows_count = Column('nRows', Integer) - _columns_count = Column('nColumns', Integer) - author_id = Column('authorId', Integer, default=1) - - @property - def columns_count(self): - """ - :return: Number of columns of the table - :rtype: int - """ - return self._columns_count 
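As a sketch of the encode/decode round trip the ``blob_delimiter`` comments above
describe (the real ``list_to_blob``/``blob_to_list`` helpers live further down in
model.py; this minimal version only assumes the documented escaping rule)::

    blob_delimiter = "|"
    blob_delimiter_replacement = "&delimiter;"

    def list_to_blob(values):
        # escape literal '|' inside a cell, then join cells with '|'
        return blob_delimiter.join(
            str(v).replace(blob_delimiter, blob_delimiter_replacement) for v in values)

    def blob_to_list(blob):
        # split on '|' and un-escape each cell
        return [cell.replace(blob_delimiter_replacement, blob_delimiter)
                for cell in blob.split(blob_delimiter)]

    assert blob_to_list(list_to_blob(["1.2", "a|b", "3"])) == ["1.2", "a|b", "3"]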
- - @property - def path(self): - """ - :return: full path of the table - :rtype: str - """ - return posixpath.join(self.parent_dir.path, self.name) - - def __repr__(self): - return "".format(self.id, self.name) - - -# -------------------------------------------- -# class TypeTableColumn -# -------------------------------------------- -class TypeTableColumn(Base): - __tablename__ = 'columns' - id = Column(Integer, primary_key=True) - name = Column(String(255)) - comment = Column(Text) - created = Column(DateTime, default=datetime.datetime.now) - modified = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now) - order = Column(Integer) - type = Column('columnType', Enum('int', 'uint', 'long', 'ulong', 'double', 'string', 'bool')) - type_table_id = Column('typeId', Integer, ForeignKey('typeTables.id')) - - @property - def path(self): - return posixpath.join(self.parent_dir.path, self.name) - - def __repr__(self): - return "".format(self.name) - - -# -------------------------------------------- -# class ConstantSet -# -------------------------------------------- -class ConstantSet(Base): - __tablename__ = 'constantSets' - id = Column(Integer, primary_key=True) - _vault = Column('vault', Text) - created = Column(DateTime, default=datetime.datetime.now) - modified = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now) - assignment = relationship("Assignment", uselist=False, back_populates="constant_set") - type_table_id = Column('constantTypeId', Integer, ForeignKey('typeTables.id')) - - @property - def vault(self): - """ - Text-blob with data as it is presented in database - :return: string with text-blob from db - :rtype: string - """ - return self._vault - - @property - def data_list(self): - return blob_to_list(self._vault) - - @data_list.setter - def data_list(self, data_list): - self._vault = list_to_blob(data_list) - - @property - def data_table(self): - return list_to_table(self.data_list, self.type_table.columns_count) - - @data_table.setter - def data_table(self, data): - self.data_list = list(gen_flatten_data(data)) - - def __repr__(self): - return "".format(self.id) - - -# -------------------------------------------- -# class Assignment -# -------------------------------------------- -class Assignment(Base): - __tablename__ = 'assignments' - - id = Column(Integer, primary_key=True) - created = Column(DateTime, default=datetime.datetime.now) - modified = Column(DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now) - constant_set_id = Column('constantSetId', Integer, ForeignKey('constantSets.id')) - constant_set = relationship("ConstantSet", - uselist=False, - back_populates="assignment", - cascade="all, delete, delete-orphan", - single_parent=True) - run_range_id = Column('runRangeId', Integer, ForeignKey('runRanges.id')) - run_range = relationship("RunRange", backref=backref('assignments')) - variation_id = Column('variationId', Integer, ForeignKey('variations.id')) - variation = relationship("Variation", backref=backref('assignments')) - _comment = Column('comment', Text) - author_id = Column('authorId', Integer, ForeignKey('users.id'), default=1) - author = relationship("User", uselist=False) - - @property - def comment(self): - """ - returns comment for the object - :rtype: basestring - """ - return self._comment if self._comment is not None else "" - - @comment.setter - def comment(self, value): - self._comment = value - - @property - def request(self): - """ - Gets the unique "request" string in form of :::
%(iq)s[^%(fq)s]+%(fq)s'
-            r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
-            r'\((?P<foreign>[^\)]+?)\)'
-            r'(?: +(?P<match>MATCH \w+))?'
-            r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
-            r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
-            % kw
-        )
-
-        # PARTITION
-        #
-        # punt!
-        self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
-
-        # Table-level options (COLLATE, ENGINE, etc.)
-        # Do the string options first, since they have quoted
-        # strings we need to get rid of.
-        for option in _options_of_type_string:
-            self._add_option_string(option)
-
-        for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
-                       'AVG_ROW_LENGTH', 'CHARACTER SET',
-                       'DEFAULT CHARSET', 'CHECKSUM',
-                       'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
-                       'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
-                       'KEY_BLOCK_SIZE'):
-            self._add_option_word(option)
-
-        self._add_option_regex('UNION', r'\([^\)]+\)')
-        self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
-        self._add_option_regex(
-            'RAID_TYPE',
-            r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
-
-    _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
-
-    def _add_option_string(self, directive):
-        regex = (r'(?P<directive>%s)%s'
-                 r"'(?P<val>(?:[^']|'')*?)'(?!')" %
-                 (re.escape(directive), self._optional_equals))
-        self._pr_options.append(_pr_compile(
-            regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
-        ))
-
-    def _add_option_word(self, directive):
-        regex = (r'(?P<directive>%s)%s'
-                 r'(?P<val>\w+)' %
-                 (re.escape(directive), self._optional_equals))
-        self._pr_options.append(_pr_compile(regex))
-
-    def _add_option_regex(self, directive, regex):
-        regex = (r'(?P<directive>%s)%s'
-                 r'(?P<val>%s)' %
-                 (re.escape(directive), self._optional_equals, regex))
-        self._pr_options.append(_pr_compile(regex))
-
-_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
-                           'PASSWORD', 'CONNECTION')
-
-
-class _DecodingRowProxy(object):
-    """Return unicode-decoded values based on type inspection.
-
-    Smooth over data type issues (esp. with alpha driver versions) and
-    normalize strings as Unicode regardless of user-configured driver
-    encoding settings.
-
-    """
-
-    # Some MySQL-python versions can return some columns as
-    # sets.Set(['value']) (seriously) but thankfully that doesn't
-    # seem to come up in DDL queries.
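A standalone sketch of the per-item decode step the proxy below performs (the
``_array`` special case is omitted and ``decode_item`` is an illustrative name)::

    def decode_item(item, charset):
        # drivers hand back raw bytes; decode them with the connection
        # charset and pass every other value through untouched
        if charset and isinstance(item, bytes):
            return item.decode(charset)
        return item

    assert decode_item(b"caf\xc3\xa9", "utf8") == u"caf\xe9"
    assert decode_item(42, "utf8") == 42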
- - _encoding_compat = { - 'koi8r': 'koi8_r', - 'koi8u': 'koi8_u', - 'utf16': 'utf-16-be', # MySQL's uft16 is always bigendian - 'utf8mb4': 'utf8', # real utf8 - 'eucjpms': 'ujis', - } - - def __init__(self, rowproxy, charset): - self.rowproxy = rowproxy - self.charset = self._encoding_compat.get(charset, charset) - - def __getitem__(self, index): - item = self.rowproxy[index] - if isinstance(item, _array): - item = item.tostring() - - if self.charset and isinstance(item, util.binary_type): - return item.decode(self.charset) - else: - return item - - def __getattr__(self, attr): - item = getattr(self.rowproxy, attr) - if isinstance(item, _array): - item = item.tostring() - if self.charset and isinstance(item, util.binary_type): - return item.decode(self.charset) - else: - return item - - -def _pr_compile(regex, cleanup=None): - """Prepare a 2-tuple of compiled regex and callable.""" - - return (_re_compile(regex), cleanup) - - -def _re_compile(regex): - """Compile a string to regex, I and UNICODE.""" - - return re.compile(regex, re.I | re.UNICODE) diff --git a/python/sqlalchemy/dialects/mysql/cymysql.py b/python/sqlalchemy/dialects/mysql/cymysql.py deleted file mode 100644 index 6d8466ab..00000000 --- a/python/sqlalchemy/dialects/mysql/cymysql.py +++ /dev/null @@ -1,87 +0,0 @@ -# mysql/cymysql.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -.. dialect:: mysql+cymysql - :name: CyMySQL - :dbapi: cymysql - :connectstring: mysql+cymysql://:@/\ -[?] - :url: https://github.com/nakagami/CyMySQL - -""" -import re - -from .mysqldb import MySQLDialect_mysqldb -from .base import (BIT, MySQLDialect) -from ... import util - - -class _cymysqlBIT(BIT): - def result_processor(self, dialect, coltype): - """Convert a MySQL's 64 bit, variable length binary string to a long. 
- """ - - def process(value): - if value is not None: - v = 0 - for i in util.iterbytes(value): - v = v << 8 | i - return v - return value - return process - - -class MySQLDialect_cymysql(MySQLDialect_mysqldb): - driver = 'cymysql' - - description_encoding = None - supports_sane_rowcount = True - supports_sane_multi_rowcount = False - supports_unicode_statements = True - - colspecs = util.update_copy( - MySQLDialect.colspecs, - { - BIT: _cymysqlBIT, - } - ) - - @classmethod - def dbapi(cls): - return __import__('cymysql') - - def _get_server_version_info(self, connection): - dbapi_con = connection.connection - version = [] - r = re.compile('[.\-]') - for n in r.split(dbapi_con.server_version): - try: - version.append(int(n)) - except ValueError: - version.append(n) - return tuple(version) - - def _detect_charset(self, connection): - return connection.connection.charset - - def _extract_error_code(self, exception): - return exception.errno - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, self.dbapi.OperationalError): - return self._extract_error_code(e) in \ - (2006, 2013, 2014, 2045, 2055) - elif isinstance(e, self.dbapi.InterfaceError): - # if underlying connection is closed, - # this is the error you get - return True - else: - return False - -dialect = MySQLDialect_cymysql diff --git a/python/sqlalchemy/dialects/mysql/gaerdbms.py b/python/sqlalchemy/dialects/mysql/gaerdbms.py deleted file mode 100644 index 58b70737..00000000 --- a/python/sqlalchemy/dialects/mysql/gaerdbms.py +++ /dev/null @@ -1,102 +0,0 @@ -# mysql/gaerdbms.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -""" -.. dialect:: mysql+gaerdbms - :name: Google Cloud SQL - :dbapi: rdbms - :connectstring: mysql+gaerdbms:///?instance= - :url: https://developers.google.com/appengine/docs/python/cloud-sql/\ -developers-guide - - This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with - minimal changes. - - .. versionadded:: 0.7.8 - - .. deprecated:: 1.0 This dialect is **no longer necessary** for - Google Cloud SQL; the MySQLdb dialect can be used directly. - Cloud SQL now recommends creating connections via the - mysql dialect using the URL format - - ``mysql+mysqldb://root@/?unix_socket=/cloudsql/:`` - - -Pooling -------- - -Google App Engine connections appear to be randomly recycled, -so the dialect does not pool connections. The :class:`.NullPool` -implementation is installed within the :class:`.Engine` by -default. 
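For comparison, the same no-pooling behavior can be requested explicitly on any
engine (standard SQLAlchemy usage; the URL below is a placeholder)::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool

    # every checkout opens a fresh DBAPI connection; nothing is recycled
    engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test",
                           poolclass=NullPool)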
- -""" - -import os - -from .mysqldb import MySQLDialect_mysqldb -from ...pool import NullPool -import re -from sqlalchemy.util import warn_deprecated - - -def _is_dev_environment(): - return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/') - - -class MySQLDialect_gaerdbms(MySQLDialect_mysqldb): - - @classmethod - def dbapi(cls): - - warn_deprecated( - "Google Cloud SQL now recommends creating connections via the " - "MySQLdb dialect directly, using the URL format " - "mysql+mysqldb://root@/?unix_socket=/cloudsql/" - ":" - ) - - # from django: - # http://code.google.com/p/googleappengine/source/ - # browse/trunk/python/google/storage/speckle/ - # python/django/backend/base.py#118 - # see also [ticket:2649] - # see also http://stackoverflow.com/q/14224679/34549 - from google.appengine.api import apiproxy_stub_map - - if _is_dev_environment(): - from google.appengine.api import rdbms_mysqldb - return rdbms_mysqldb - elif apiproxy_stub_map.apiproxy.GetStub('rdbms'): - from google.storage.speckle.python.api import rdbms_apiproxy - return rdbms_apiproxy - else: - from google.storage.speckle.python.api import rdbms_googleapi - return rdbms_googleapi - - @classmethod - def get_pool_class(cls, url): - # Cloud SQL connections die at any moment - return NullPool - - def create_connect_args(self, url): - opts = url.translate_connect_args() - if not _is_dev_environment(): - # 'dsn' and 'instance' are because we are skipping - # the traditional google.api.rdbms wrapper - opts['dsn'] = '' - opts['instance'] = url.query['instance'] - return [], opts - - def _extract_error_code(self, exception): - match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception)) - # The rdbms api will wrap then re-raise some types of errors - # making this regex return no matches. - code = match.group(1) or match.group(2) if match else None - if code: - return int(code) - -dialect = MySQLDialect_gaerdbms diff --git a/python/sqlalchemy/dialects/mysql/mysqlconnector.py b/python/sqlalchemy/dialects/mysql/mysqlconnector.py deleted file mode 100644 index 3a4eeec0..00000000 --- a/python/sqlalchemy/dialects/mysql/mysqlconnector.py +++ /dev/null @@ -1,176 +0,0 @@ -# mysql/mysqlconnector.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: mysql+mysqlconnector - :name: MySQL Connector/Python - :dbapi: myconnpy - :connectstring: mysql+mysqlconnector://:@\ -[:]/ - :url: http://dev.mysql.com/downloads/connector/python/ - - -Unicode -------- - -Please see :ref:`mysql_unicode` for current recommendations on unicode -handling. - -""" - -from .base import (MySQLDialect, MySQLExecutionContext, - MySQLCompiler, MySQLIdentifierPreparer, - BIT) - -from ... 
import util -import re - - -class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext): - - def get_lastrowid(self): - return self.cursor.lastrowid - - -class MySQLCompiler_mysqlconnector(MySQLCompiler): - def visit_mod_binary(self, binary, operator, **kw): - if self.dialect._mysqlconnector_double_percents: - return self.process(binary.left, **kw) + " %% " + \ - self.process(binary.right, **kw) - else: - return self.process(binary.left, **kw) + " % " + \ - self.process(binary.right, **kw) - - def post_process_text(self, text): - if self.dialect._mysqlconnector_double_percents: - return text.replace('%', '%%') - else: - return text - - def escape_literal_column(self, text): - if self.dialect._mysqlconnector_double_percents: - return text.replace('%', '%%') - else: - return text - - -class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer): - - def _escape_identifier(self, value): - value = value.replace(self.escape_quote, self.escape_to_quote) - if self.dialect._mysqlconnector_double_percents: - return value.replace("%", "%%") - else: - return value - - -class _myconnpyBIT(BIT): - def result_processor(self, dialect, coltype): - """MySQL-connector already converts mysql bits, so.""" - - return None - - -class MySQLDialect_mysqlconnector(MySQLDialect): - driver = 'mysqlconnector' - - supports_unicode_binds = True - - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - - supports_native_decimal = True - - default_paramstyle = 'format' - execution_ctx_cls = MySQLExecutionContext_mysqlconnector - statement_compiler = MySQLCompiler_mysqlconnector - - preparer = MySQLIdentifierPreparer_mysqlconnector - - colspecs = util.update_copy( - MySQLDialect.colspecs, - { - BIT: _myconnpyBIT, - } - ) - - @util.memoized_property - def supports_unicode_statements(self): - return util.py3k or self._mysqlconnector_version_info > (2, 0) - - @classmethod - def dbapi(cls): - from mysql import connector - return connector - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - - opts.update(url.query) - - util.coerce_kw_type(opts, 'buffered', bool) - util.coerce_kw_type(opts, 'raise_on_warnings', bool) - - # unfortunately, MySQL/connector python refuses to release a - # cursor without reading fully, so non-buffered isn't an option - opts.setdefault('buffered', True) - - # FOUND_ROWS must be set in ClientFlag to enable - # supports_sane_rowcount. 
- if self.dbapi is not None: - try: - from mysql.connector.constants import ClientFlag - client_flags = opts.get( - 'client_flags', ClientFlag.get_default()) - client_flags |= ClientFlag.FOUND_ROWS - opts['client_flags'] = client_flags - except Exception: - pass - return [[], opts] - - @util.memoized_property - def _mysqlconnector_version_info(self): - if self.dbapi and hasattr(self.dbapi, '__version__'): - m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', - self.dbapi.__version__) - if m: - return tuple( - int(x) - for x in m.group(1, 2, 3) - if x is not None) - - @util.memoized_property - def _mysqlconnector_double_percents(self): - return not util.py3k and self._mysqlconnector_version_info < (2, 0) - - def _get_server_version_info(self, connection): - dbapi_con = connection.connection - version = dbapi_con.get_server_version() - return tuple(version) - - def _detect_charset(self, connection): - return connection.connection.charset - - def _extract_error_code(self, exception): - return exception.errno - - def is_disconnect(self, e, connection, cursor): - errnos = (2006, 2013, 2014, 2045, 2055, 2048) - exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError) - if isinstance(e, exceptions): - return e.errno in errnos or \ - "MySQL Connection not available." in str(e) - else: - return False - - def _compat_fetchall(self, rp, charset=None): - return rp.fetchall() - - def _compat_fetchone(self, rp, charset=None): - return rp.fetchone() - -dialect = MySQLDialect_mysqlconnector diff --git a/python/sqlalchemy/dialects/mysql/mysqldb.py b/python/sqlalchemy/dialects/mysql/mysqldb.py deleted file mode 100644 index 4a7ba7e1..00000000 --- a/python/sqlalchemy/dialects/mysql/mysqldb.py +++ /dev/null @@ -1,198 +0,0 @@ -# mysql/mysqldb.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -.. dialect:: mysql+mysqldb - :name: MySQL-Python - :dbapi: mysqldb - :connectstring: mysql+mysqldb://:@[:]/ - :url: http://sourceforge.net/projects/mysql-python - -.. _mysqldb_unicode: - -Unicode -------- - -Please see :ref:`mysql_unicode` for current recommendations on unicode -handling. - -Py3K Support ------------- - -Currently, MySQLdb only runs on Python 2 and development has been stopped. -`mysqlclient`_ is fork of MySQLdb and provides Python 3 support as well -as some bugfixes. - -.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python - -Using MySQLdb with Google Cloud SQL ------------------------------------ - -Google Cloud SQL now recommends use of the MySQLdb dialect. Connect -using a URL like the following:: - - mysql+mysqldb://root@/?unix_socket=/cloudsql/: - -""" - -from .base import (MySQLDialect, MySQLExecutionContext, - MySQLCompiler, MySQLIdentifierPreparer) -from .base import TEXT -from ... import sql -from ... 
import util -import re - - -class MySQLExecutionContext_mysqldb(MySQLExecutionContext): - - @property - def rowcount(self): - if hasattr(self, '_rowcount'): - return self._rowcount - else: - return self.cursor.rowcount - - -class MySQLCompiler_mysqldb(MySQLCompiler): - def visit_mod_binary(self, binary, operator, **kw): - return self.process(binary.left, **kw) + " %% " + \ - self.process(binary.right, **kw) - - def post_process_text(self, text): - return text.replace('%', '%%') - - -class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer): - - def _escape_identifier(self, value): - value = value.replace(self.escape_quote, self.escape_to_quote) - return value.replace("%", "%%") - - -class MySQLDialect_mysqldb(MySQLDialect): - driver = 'mysqldb' - supports_unicode_statements = True - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - - supports_native_decimal = True - - default_paramstyle = 'format' - execution_ctx_cls = MySQLExecutionContext_mysqldb - statement_compiler = MySQLCompiler_mysqldb - preparer = MySQLIdentifierPreparer_mysqldb - - @classmethod - def dbapi(cls): - return __import__('MySQLdb') - - def do_executemany(self, cursor, statement, parameters, context=None): - rowcount = cursor.executemany(statement, parameters) - if context is not None: - context._rowcount = rowcount - - def _check_unicode_returns(self, connection): - # work around issue fixed in - # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8 - # specific issue w/ the utf8_bin collation and unicode returns - - has_utf8_bin = self.server_version_info > (5, ) and \ - connection.scalar( - "show collation where %s = 'utf8' and %s = 'utf8_bin'" - % ( - self.identifier_preparer.quote("Charset"), - self.identifier_preparer.quote("Collation") - )) - if has_utf8_bin: - additional_tests = [ - sql.collate(sql.cast( - sql.literal_column( - "'test collated returns'"), - TEXT(charset='utf8')), "utf8_bin") - ] - else: - additional_tests = [] - return super(MySQLDialect_mysqldb, self)._check_unicode_returns( - connection, additional_tests) - - def create_connect_args(self, url): - opts = url.translate_connect_args(database='db', username='user', - password='passwd') - opts.update(url.query) - - util.coerce_kw_type(opts, 'compress', bool) - util.coerce_kw_type(opts, 'connect_timeout', int) - util.coerce_kw_type(opts, 'read_timeout', int) - util.coerce_kw_type(opts, 'client_flag', int) - util.coerce_kw_type(opts, 'local_infile', int) - # Note: using either of the below will cause all strings to be - # returned as Unicode, both in raw SQL operations and with column - # types like String and MSString. - util.coerce_kw_type(opts, 'use_unicode', bool) - util.coerce_kw_type(opts, 'charset', str) - - # Rich values 'cursorclass' and 'conv' are not supported via - # query string. - - ssl = {} - keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher'] - for key in keys: - if key in opts: - ssl[key[4:]] = opts[key] - util.coerce_kw_type(ssl, key[4:], str) - del opts[key] - if ssl: - opts['ssl'] = ssl - - # FOUND_ROWS must be set in CLIENT_FLAGS to enable - # supports_sane_rowcount. 
- client_flag = opts.get('client_flag', 0) - if self.dbapi is not None: - try: - CLIENT_FLAGS = __import__( - self.dbapi.__name__ + '.constants.CLIENT' - ).constants.CLIENT - client_flag |= CLIENT_FLAGS.FOUND_ROWS - except (AttributeError, ImportError): - self.supports_sane_rowcount = False - opts['client_flag'] = client_flag - return [[], opts] - - def _get_server_version_info(self, connection): - dbapi_con = connection.connection - version = [] - r = re.compile('[.\-]') - for n in r.split(dbapi_con.get_server_info()): - try: - version.append(int(n)) - except ValueError: - version.append(n) - return tuple(version) - - def _extract_error_code(self, exception): - return exception.args[0] - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - - try: - # note: the SQL here would be - # "SHOW VARIABLES LIKE 'character_set%%'" - cset_name = connection.connection.character_set_name - except AttributeError: - util.warn( - "No 'character_set_name' can be detected with " - "this MySQL-Python version; " - "please upgrade to a recent version of MySQL-Python. " - "Assuming latin1.") - return 'latin1' - else: - return cset_name() - - -dialect = MySQLDialect_mysqldb diff --git a/python/sqlalchemy/dialects/mysql/oursql.py b/python/sqlalchemy/dialects/mysql/oursql.py deleted file mode 100644 index ae8abc32..00000000 --- a/python/sqlalchemy/dialects/mysql/oursql.py +++ /dev/null @@ -1,254 +0,0 @@ -# mysql/oursql.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -.. dialect:: mysql+oursql - :name: OurSQL - :dbapi: oursql - :connectstring: mysql+oursql://:@[:]/ - :url: http://packages.python.org/oursql/ - -Unicode -------- - -Please see :ref:`mysql_unicode` for current recommendations on unicode -handling. - - -""" - -import re - -from .base import (BIT, MySQLDialect, MySQLExecutionContext) -from ... 
import types as sqltypes, util - - -class _oursqlBIT(BIT): - def result_processor(self, dialect, coltype): - """oursql already converts mysql bits, so.""" - - return None - - -class MySQLExecutionContext_oursql(MySQLExecutionContext): - - @property - def plain_query(self): - return self.execution_options.get('_oursql_plain_query', False) - - -class MySQLDialect_oursql(MySQLDialect): - driver = 'oursql' - - if util.py2k: - supports_unicode_binds = True - supports_unicode_statements = True - - supports_native_decimal = True - - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - execution_ctx_cls = MySQLExecutionContext_oursql - - colspecs = util.update_copy( - MySQLDialect.colspecs, - { - sqltypes.Time: sqltypes.Time, - BIT: _oursqlBIT, - } - ) - - @classmethod - def dbapi(cls): - return __import__('oursql') - - def do_execute(self, cursor, statement, parameters, context=None): - """Provide an implementation of - *cursor.execute(statement, parameters)*.""" - - if context and context.plain_query: - cursor.execute(statement, plain_query=True) - else: - cursor.execute(statement, parameters) - - def do_begin(self, connection): - connection.cursor().execute('BEGIN', plain_query=True) - - def _xa_query(self, connection, query, xid): - if util.py2k: - arg = connection.connection._escape_string(xid) - else: - charset = self._connection_charset - arg = connection.connection._escape_string( - xid.encode(charset)).decode(charset) - arg = "'%s'" % arg - connection.execution_options( - _oursql_plain_query=True).execute(query % arg) - - # Because mysql is bad, these methods have to be - # reimplemented to use _PlainQuery. Basically, some queries - # refuse to return any data if they're run through - # the parameterized query API, or refuse to be parameterized - # in the first place. - def do_begin_twophase(self, connection, xid): - self._xa_query(connection, 'XA BEGIN %s', xid) - - def do_prepare_twophase(self, connection, xid): - self._xa_query(connection, 'XA END %s', xid) - self._xa_query(connection, 'XA PREPARE %s', xid) - - def do_rollback_twophase(self, connection, xid, is_prepared=True, - recover=False): - if not is_prepared: - self._xa_query(connection, 'XA END %s', xid) - self._xa_query(connection, 'XA ROLLBACK %s', xid) - - def do_commit_twophase(self, connection, xid, is_prepared=True, - recover=False): - if not is_prepared: - self.do_prepare_twophase(connection, xid) - self._xa_query(connection, 'XA COMMIT %s', xid) - - # Q: why didn't we need all these "plain_query" overrides earlier ? - # am i on a newer/older version of OurSQL ? 
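A rough sketch of the statement ``_xa_query`` above ends up issuing (oursql's
``_escape_string`` is replaced by a naive quote-doubling escape for illustration)::

    def xa_statement(template, xid):
        escaped = xid.replace("'", "''")   # stand-in for connection._escape_string
        return template % ("'%s'" % escaped)

    assert xa_statement('XA BEGIN %s', 'txn-1') == "XA BEGIN 'txn-1'"
    assert xa_statement('XA COMMIT %s', "o'brien") == "XA COMMIT 'o''brien'"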
- def has_table(self, connection, table_name, schema=None): - return MySQLDialect.has_table( - self, - connection.connect().execution_options(_oursql_plain_query=True), - table_name, - schema - ) - - def get_table_options(self, connection, table_name, schema=None, **kw): - return MySQLDialect.get_table_options( - self, - connection.connect().execution_options(_oursql_plain_query=True), - table_name, - schema=schema, - **kw - ) - - def get_columns(self, connection, table_name, schema=None, **kw): - return MySQLDialect.get_columns( - self, - connection.connect().execution_options(_oursql_plain_query=True), - table_name, - schema=schema, - **kw - ) - - def get_view_names(self, connection, schema=None, **kw): - return MySQLDialect.get_view_names( - self, - connection.connect().execution_options(_oursql_plain_query=True), - schema=schema, - **kw - ) - - def get_table_names(self, connection, schema=None, **kw): - return MySQLDialect.get_table_names( - self, - connection.connect().execution_options(_oursql_plain_query=True), - schema - ) - - def get_schema_names(self, connection, **kw): - return MySQLDialect.get_schema_names( - self, - connection.connect().execution_options(_oursql_plain_query=True), - **kw - ) - - def initialize(self, connection): - return MySQLDialect.initialize( - self, - connection.execution_options(_oursql_plain_query=True) - ) - - def _show_create_table(self, connection, table, charset=None, - full_name=None): - return MySQLDialect._show_create_table( - self, - connection.contextual_connect(close_with_result=True). - execution_options(_oursql_plain_query=True), - table, charset, full_name - ) - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, self.dbapi.ProgrammingError): - return e.errno is None and 'cursor' not in e.args[1] \ - and e.args[1].endswith('closed') - else: - return e.errno in (2006, 2013, 2014, 2045, 2055) - - def create_connect_args(self, url): - opts = url.translate_connect_args(database='db', username='user', - password='passwd') - opts.update(url.query) - - util.coerce_kw_type(opts, 'port', int) - util.coerce_kw_type(opts, 'compress', bool) - util.coerce_kw_type(opts, 'autoping', bool) - util.coerce_kw_type(opts, 'raise_on_warnings', bool) - - util.coerce_kw_type(opts, 'default_charset', bool) - if opts.pop('default_charset', False): - opts['charset'] = None - else: - util.coerce_kw_type(opts, 'charset', str) - opts['use_unicode'] = opts.get('use_unicode', True) - util.coerce_kw_type(opts, 'use_unicode', bool) - - # FOUND_ROWS must be set in CLIENT_FLAGS to enable - # supports_sane_rowcount. 
- opts.setdefault('found_rows', True) - - ssl = {} - for key in ['ssl_ca', 'ssl_key', 'ssl_cert', - 'ssl_capath', 'ssl_cipher']: - if key in opts: - ssl[key[4:]] = opts[key] - util.coerce_kw_type(ssl, key[4:], str) - del opts[key] - if ssl: - opts['ssl'] = ssl - - return [[], opts] - - def _get_server_version_info(self, connection): - dbapi_con = connection.connection - version = [] - r = re.compile('[.\-]') - for n in r.split(dbapi_con.server_info): - try: - version.append(int(n)) - except ValueError: - version.append(n) - return tuple(version) - - def _extract_error_code(self, exception): - return exception.errno - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - - return connection.connection.charset - - def _compat_fetchall(self, rp, charset=None): - """oursql isn't super-broken like MySQLdb, yaaay.""" - return rp.fetchall() - - def _compat_fetchone(self, rp, charset=None): - """oursql isn't super-broken like MySQLdb, yaaay.""" - return rp.fetchone() - - def _compat_first(self, rp, charset=None): - return rp.first() - - -dialect = MySQLDialect_oursql diff --git a/python/sqlalchemy/dialects/mysql/pymysql.py b/python/sqlalchemy/dialects/mysql/pymysql.py deleted file mode 100644 index 87159b56..00000000 --- a/python/sqlalchemy/dialects/mysql/pymysql.py +++ /dev/null @@ -1,57 +0,0 @@ -# mysql/pymysql.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -.. dialect:: mysql+pymysql - :name: PyMySQL - :dbapi: pymysql - :connectstring: mysql+pymysql://:@/\ -[?] - :url: http://www.pymysql.org/ - -Unicode -------- - -Please see :ref:`mysql_unicode` for current recommendations on unicode -handling. - -MySQL-Python Compatibility --------------------------- - -The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver, -and targets 100% compatibility. Most behavioral notes for MySQL-python apply -to the pymysql driver as well. - -""" - -from .mysqldb import MySQLDialect_mysqldb -from ...util import py3k - - -class MySQLDialect_pymysql(MySQLDialect_mysqldb): - driver = 'pymysql' - - description_encoding = None - - # generally, these two values should be both True - # or both False. PyMySQL unicode tests pass all the way back - # to 0.4 either way. See [ticket:3337] - supports_unicode_statements = True - supports_unicode_binds = True - - @classmethod - def dbapi(cls): - return __import__('pymysql') - - if py3k: - def _extract_error_code(self, exception): - if isinstance(exception.args[0], Exception): - exception = exception.args[0] - return exception.args[0] - -dialect = MySQLDialect_pymysql diff --git a/python/sqlalchemy/dialects/mysql/pyodbc.py b/python/sqlalchemy/dialects/mysql/pyodbc.py deleted file mode 100644 index b544f058..00000000 --- a/python/sqlalchemy/dialects/mysql/pyodbc.py +++ /dev/null @@ -1,79 +0,0 @@ -# mysql/pyodbc.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - - -.. dialect:: mysql+pyodbc - :name: PyODBC - :dbapi: pyodbc - :connectstring: mysql+pyodbc://:@ - :url: http://pypi.python.org/pypi/pyodbc/ - - .. note:: The PyODBC for MySQL dialect is not well supported, and - is subject to unresolved character encoding issues - which exist within the current ODBC drivers available. 
- (see http://code.google.com/p/pyodbc/issues/detail?id=25). - Other dialects for MySQL are recommended. - -""" - -from .base import MySQLDialect, MySQLExecutionContext -from ...connectors.pyodbc import PyODBCConnector -from ... import util -import re - - -class MySQLExecutionContext_pyodbc(MySQLExecutionContext): - - def get_lastrowid(self): - cursor = self.create_cursor() - cursor.execute("SELECT LAST_INSERT_ID()") - lastrowid = cursor.fetchone()[0] - cursor.close() - return lastrowid - - -class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect): - supports_unicode_statements = False - execution_ctx_cls = MySQLExecutionContext_pyodbc - - pyodbc_driver_name = "MySQL" - - def __init__(self, **kw): - # deal with http://code.google.com/p/pyodbc/issues/detail?id=25 - kw.setdefault('convert_unicode', True) - super(MySQLDialect_pyodbc, self).__init__(**kw) - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - - # Prefer 'character_set_results' for the current connection over the - # value in the driver. SET NAMES or individual variable SETs will - # change the charset without updating the driver's view of the world. - # - # If it's decided that issuing that sort of SQL leaves you SOL, then - # this can prefer the driver value. - rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") - opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)]) - for key in ('character_set_connection', 'character_set'): - if opts.get(key, None): - return opts[key] - - util.warn("Could not detect the connection character set. " - "Assuming latin1.") - return 'latin1' - - def _extract_error_code(self, exception): - m = re.compile(r"\((\d+)\)").search(str(exception.args)) - c = m.group(1) - if c: - return int(c) - else: - return None - -dialect = MySQLDialect_pyodbc diff --git a/python/sqlalchemy/dialects/mysql/zxjdbc.py b/python/sqlalchemy/dialects/mysql/zxjdbc.py deleted file mode 100644 index 37b0b630..00000000 --- a/python/sqlalchemy/dialects/mysql/zxjdbc.py +++ /dev/null @@ -1,117 +0,0 @@ -# mysql/zxjdbc.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -.. dialect:: mysql+zxjdbc - :name: zxjdbc for Jython - :dbapi: zxjdbc - :connectstring: mysql+zxjdbc://:@[:]/\ - - :driverurl: http://dev.mysql.com/downloads/connector/j/ - - .. note:: Jython is not supported by current versions of SQLAlchemy. The - zxjdbc dialect should be considered as experimental. - -Character Sets --------------- - -SQLAlchemy zxjdbc dialects pass unicode straight through to the -zxjdbc/JDBC layer. To allow multiple character sets to be sent from the -MySQL Connector/J JDBC driver, by default SQLAlchemy sets its -``characterEncoding`` connection property to ``UTF-8``. It may be -overridden via a ``create_engine`` URL parameter. - -""" -import re - -from ... 
import types as sqltypes, util -from ...connectors.zxJDBC import ZxJDBCConnector -from .base import BIT, MySQLDialect, MySQLExecutionContext - - -class _ZxJDBCBit(BIT): - def result_processor(self, dialect, coltype): - """Converts boolean or byte arrays from MySQL Connector/J to longs.""" - def process(value): - if value is None: - return value - if isinstance(value, bool): - return int(value) - v = 0 - for i in value: - v = v << 8 | (i & 0xff) - value = v - return value - return process - - -class MySQLExecutionContext_zxjdbc(MySQLExecutionContext): - def get_lastrowid(self): - cursor = self.create_cursor() - cursor.execute("SELECT LAST_INSERT_ID()") - lastrowid = cursor.fetchone()[0] - cursor.close() - return lastrowid - - -class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect): - jdbc_db_name = 'mysql' - jdbc_driver_name = 'com.mysql.jdbc.Driver' - - execution_ctx_cls = MySQLExecutionContext_zxjdbc - - colspecs = util.update_copy( - MySQLDialect.colspecs, - { - sqltypes.Time: sqltypes.Time, - BIT: _ZxJDBCBit - } - ) - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - # Prefer 'character_set_results' for the current connection over the - # value in the driver. SET NAMES or individual variable SETs will - # change the charset without updating the driver's view of the world. - # - # If it's decided that issuing that sort of SQL leaves you SOL, then - # this can prefer the driver value. - rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") - opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs)) - for key in ('character_set_connection', 'character_set'): - if opts.get(key, None): - return opts[key] - - util.warn("Could not detect the connection character set. " - "Assuming latin1.") - return 'latin1' - - def _driver_kwargs(self): - """return kw arg dict to be sent to connect().""" - return dict(characterEncoding='UTF-8', yearIsDateType='false') - - def _extract_error_code(self, exception): - # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist - # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' () - m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args)) - c = m.group(1) - if c: - return int(c) - - def _get_server_version_info(self, connection): - dbapi_con = connection.connection - version = [] - r = re.compile('[.\-]') - for n in r.split(dbapi_con.dbversion): - try: - version.append(int(n)) - except ValueError: - version.append(n) - return tuple(version) - -dialect = MySQLDialect_zxjdbc diff --git a/python/sqlalchemy/dialects/oracle/__init__.py b/python/sqlalchemy/dialects/oracle/__init__.py deleted file mode 100644 index b055b0b1..00000000 --- a/python/sqlalchemy/dialects/oracle/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# oracle/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc - -base.dialect = cx_oracle.dialect - -from sqlalchemy.dialects.oracle.base import \ - VARCHAR, NVARCHAR, CHAR, DATE, NUMBER,\ - BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\ - FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\ - VARCHAR2, NVARCHAR2, ROWID, dialect - - -__all__ = ( - 'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'NUMBER', - 'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW', - 'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL', - 'VARCHAR2', 'NVARCHAR2', 'ROWID' -) 
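The deleted ``__init__`` above only re-exports names from ``oracle.base``; typical
downstream code pulls them straight from the dialect package (a sketch of standard
SQLAlchemy usage)::

    from sqlalchemy import Column, MetaData, Table
    from sqlalchemy.dialects.oracle import NUMBER, VARCHAR2

    metadata = MetaData()
    accounts = Table('accounts', metadata,
                     Column('id', NUMBER(10, 0), primary_key=True),
                     Column('name', VARCHAR2(100)))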
diff --git a/python/sqlalchemy/dialects/oracle/base.py b/python/sqlalchemy/dialects/oracle/base.py deleted file mode 100644 index 2449b5a8..00000000 --- a/python/sqlalchemy/dialects/oracle/base.py +++ /dev/null @@ -1,1546 +0,0 @@ -# oracle/base.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: oracle - :name: Oracle - - Oracle version 8 through current (11g at the time of this writing) are - supported. - -Connect Arguments ------------------ - -The dialect supports several :func:`~sqlalchemy.create_engine()` arguments -which affect the behavior of the dialect regardless of driver in use. - -* ``use_ansi`` - Use ANSI JOIN constructs (see the section on Oracle 8). - Defaults to ``True``. If ``False``, Oracle-8 compatible constructs are used - for joins. - -* ``optimize_limits`` - defaults to ``False``. see the section on - LIMIT/OFFSET. - -* ``use_binds_for_limits`` - defaults to ``True``. see the section on - LIMIT/OFFSET. - -Auto Increment Behavior ------------------------ - -SQLAlchemy Table objects which include integer primary keys are usually -assumed to have "autoincrementing" behavior, meaning they can generate their -own primary key values upon INSERT. Since Oracle has no "autoincrement" -feature, SQLAlchemy relies upon sequences to produce these values. With the -Oracle dialect, *a sequence must always be explicitly specified to enable -autoincrement*. This is divergent with the majority of documentation -examples which assume the usage of an autoincrement-capable database. To -specify sequences, use the sqlalchemy.schema.Sequence object which is passed -to a Column construct:: - - t = Table('mytable', metadata, - Column('id', Integer, Sequence('id_seq'), primary_key=True), - Column(...), ... - ) - -This step is also required when using table reflection, i.e. autoload=True:: - - t = Table('mytable', metadata, - Column('id', Integer, Sequence('id_seq'), primary_key=True), - autoload=True - ) - -Identifier Casing ------------------ - -In Oracle, the data dictionary represents all case insensitive identifier -names using UPPERCASE text. SQLAlchemy on the other hand considers an -all-lower case identifier name to be case insensitive. The Oracle dialect -converts all case insensitive identifiers to and from those two formats during -schema level communication, such as reflection of tables and indexes. Using -an UPPERCASE name on the SQLAlchemy side indicates a case sensitive -identifier, and SQLAlchemy will quote the name - this will cause mismatches -against data dictionary data received from Oracle, so unless identifier names -have been truly created as case sensitive (i.e. using quoted names), all -lowercase names should be used on the SQLAlchemy side. - - -LIMIT/OFFSET Support --------------------- - -Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses -a wrapped subquery approach in conjunction with ROWNUM. The exact methodology -is taken from -http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html . - -There are two options which affect its behavior: - -* the "FIRST ROWS()" optimization keyword is not used by default. To enable - the usage of this optimization directive, specify ``optimize_limits=True`` - to :func:`.create_engine`. -* the values passed for the limit/offset are sent as bound parameters. 
Some - users have observed that Oracle produces a poor query plan when the values - are sent as binds and not rendered literally. To render the limit/offset - values literally within the SQL statement, specify - ``use_binds_for_limits=False`` to :func:`.create_engine`. - -Some users have reported better performance when the entirely different -approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to -provide LIMIT/OFFSET (note that the majority of users don't observe this). -To suit this case the method used for LIMIT/OFFSET can be replaced entirely. -See the recipe at -http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault -which installs a select compiler that overrides the generation of limit/offset -with a window function. - -.. _oracle_returning: - -RETURNING Support ------------------ - -The Oracle database supports a limited form of RETURNING, in order to retrieve -result sets of matched rows from INSERT, UPDATE and DELETE statements. -Oracle's RETURNING..INTO syntax only supports one row being returned, as it -relies upon OUT parameters in order to function. In addition, supported -DBAPIs have further limitations (see :ref:`cx_oracle_returning`). - -SQLAlchemy's "implicit returning" feature, which employs RETURNING within an -INSERT and sometimes an UPDATE statement in order to fetch newly generated -primary key values and other SQL defaults and expressions, is normally enabled -on the Oracle backend. By default, "implicit returning" typically only -fetches the value of a single ``nextval(some_seq)`` expression embedded into -an INSERT in order to increment a sequence within an INSERT statement and get -the value back at the same time. To disable this feature across the board, -specify ``implicit_returning=False`` to :func:`.create_engine`:: - - engine = create_engine("oracle://scott:tiger@dsn", - implicit_returning=False) - -Implicit returning can also be disabled on a table-by-table basis as a table -option:: - - # Core Table - my_table = Table("my_table", metadata, ..., implicit_returning=False) - - - # declarative - class MyClass(Base): - __tablename__ = 'my_table' - __table_args__ = {"implicit_returning": False} - -.. seealso:: - - :ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on - implicit returning. - -ON UPDATE CASCADE ------------------ - -Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based -solution is available at -http://asktom.oracle.com/tkyte/update_cascade/index.html . - -When using the SQLAlchemy ORM, the ORM has limited ability to manually issue -cascading updates - specify ForeignKey objects using the -"deferrable=True, initially='deferred'" keyword arguments, -and specify "passive_updates=False" on each relationship(). - -Oracle 8 Compatibility ----------------------- - -When Oracle 8 is detected, the dialect internally configures itself to the -following behaviors: - -* the use_ansi flag is set to False. This has the effect of converting all - JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN - makes use of Oracle's (+) operator. - -* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when - the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are - issued instead. This because these types don't seem to work correctly on - Oracle 8 even though they are available. The - :class:`~sqlalchemy.types.NVARCHAR` and - :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate - NVARCHAR2 and NCLOB. 
- -* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy - encodes all Python unicode objects to "string" before passing in as bind - parameters. - -Synonym/DBLINK Reflection -------------------------- - -When using reflection with Table objects, the dialect can optionally search -for tables indicated by synonyms, either in local or remote schemas or -accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as -a keyword argument to the :class:`.Table` construct:: - - some_table = Table('some_table', autoload=True, - autoload_with=some_engine, - oracle_resolve_synonyms=True) - -When this flag is set, the given name (such as ``some_table`` above) will -be searched not just in the ``ALL_TABLES`` view, but also within the -``ALL_SYNONYMS`` view to see if this name is actually a synonym to another -name. If the synonym is located and refers to a DBLINK, the oracle dialect -knows how to locate the table's information using DBLINK syntax(e.g. -``@dblink``). - -``oracle_resolve_synonyms`` is accepted wherever reflection arguments are -accepted, including methods such as :meth:`.MetaData.reflect` and -:meth:`.Inspector.get_columns`. - -If synonyms are not in use, this flag should be left disabled. - -DateTime Compatibility ----------------------- - -Oracle has no datatype known as ``DATETIME``, it instead has only ``DATE``, -which can actually store a date and time value. For this reason, the Oracle -dialect provides a type :class:`.oracle.DATE` which is a subclass of -:class:`.DateTime`. This type has no special behavior, and is only -present as a "marker" for this type; additionally, when a database column -is reflected and the type is reported as ``DATE``, the time-supporting -:class:`.oracle.DATE` type is used. - -.. versionchanged:: 0.9.4 Added :class:`.oracle.DATE` to subclass - :class:`.DateTime`. This is a change as previous versions - would reflect a ``DATE`` column as :class:`.types.DATE`, which subclasses - :class:`.Date`. The only significance here is for schemes that are - examining the type of column for use in special Python translations or - for migrating schemas to other database backends. - -.. _oracle_table_options: - -Oracle Table Options -------------------------- - -The CREATE TABLE phrase supports the following options with Oracle -in conjunction with the :class:`.Table` construct: - - -* ``ON COMMIT``:: - - Table( - "some_table", metadata, ..., - prefixes=['GLOBAL TEMPORARY'], oracle_on_commit='PRESERVE ROWS') - -.. versionadded:: 1.0.0 - -* ``COMPRESS``:: - - Table('mytable', metadata, Column('data', String(32)), - oracle_compress=True) - - Table('mytable', metadata, Column('data', String(32)), - oracle_compress=6) - - The ``oracle_compress`` parameter accepts either an integer compression - level, or ``True`` to use the default compression level. - -.. versionadded:: 1.0.0 - -.. _oracle_index_options: - -Oracle Specific Index Options ------------------------------ - -Bitmap Indexes -~~~~~~~~~~~~~~ - -You can specify the ``oracle_bitmap`` parameter to create a bitmap index -instead of a B-tree index:: - - Index('my_index', my_table.c.data, oracle_bitmap=True) - -Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not -check for such limitations, only the database will. - -.. versionadded:: 1.0.0 - -Index compression -~~~~~~~~~~~~~~~~~ - -Oracle has a more efficient storage mode for indexes containing lots of -repeated values. 
Use the ``oracle_compress`` parameter to turn on key c -ompression:: - - Index('my_index', my_table.c.data, oracle_compress=True) - - Index('my_index', my_table.c.data1, my_table.c.data2, unique=True, - oracle_compress=1) - -The ``oracle_compress`` parameter accepts either an integer specifying the -number of prefix columns to compress, or ``True`` to use the default (all -columns for non-unique indexes, all but the last column for unique indexes). - -.. versionadded:: 1.0.0 - -""" - -import re - -from sqlalchemy import util, sql -from sqlalchemy.engine import default, reflection -from sqlalchemy.sql import compiler, visitors, expression -from sqlalchemy.sql import operators as sql_operators -from sqlalchemy.sql.elements import quoted_name -from sqlalchemy import types as sqltypes, schema as sa_schema -from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \ - BLOB, CLOB, TIMESTAMP, FLOAT - -RESERVED_WORDS = \ - set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN ' - 'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED ' - 'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE ' - 'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE ' - 'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES ' - 'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS ' - 'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER ' - 'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR ' - 'DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL'.split()) - -NO_ARG_FNS = set('UID CURRENT_DATE SYSDATE USER ' - 'CURRENT_TIME CURRENT_TIMESTAMP'.split()) - - -class RAW(sqltypes._Binary): - __visit_name__ = 'RAW' -OracleRaw = RAW - - -class NCLOB(sqltypes.Text): - __visit_name__ = 'NCLOB' - - -class VARCHAR2(VARCHAR): - __visit_name__ = 'VARCHAR2' - -NVARCHAR2 = NVARCHAR - - -class NUMBER(sqltypes.Numeric, sqltypes.Integer): - __visit_name__ = 'NUMBER' - - def __init__(self, precision=None, scale=None, asdecimal=None): - if asdecimal is None: - asdecimal = bool(scale and scale > 0) - - super(NUMBER, self).__init__( - precision=precision, scale=scale, asdecimal=asdecimal) - - def adapt(self, impltype): - ret = super(NUMBER, self).adapt(impltype) - # leave a hint for the DBAPI handler - ret._is_oracle_number = True - return ret - - @property - def _type_affinity(self): - if bool(self.scale and self.scale > 0): - return sqltypes.Numeric - else: - return sqltypes.Integer - - -class DOUBLE_PRECISION(sqltypes.Numeric): - __visit_name__ = 'DOUBLE_PRECISION' - - def __init__(self, precision=None, scale=None, asdecimal=None): - if asdecimal is None: - asdecimal = False - - super(DOUBLE_PRECISION, self).__init__( - precision=precision, scale=scale, asdecimal=asdecimal) - - -class BFILE(sqltypes.LargeBinary): - __visit_name__ = 'BFILE' - - -class LONG(sqltypes.Text): - __visit_name__ = 'LONG' - - -class DATE(sqltypes.DateTime): - """Provide the oracle DATE type. - - This type has no special Python behavior, except that it subclasses - :class:`.types.DateTime`; this is to suit the fact that the Oracle - ``DATE`` type supports a time value. - - .. versionadded:: 0.9.4 - - """ - __visit_name__ = 'DATE' - - def _compare_type_affinity(self, other): - return other._type_affinity in (sqltypes.DateTime, sqltypes.Date) - - -class INTERVAL(sqltypes.TypeEngine): - __visit_name__ = 'INTERVAL' - - def __init__(self, - day_precision=None, - second_precision=None): - """Construct an INTERVAL. - - Note that only DAY TO SECOND intervals are currently supported. 
- This is due to a lack of support for YEAR TO MONTH intervals - within available DBAPIs (cx_oracle and zxjdbc). - - :param day_precision: the day precision value. this is the number of - digits to store for the day field. Defaults to "2" - :param second_precision: the second precision value. this is the - number of digits to store for the fractional seconds field. - Defaults to "6". - - """ - self.day_precision = day_precision - self.second_precision = second_precision - - @classmethod - def _adapt_from_generic_interval(cls, interval): - return INTERVAL(day_precision=interval.day_precision, - second_precision=interval.second_precision) - - @property - def _type_affinity(self): - return sqltypes.Interval - - -class ROWID(sqltypes.TypeEngine): - """Oracle ROWID type. - - When used in a cast() or similar, generates ROWID. - - """ - __visit_name__ = 'ROWID' - - -class _OracleBoolean(sqltypes.Boolean): - def get_dbapi_type(self, dbapi): - return dbapi.NUMBER - -colspecs = { - sqltypes.Boolean: _OracleBoolean, - sqltypes.Interval: INTERVAL, - sqltypes.DateTime: DATE -} - -ischema_names = { - 'VARCHAR2': VARCHAR, - 'NVARCHAR2': NVARCHAR, - 'CHAR': CHAR, - 'DATE': DATE, - 'NUMBER': NUMBER, - 'BLOB': BLOB, - 'BFILE': BFILE, - 'CLOB': CLOB, - 'NCLOB': NCLOB, - 'TIMESTAMP': TIMESTAMP, - 'TIMESTAMP WITH TIME ZONE': TIMESTAMP, - 'INTERVAL DAY TO SECOND': INTERVAL, - 'RAW': RAW, - 'FLOAT': FLOAT, - 'DOUBLE PRECISION': DOUBLE_PRECISION, - 'LONG': LONG, -} - - -class OracleTypeCompiler(compiler.GenericTypeCompiler): - # Note: - # Oracle DATE == DATETIME - # Oracle does not allow milliseconds in DATE - # Oracle does not support TIME columns - - def visit_datetime(self, type_, **kw): - return self.visit_DATE(type_, **kw) - - def visit_float(self, type_, **kw): - return self.visit_FLOAT(type_, **kw) - - def visit_unicode(self, type_, **kw): - if self.dialect._supports_nchar: - return self.visit_NVARCHAR2(type_, **kw) - else: - return self.visit_VARCHAR2(type_, **kw) - - def visit_INTERVAL(self, type_, **kw): - return "INTERVAL DAY%s TO SECOND%s" % ( - type_.day_precision is not None and - "(%d)" % type_.day_precision or - "", - type_.second_precision is not None and - "(%d)" % type_.second_precision or - "", - ) - - def visit_LONG(self, type_, **kw): - return "LONG" - - def visit_TIMESTAMP(self, type_, **kw): - if type_.timezone: - return "TIMESTAMP WITH TIME ZONE" - else: - return "TIMESTAMP" - - def visit_DOUBLE_PRECISION(self, type_, **kw): - return self._generate_numeric(type_, "DOUBLE PRECISION", **kw) - - def visit_NUMBER(self, type_, **kw): - return self._generate_numeric(type_, "NUMBER", **kw) - - def _generate_numeric(self, type_, name, precision=None, scale=None, **kw): - if precision is None: - precision = type_.precision - - if scale is None: - scale = getattr(type_, 'scale', None) - - if precision is None: - return name - elif scale is None: - n = "%(name)s(%(precision)s)" - return n % {'name': name, 'precision': precision} - else: - n = "%(name)s(%(precision)s, %(scale)s)" - return n % {'name': name, 'precision': precision, 'scale': scale} - - def visit_string(self, type_, **kw): - return self.visit_VARCHAR2(type_, **kw) - - def visit_VARCHAR2(self, type_, **kw): - return self._visit_varchar(type_, '', '2') - - def visit_NVARCHAR2(self, type_, **kw): - return self._visit_varchar(type_, 'N', '2') - visit_NVARCHAR = visit_NVARCHAR2 - - def visit_VARCHAR(self, type_, **kw): - return self._visit_varchar(type_, '', '') - - def _visit_varchar(self, type_, n, num): - if not type_.length: - return 
"%(n)sVARCHAR%(two)s" % {'two': num, 'n': n} - elif not n and self.dialect._supports_char_length: - varchar = "VARCHAR%(two)s(%(length)s CHAR)" - return varchar % {'length': type_.length, 'two': num} - else: - varchar = "%(n)sVARCHAR%(two)s(%(length)s)" - return varchar % {'length': type_.length, 'two': num, 'n': n} - - def visit_text(self, type_, **kw): - return self.visit_CLOB(type_, **kw) - - def visit_unicode_text(self, type_, **kw): - if self.dialect._supports_nchar: - return self.visit_NCLOB(type_, **kw) - else: - return self.visit_CLOB(type_, **kw) - - def visit_large_binary(self, type_, **kw): - return self.visit_BLOB(type_, **kw) - - def visit_big_integer(self, type_, **kw): - return self.visit_NUMBER(type_, precision=19, **kw) - - def visit_boolean(self, type_, **kw): - return self.visit_SMALLINT(type_, **kw) - - def visit_RAW(self, type_, **kw): - if type_.length: - return "RAW(%(length)s)" % {'length': type_.length} - else: - return "RAW" - - def visit_ROWID(self, type_, **kw): - return "ROWID" - - -class OracleCompiler(compiler.SQLCompiler): - """Oracle compiler modifies the lexical structure of Select - statements to work under non-ANSI configured Oracle databases, if - the use_ansi flag is False. - """ - - compound_keywords = util.update_copy( - compiler.SQLCompiler.compound_keywords, - { - expression.CompoundSelect.EXCEPT: 'MINUS' - } - ) - - def __init__(self, *args, **kwargs): - self.__wheres = {} - self._quoted_bind_names = {} - super(OracleCompiler, self).__init__(*args, **kwargs) - - def visit_mod_binary(self, binary, operator, **kw): - return "mod(%s, %s)" % (self.process(binary.left, **kw), - self.process(binary.right, **kw)) - - def visit_now_func(self, fn, **kw): - return "CURRENT_TIMESTAMP" - - def visit_char_length_func(self, fn, **kw): - return "LENGTH" + self.function_argspec(fn, **kw) - - def visit_match_op_binary(self, binary, operator, **kw): - return "CONTAINS (%s, %s)" % (self.process(binary.left), - self.process(binary.right)) - - def visit_true(self, expr, **kw): - return '1' - - def visit_false(self, expr, **kw): - return '0' - - def get_cte_preamble(self, recursive): - return "WITH" - - def get_select_hint_text(self, byfroms): - return " ".join( - "/*+ %s */" % text for table, text in byfroms.items() - ) - - def function_argspec(self, fn, **kw): - if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS: - return compiler.SQLCompiler.function_argspec(self, fn, **kw) - else: - return "" - - def default_from(self): - """Called when a ``SELECT`` statement has no froms, - and no ``FROM`` clause is to be appended. - - The Oracle compiler tacks a "FROM DUAL" to the statement. 
- """ - - return " FROM DUAL" - - def visit_join(self, join, **kwargs): - if self.dialect.use_ansi: - return compiler.SQLCompiler.visit_join(self, join, **kwargs) - else: - kwargs['asfrom'] = True - if isinstance(join.right, expression.FromGrouping): - right = join.right.element - else: - right = join.right - return self.process(join.left, **kwargs) + \ - ", " + self.process(right, **kwargs) - - def _get_nonansi_join_whereclause(self, froms): - clauses = [] - - def visit_join(join): - if join.isouter: - def visit_binary(binary): - if binary.operator == sql_operators.eq: - if join.right.is_derived_from(binary.left.table): - binary.left = _OuterJoinColumn(binary.left) - elif join.right.is_derived_from(binary.right.table): - binary.right = _OuterJoinColumn(binary.right) - clauses.append(visitors.cloned_traverse( - join.onclause, {}, {'binary': visit_binary})) - else: - clauses.append(join.onclause) - - for j in join.left, join.right: - if isinstance(j, expression.Join): - visit_join(j) - elif isinstance(j, expression.FromGrouping): - visit_join(j.element) - - for f in froms: - if isinstance(f, expression.Join): - visit_join(f) - - if not clauses: - return None - else: - return sql.and_(*clauses) - - def visit_outer_join_column(self, vc, **kw): - return self.process(vc.column, **kw) + "(+)" - - def visit_sequence(self, seq): - return (self.dialect.identifier_preparer.format_sequence(seq) + - ".nextval") - - def get_render_as_alias_suffix(self, alias_name_text): - """Oracle doesn't like ``FROM table AS alias``""" - - return " " + alias_name_text - - def returning_clause(self, stmt, returning_cols): - columns = [] - binds = [] - for i, column in enumerate( - expression._select_iterables(returning_cols)): - if column.type._has_column_expression: - col_expr = column.type.column_expression(column) - else: - col_expr = column - outparam = sql.outparam("ret_%d" % i, type_=column.type) - self.binds[outparam.key] = outparam - binds.append( - self.bindparam_string(self._truncate_bindparam(outparam))) - columns.append( - self.process(col_expr, within_columns_clause=False)) - - self._add_to_result_map( - outparam.key, outparam.key, - (column, getattr(column, 'name', None), - getattr(column, 'key', None)), - column.type - ) - - return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) - - def _TODO_visit_compound_select(self, select): - """Need to determine how to get ``LIMIT``/``OFFSET`` into a - ``UNION`` for Oracle. - """ - pass - - def visit_select(self, select, **kwargs): - """Look for ``LIMIT`` and OFFSET in a select statement, and if - so tries to wrap it in a subquery with ``rownum`` criterion. - """ - - if not getattr(select, '_oracle_visit', None): - if not self.dialect.use_ansi: - froms = self._display_froms_for_select( - select, kwargs.get('asfrom', False)) - whereclause = self._get_nonansi_join_whereclause(froms) - if whereclause is not None: - select = select.where(whereclause) - select._oracle_visit = True - - limit_clause = select._limit_clause - offset_clause = select._offset_clause - if limit_clause is not None or offset_clause is not None: - # See http://www.oracle.com/technology/oramag/oracle/06-sep/\ - # o56asktom.html - # - # Generalized form of an Oracle pagination query: - # select ... from ( - # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from - # ( select distinct ... where ... order by ... 
- # ) where ROWNUM <= :limit+:offset - # ) where ora_rn > :offset - # Outer select and "ROWNUM as ora_rn" can be dropped if - # limit=0 - - kwargs['select_wraps_for'] = select - select = select._generate() - select._oracle_visit = True - - # Wrap the middle select and add the hint - limitselect = sql.select([c for c in select.c]) - if limit_clause is not None and \ - self.dialect.optimize_limits and \ - select._simple_int_limit: - limitselect = limitselect.prefix_with( - "/*+ FIRST_ROWS(%d) */" % - select._limit) - - limitselect._oracle_visit = True - limitselect._is_wrapper = True - - # If needed, add the limiting clause - if limit_clause is not None: - if not self.dialect.use_binds_for_limits: - # use simple int limits, will raise an exception - # if the limit isn't specified this way - max_row = select._limit - - if offset_clause is not None: - max_row += select._offset - max_row = sql.literal_column("%d" % max_row) - else: - max_row = limit_clause - if offset_clause is not None: - max_row = max_row + offset_clause - limitselect.append_whereclause( - sql.literal_column("ROWNUM") <= max_row) - - # If needed, add the ora_rn, and wrap again with offset. - if offset_clause is None: - limitselect._for_update_arg = select._for_update_arg - select = limitselect - else: - limitselect = limitselect.column( - sql.literal_column("ROWNUM").label("ora_rn")) - limitselect._oracle_visit = True - limitselect._is_wrapper = True - - offsetselect = sql.select( - [c for c in limitselect.c if c.key != 'ora_rn']) - offsetselect._oracle_visit = True - offsetselect._is_wrapper = True - - if not self.dialect.use_binds_for_limits: - offset_clause = sql.literal_column( - "%d" % select._offset) - offsetselect.append_whereclause( - sql.literal_column("ora_rn") > offset_clause) - - offsetselect._for_update_arg = select._for_update_arg - select = offsetselect - - return compiler.SQLCompiler.visit_select(self, select, **kwargs) - - def limit_clause(self, select, **kw): - return "" - - def for_update_clause(self, select, **kw): - if self.is_subquery(): - return "" - - tmp = ' FOR UPDATE' - - if select._for_update_arg.of: - tmp += ' OF ' + ', '.join( - self.process(elem, **kw) for elem in - select._for_update_arg.of - ) - - if select._for_update_arg.nowait: - tmp += " NOWAIT" - - return tmp - - -class OracleDDLCompiler(compiler.DDLCompiler): - - def define_constraint_cascades(self, constraint): - text = "" - if constraint.ondelete is not None: - text += " ON DELETE %s" % constraint.ondelete - - # oracle has no ON UPDATE CASCADE - - # its only available via triggers - # http://asktom.oracle.com/tkyte/update_cascade/index.html - if constraint.onupdate is not None: - util.warn( - "Oracle does not contain native UPDATE CASCADE " - "functionality - onupdates will not be rendered for foreign " - "keys. 
Consider using deferrable=True, initially='deferred' " - "or triggers.") - - return text - - def visit_create_index(self, create): - index = create.element - self._verify_index_table(index) - preparer = self.preparer - text = "CREATE " - if index.unique: - text += "UNIQUE " - if index.dialect_options['oracle']['bitmap']: - text += "BITMAP " - text += "INDEX %s ON %s (%s)" % ( - self._prepared_index_name(index, include_schema=True), - preparer.format_table(index.table, use_schema=True), - ', '.join( - self.sql_compiler.process( - expr, - include_table=False, literal_binds=True) - for expr in index.expressions) - ) - if index.dialect_options['oracle']['compress'] is not False: - if index.dialect_options['oracle']['compress'] is True: - text += " COMPRESS" - else: - text += " COMPRESS %d" % ( - index.dialect_options['oracle']['compress'] - ) - return text - - def post_create_table(self, table): - table_opts = [] - opts = table.dialect_options['oracle'] - - if opts['on_commit']: - on_commit_options = opts['on_commit'].replace("_", " ").upper() - table_opts.append('\n ON COMMIT %s' % on_commit_options) - - if opts['compress']: - if opts['compress'] is True: - table_opts.append("\n COMPRESS") - else: - table_opts.append("\n COMPRESS FOR %s" % ( - opts['compress'] - )) - - return ''.join(table_opts) - - -class OracleIdentifierPreparer(compiler.IdentifierPreparer): - - reserved_words = set([x.lower() for x in RESERVED_WORDS]) - illegal_initial_characters = set( - (str(dig) for dig in range(0, 10))).union(["_", "$"]) - - def _bindparam_requires_quotes(self, value): - """Return True if the given identifier requires quoting.""" - lc_value = value.lower() - return (lc_value in self.reserved_words - or value[0] in self.illegal_initial_characters - or not self.legal_characters.match(util.text_type(value)) - ) - - def format_savepoint(self, savepoint): - name = re.sub(r'^_+', '', savepoint.ident) - return super( - OracleIdentifierPreparer, self).format_savepoint(savepoint, name) - - -class OracleExecutionContext(default.DefaultExecutionContext): - def fire_sequence(self, seq, type_): - return self._execute_scalar( - "SELECT " + - self.dialect.identifier_preparer.format_sequence(seq) + - ".nextval FROM DUAL", type_) - - -class OracleDialect(default.DefaultDialect): - name = 'oracle' - supports_alter = True - supports_unicode_statements = False - supports_unicode_binds = False - max_identifier_length = 30 - supports_sane_rowcount = True - supports_sane_multi_rowcount = False - - supports_simple_order_by_label = False - - supports_sequences = True - sequences_optional = False - postfetch_lastrowid = False - - default_paramstyle = 'named' - colspecs = colspecs - ischema_names = ischema_names - requires_name_normalize = True - - supports_default_values = False - supports_empty_insert = False - - statement_compiler = OracleCompiler - ddl_compiler = OracleDDLCompiler - type_compiler = OracleTypeCompiler - preparer = OracleIdentifierPreparer - execution_ctx_cls = OracleExecutionContext - - reflection_options = ('oracle_resolve_synonyms', ) - - construct_arguments = [ - (sa_schema.Table, { - "resolve_synonyms": False, - "on_commit": None, - "compress": False - }), - (sa_schema.Index, { - "bitmap": False, - "compress": False - }) - ] - - def __init__(self, - use_ansi=True, - optimize_limits=False, - use_binds_for_limits=True, - **kwargs): - default.DefaultDialect.__init__(self, **kwargs) - self.use_ansi = use_ansi - self.optimize_limits = optimize_limits - self.use_binds_for_limits = use_binds_for_limits - - def 
initialize(self, connection): - super(OracleDialect, self).initialize(connection) - self.implicit_returning = self.__dict__.get( - 'implicit_returning', - self.server_version_info > (10, ) - ) - - if self._is_oracle_8: - self.colspecs = self.colspecs.copy() - self.colspecs.pop(sqltypes.Interval) - self.use_ansi = False - - @property - def _is_oracle_8(self): - return self.server_version_info and \ - self.server_version_info < (9, ) - - @property - def _supports_table_compression(self): - return self.server_version_info and \ - self.server_version_info >= (9, 2, ) - - @property - def _supports_table_compress_for(self): - return self.server_version_info and \ - self.server_version_info >= (11, ) - - @property - def _supports_char_length(self): - return not self._is_oracle_8 - - @property - def _supports_nchar(self): - return not self._is_oracle_8 - - def do_release_savepoint(self, connection, name): - # Oracle does not support RELEASE SAVEPOINT - pass - - def has_table(self, connection, table_name, schema=None): - if not schema: - schema = self.default_schema_name - cursor = connection.execute( - sql.text("SELECT table_name FROM all_tables " - "WHERE table_name = :name AND owner = :schema_name"), - name=self.denormalize_name(table_name), - schema_name=self.denormalize_name(schema)) - return cursor.first() is not None - - def has_sequence(self, connection, sequence_name, schema=None): - if not schema: - schema = self.default_schema_name - cursor = connection.execute( - sql.text("SELECT sequence_name FROM all_sequences " - "WHERE sequence_name = :name AND " - "sequence_owner = :schema_name"), - name=self.denormalize_name(sequence_name), - schema_name=self.denormalize_name(schema)) - return cursor.first() is not None - - def normalize_name(self, name): - if name is None: - return None - if util.py2k: - if isinstance(name, str): - name = name.decode(self.encoding) - if name.upper() == name and not \ - self.identifier_preparer._requires_quotes(name.lower()): - return name.lower() - elif name.lower() == name: - return quoted_name(name, quote=True) - else: - return name - - def denormalize_name(self, name): - if name is None: - return None - elif name.lower() == name and not \ - self.identifier_preparer._requires_quotes(name.lower()): - name = name.upper() - if util.py2k: - if not self.supports_unicode_binds: - name = name.encode(self.encoding) - else: - name = unicode(name) - return name - - def _get_default_schema_name(self, connection): - return self.normalize_name( - connection.execute('SELECT USER FROM DUAL').scalar()) - - def _resolve_synonym(self, connection, desired_owner=None, - desired_synonym=None, desired_table=None): - """search for a local synonym matching the given desired owner/name. - - if desired_owner is None, attempts to locate a distinct owner. - - returns the actual name, owner, dblink name, and synonym name if - found. 
- """ - - q = "SELECT owner, table_owner, table_name, db_link, "\ - "synonym_name FROM all_synonyms WHERE " - clauses = [] - params = {} - if desired_synonym: - clauses.append("synonym_name = :synonym_name") - params['synonym_name'] = desired_synonym - if desired_owner: - clauses.append("owner = :desired_owner") - params['desired_owner'] = desired_owner - if desired_table: - clauses.append("table_name = :tname") - params['tname'] = desired_table - - q += " AND ".join(clauses) - - result = connection.execute(sql.text(q), **params) - if desired_owner: - row = result.first() - if row: - return (row['table_name'], row['table_owner'], - row['db_link'], row['synonym_name']) - else: - return None, None, None, None - else: - rows = result.fetchall() - if len(rows) > 1: - raise AssertionError( - "There are multiple tables visible to the schema, you " - "must specify owner") - elif len(rows) == 1: - row = rows[0] - return (row['table_name'], row['table_owner'], - row['db_link'], row['synonym_name']) - else: - return None, None, None, None - - @reflection.cache - def _prepare_reflection_args(self, connection, table_name, schema=None, - resolve_synonyms=False, dblink='', **kw): - - if resolve_synonyms: - actual_name, owner, dblink, synonym = self._resolve_synonym( - connection, - desired_owner=self.denormalize_name(schema), - desired_synonym=self.denormalize_name(table_name) - ) - else: - actual_name, owner, dblink, synonym = None, None, None, None - if not actual_name: - actual_name = self.denormalize_name(table_name) - - if dblink: - # using user_db_links here since all_db_links appears - # to have more restricted permissions. - # http://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm - # will need to hear from more users if we are doing - # the right thing here. 
See [ticket:2619] - owner = connection.scalar( - sql.text("SELECT username FROM user_db_links " - "WHERE db_link=:link"), link=dblink) - dblink = "@" + dblink - elif not owner: - owner = self.denormalize_name(schema or self.default_schema_name) - - return (actual_name, owner, dblink or '', synonym) - - @reflection.cache - def get_schema_names(self, connection, **kw): - s = "SELECT username FROM all_users ORDER BY username" - cursor = connection.execute(s,) - return [self.normalize_name(row[0]) for row in cursor] - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - schema = self.denormalize_name(schema or self.default_schema_name) - - # note that table_names() isn't loading DBLINKed or synonym'ed tables - if schema is None: - schema = self.default_schema_name - s = sql.text( - "SELECT table_name FROM all_tables " - "WHERE nvl(tablespace_name, 'no tablespace') NOT IN " - "('SYSTEM', 'SYSAUX') " - "AND OWNER = :owner " - "AND IOT_NAME IS NULL " - "AND DURATION IS NULL") - cursor = connection.execute(s, owner=schema) - return [self.normalize_name(row[0]) for row in cursor] - - @reflection.cache - def get_temp_table_names(self, connection, **kw): - schema = self.denormalize_name(self.default_schema_name) - s = sql.text( - "SELECT table_name FROM all_tables " - "WHERE nvl(tablespace_name, 'no tablespace') NOT IN " - "('SYSTEM', 'SYSAUX') " - "AND OWNER = :owner " - "AND IOT_NAME IS NULL " - "AND DURATION IS NOT NULL") - cursor = connection.execute(s, owner=schema) - return [self.normalize_name(row[0]) for row in cursor] - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - schema = self.denormalize_name(schema or self.default_schema_name) - s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner") - cursor = connection.execute(s, owner=self.denormalize_name(schema)) - return [self.normalize_name(row[0]) for row in cursor] - - @reflection.cache - def get_table_options(self, connection, table_name, schema=None, **kw): - options = {} - - resolve_synonyms = kw.get('oracle_resolve_synonyms', False) - dblink = kw.get('dblink', '') - info_cache = kw.get('info_cache') - - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - - params = {"table_name": table_name} - - columns = ["table_name"] - if self._supports_table_compression: - columns.append("compression") - if self._supports_table_compress_for: - columns.append("compress_for") - - text = "SELECT %(columns)s "\ - "FROM ALL_TABLES%(dblink)s "\ - "WHERE table_name = :table_name" - - if schema is not None: - params['owner'] = schema - text += " AND owner = :owner " - text = text % {'dblink': dblink, 'columns': ", ".join(columns)} - - result = connection.execute(sql.text(text), **params) - - enabled = dict(DISABLED=False, ENABLED=True) - - row = result.first() - if row: - if "compression" in row and enabled.get(row.compression, False): - if "compress_for" in row: - options['oracle_compress'] = row.compress_for - else: - options['oracle_compress'] = True - - return options - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - """ - - kw arguments can be: - - oracle_resolve_synonyms - - dblink - - """ - - resolve_synonyms = kw.get('oracle_resolve_synonyms', False) - dblink = kw.get('dblink', '') - info_cache = kw.get('info_cache') - - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - 
resolve_synonyms, dblink, - info_cache=info_cache) - columns = [] - if self._supports_char_length: - char_length_col = 'char_length' - else: - char_length_col = 'data_length' - - params = {"table_name": table_name} - text = "SELECT column_name, data_type, %(char_length_col)s, "\ - "data_precision, data_scale, "\ - "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\ - "WHERE table_name = :table_name" - if schema is not None: - params['owner'] = schema - text += " AND owner = :owner " - text += " ORDER BY column_id" - text = text % {'dblink': dblink, 'char_length_col': char_length_col} - - c = connection.execute(sql.text(text), **params) - - for row in c: - (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \ - (self.normalize_name(row[0]), row[0], row[1], row[ - 2], row[3], row[4], row[5] == 'Y', row[6]) - - if coltype == 'NUMBER': - coltype = NUMBER(precision, scale) - elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'): - coltype = self.ischema_names.get(coltype)(length) - elif 'WITH TIME ZONE' in coltype: - coltype = TIMESTAMP(timezone=True) - else: - coltype = re.sub(r'\(\d+\)', '', coltype) - try: - coltype = self.ischema_names[coltype] - except KeyError: - util.warn("Did not recognize type '%s' of column '%s'" % - (coltype, colname)) - coltype = sqltypes.NULLTYPE - - cdict = { - 'name': colname, - 'type': coltype, - 'nullable': nullable, - 'default': default, - 'autoincrement': default is None - } - if orig_colname.lower() == orig_colname: - cdict['quote'] = True - - columns.append(cdict) - return columns - - @reflection.cache - def get_indexes(self, connection, table_name, schema=None, - resolve_synonyms=False, dblink='', **kw): - - info_cache = kw.get('info_cache') - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - indexes = [] - - params = {'table_name': table_name} - text = \ - "SELECT a.index_name, a.column_name, "\ - "\nb.index_type, b.uniqueness, b.compression, b.prefix_length "\ - "\nFROM ALL_IND_COLUMNS%(dblink)s a, "\ - "\nALL_INDEXES%(dblink)s b "\ - "\nWHERE "\ - "\na.index_name = b.index_name "\ - "\nAND a.table_owner = b.table_owner "\ - "\nAND a.table_name = b.table_name "\ - "\nAND a.table_name = :table_name " - - if schema is not None: - params['schema'] = schema - text += "AND a.table_owner = :schema " - - text += "ORDER BY a.index_name, a.column_position" - - text = text % {'dblink': dblink} - - q = sql.text(text) - rp = connection.execute(q, **params) - indexes = [] - last_index_name = None - pk_constraint = self.get_pk_constraint( - connection, table_name, schema, resolve_synonyms=resolve_synonyms, - dblink=dblink, info_cache=kw.get('info_cache')) - pkeys = pk_constraint['constrained_columns'] - uniqueness = dict(NONUNIQUE=False, UNIQUE=True) - enabled = dict(DISABLED=False, ENABLED=True) - - oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE) - - def upper_name_set(names): - return set([i.upper() for i in names]) - - pk_names = upper_name_set(pkeys) - - def remove_if_primary_key(index): - # don't include the primary key index - if index is not None and \ - upper_name_set(index['column_names']) == pk_names: - indexes.pop() - - index = None - for rset in rp: - if rset.index_name != last_index_name: - remove_if_primary_key(index) - index = dict(name=self.normalize_name(rset.index_name), - column_names=[], dialect_options={}) - indexes.append(index) - index['unique'] = uniqueness.get(rset.uniqueness, False) - - if 
rset.index_type in ('BITMAP', 'FUNCTION-BASED BITMAP'): - index['dialect_options']['oracle_bitmap'] = True - if enabled.get(rset.compression, False): - index['dialect_options']['oracle_compress'] = rset.prefix_length - - # filter out Oracle SYS_NC names. could also do an outer join - # to the all_tab_columns table and check for real col names there. - if not oracle_sys_col.match(rset.column_name): - index['column_names'].append( - self.normalize_name(rset.column_name)) - last_index_name = rset.index_name - remove_if_primary_key(index) - return indexes - - @reflection.cache - def _get_constraint_data(self, connection, table_name, schema=None, - dblink='', **kw): - - params = {'table_name': table_name} - - text = \ - "SELECT"\ - "\nac.constraint_name,"\ - "\nac.constraint_type,"\ - "\nloc.column_name AS local_column,"\ - "\nrem.table_name AS remote_table,"\ - "\nrem.column_name AS remote_column,"\ - "\nrem.owner AS remote_owner,"\ - "\nloc.position as loc_pos,"\ - "\nrem.position as rem_pos"\ - "\nFROM all_constraints%(dblink)s ac,"\ - "\nall_cons_columns%(dblink)s loc,"\ - "\nall_cons_columns%(dblink)s rem"\ - "\nWHERE ac.table_name = :table_name"\ - "\nAND ac.constraint_type IN ('R','P')" - - if schema is not None: - params['owner'] = schema - text += "\nAND ac.owner = :owner" - - text += \ - "\nAND ac.owner = loc.owner"\ - "\nAND ac.constraint_name = loc.constraint_name"\ - "\nAND ac.r_owner = rem.owner(+)"\ - "\nAND ac.r_constraint_name = rem.constraint_name(+)"\ - "\nAND (rem.position IS NULL or loc.position=rem.position)"\ - "\nORDER BY ac.constraint_name, loc.position" - - text = text % {'dblink': dblink} - rp = connection.execute(sql.text(text), **params) - constraint_data = rp.fetchall() - return constraint_data - - @reflection.cache - def get_pk_constraint(self, connection, table_name, schema=None, **kw): - resolve_synonyms = kw.get('oracle_resolve_synonyms', False) - dblink = kw.get('dblink', '') - info_cache = kw.get('info_cache') - - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - pkeys = [] - constraint_name = None - constraint_data = self._get_constraint_data( - connection, table_name, schema, dblink, - info_cache=kw.get('info_cache')) - - for row in constraint_data: - (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ - row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]]) - if cons_type == 'P': - if constraint_name is None: - constraint_name = self.normalize_name(cons_name) - pkeys.append(local_column) - return {'constrained_columns': pkeys, 'name': constraint_name} - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - """ - - kw arguments can be: - - oracle_resolve_synonyms - - dblink - - """ - - requested_schema = schema # to check later on - resolve_synonyms = kw.get('oracle_resolve_synonyms', False) - dblink = kw.get('dblink', '') - info_cache = kw.get('info_cache') - - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - - constraint_data = self._get_constraint_data( - connection, table_name, schema, dblink, - info_cache=kw.get('info_cache')) - - def fkey_rec(): - return { - 'name': None, - 'constrained_columns': [], - 'referred_schema': None, - 'referred_table': None, - 'referred_columns': [] - } - - fkeys = util.defaultdict(fkey_rec) - - for row in constraint_data: - 
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ - row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]]) - - if cons_type == 'R': - if remote_table is None: - # ticket 363 - util.warn( - ("Got 'None' querying 'table_name' from " - "all_cons_columns%(dblink)s - does the user have " - "proper rights to the table?") % {'dblink': dblink}) - continue - - rec = fkeys[cons_name] - rec['name'] = cons_name - local_cols, remote_cols = rec[ - 'constrained_columns'], rec['referred_columns'] - - if not rec['referred_table']: - if resolve_synonyms: - ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \ - self._resolve_synonym( - connection, - desired_owner=self.denormalize_name( - remote_owner), - desired_table=self.denormalize_name( - remote_table) - ) - if ref_synonym: - remote_table = self.normalize_name(ref_synonym) - remote_owner = self.normalize_name( - ref_remote_owner) - - rec['referred_table'] = remote_table - - if requested_schema is not None or \ - self.denormalize_name(remote_owner) != schema: - rec['referred_schema'] = remote_owner - - local_cols.append(local_column) - remote_cols.append(remote_column) - - return list(fkeys.values()) - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, - resolve_synonyms=False, dblink='', **kw): - info_cache = kw.get('info_cache') - (view_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, view_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - - params = {'view_name': view_name} - text = "SELECT text FROM all_views WHERE view_name=:view_name" - - if schema is not None: - text += " AND owner = :schema" - params['schema'] = schema - - rp = connection.execute(sql.text(text), **params).scalar() - if rp: - if util.py2k: - rp = rp.decode(self.encoding) - return rp - else: - return None - - -class _OuterJoinColumn(sql.ClauseElement): - __visit_name__ = 'outer_join_column' - - def __init__(self, column): - self.column = column diff --git a/python/sqlalchemy/dialects/oracle/cx_oracle.py b/python/sqlalchemy/dialects/oracle/cx_oracle.py deleted file mode 100644 index dede3b21..00000000 --- a/python/sqlalchemy/dialects/oracle/cx_oracle.py +++ /dev/null @@ -1,989 +0,0 @@ -# oracle/cx_oracle.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -.. dialect:: oracle+cx_oracle - :name: cx-Oracle - :dbapi: cx_oracle - :connectstring: oracle+cx_oracle://user:pass@host:port/dbname\ -[?key=value&key=value...] - :url: http://cx-oracle.sourceforge.net/ - -Additional Connect Arguments ----------------------------- - -When connecting with ``dbname`` present, the host, port, and dbname tokens are -converted to a TNS name using -the cx_oracle ``makedsn()`` function. Otherwise, the host token is taken -directly as a TNS name. - -Additional arguments which may be specified either as query string arguments -on the URL, or as keyword arguments to :func:`.create_engine()` are: - -* ``allow_twophase`` - enable two-phase transactions. Defaults to ``True``. - -* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted - to 50. This setting is significant with cx_Oracle as the contents of LOB - objects are only readable within a "live" row (e.g. within a batch of - 50 rows). - -* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`. 
-
-* ``auto_setinputsizes`` - the cx_oracle.setinputsizes() call is issued for
-  all bind parameters. This is required for LOB datatypes but can be
-  disabled to reduce overhead. Defaults to ``True``. Specific types
-  can be excluded from this process using the ``exclude_setinputsizes``
-  parameter.
-
-* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
-
-* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
-
-* ``exclude_setinputsizes`` - a tuple or list of string DBAPI type names to
-  be excluded from the "auto setinputsizes" feature. The type names here
-  must match DBAPI types that are found in the "cx_Oracle" module namespace,
-  such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to
-  ``(STRING, UNICODE)``.
-
-  .. versionadded:: 0.8 specific DBAPI types can be excluded from the
-     auto_setinputsizes feature via the exclude_setinputsizes attribute.
-
-* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or
-  alternatively an integer value. This value is only available as a URL query
-  string argument.
-
-* ``threaded`` - enable multithreaded access to cx_oracle connections.
-  Defaults to ``True``. Note that this is the opposite default of the
-  cx_Oracle DBAPI itself.
-
-* ``service_name`` - An option to use connection string (DSN) with
-  ``SERVICE_NAME`` instead of ``SID``. It can't be passed when a ``database``
-  part is given.
-  E.g. ``oracle+cx_oracle://scott:tiger@host:1521/?service_name=hr``
-  is a valid URL. This value is only available as a URL query string argument.
-
-  .. versionadded:: 1.0.0
-
-.. _cx_oracle_unicode:
-
-Unicode
--------
-
-The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the
-ability to return string results as Python unicode objects natively.
-
-When used in Python 3, cx_Oracle returns all strings as Python unicode objects
-(that is, plain ``str`` in Python 3). In Python 2, it will return as Python
-unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For
-column values that are of type ``VARCHAR`` or other non-unicode string types,
-it will return values as Python strings (e.g. bytestrings).
-
-The cx_Oracle SQLAlchemy dialect presents two different options for the use
-case of returning ``VARCHAR`` column values as Python unicode objects under
-Python 2:
-
-* the cx_Oracle DBAPI has the ability to coerce all string results to Python
-  unicode objects unconditionally using output type handlers. This has
-  the advantage that the unicode conversion is global to all statements
-  at the cx_Oracle driver level, meaning it works with raw textual SQL
-  statements that have no typing information associated. However, this system
-  has been observed to incur significant performance overhead, not only
-  because it takes effect for all string values unconditionally, but also
-  because cx_Oracle under Python 2 seems to use a pure-Python function call in
-  order to do the decode operation, which under cPython can be orders of
-  magnitude slower than doing it using C functions alone.
-
-* SQLAlchemy has unicode-decoding services built in, and when using
-  SQLAlchemy's C extensions, these functions do not use any Python function
-  calls and are very fast. The disadvantage to this approach is that the
-  unicode conversion only takes effect for statements where the
-  :class:`.Unicode` type or :class:`.String` type with
-  ``convert_unicode=True`` is explicitly associated with the result column.
- This is the case for any ORM or Core query or SQL expression as well as for - a :func:`.text` construct that specifies output column types, so in the vast - majority of cases this is not an issue. However, when sending a completely - raw string to :meth:`.Connection.execute`, this typing information isn't - present, unless the string is handled within a :func:`.text` construct that - adds typing information. - -As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's -typing system. This keeps cx_Oracle's expensive Python 2 approach -disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy -detects that cx_Oracle is returning unicode objects natively and cx_Oracle's -system is used. - -To re-enable cx_Oracle's output type handler under Python 2, the -``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to -:func:`.create_engine`:: - - engine = create_engine("oracle+cx_oracle://dsn", coerce_to_unicode=True) - -Alternatively, to run a pure string SQL statement and get ``VARCHAR`` results -as Python unicode under Python 2 without using cx_Oracle's native handlers, -the :func:`.text` feature can be used:: - - from sqlalchemy import text, Unicode - result = conn.execute( - text("select username from user").columns(username=Unicode)) - -.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used - for unicode results of non-unicode datatypes in Python 2, after they were - identified as a major performance bottleneck. SQLAlchemy's own unicode - facilities are used instead. - -.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable - cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior. - -.. _cx_oracle_returning: - -RETURNING Support ------------------ - -The cx_oracle DBAPI supports a limited subset of Oracle's already limited -RETURNING support. Typically, results can only be guaranteed for at most one -column being returned; this is the typical case when SQLAlchemy uses RETURNING -to get just the value of a primary-key-associated sequence value. -Additional column expressions will cause problems in a non-determinative way, -due to cx_oracle's lack of support for the OCI_DATA_AT_EXEC API which is -required for more complex RETURNING scenarios. - -For this reason, stability may be enhanced by disabling RETURNING support -completely; SQLAlchemy otherwise will use RETURNING to fetch newly -sequence-generated primary keys. As illustrated in :ref:`oracle_returning`:: - - engine = create_engine("oracle://scott:tiger@dsn", - implicit_returning=False) - -.. seealso:: - - http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693 - - OCI documentation for RETURNING - - http://sourceforge.net/mailarchive/message.php?msg_id=31338136 - - cx_oracle developer commentary - -.. _cx_oracle_lob: - -LOB Objects ------------ - -cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy -converts these to strings so that the interface of the Binary type is -consistent with that of other backends, and so that the linkage to a live -cursor is not needed in scenarios like result.fetchmany() and -result.fetchall(). This means that by default, LOB objects are fully fetched -unconditionally by SQLAlchemy, and the linkage to a live cursor is broken. - -To disable this processing, pass ``auto_convert_lobs=False`` to -:func:`.create_engine()`. 
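-
-To make the LOB round trip concrete, here is a minimal sketch of reading a
-LOB with conversion disabled; the connection string and the ``my_docs``
-table are hypothetical, and with the default ``auto_convert_lobs=True`` the
-value would already arrive as a fully fetched string::
-
-    from sqlalchemy import create_engine
-
-    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn",
-                           auto_convert_lobs=False)
-
-    with engine.connect() as conn:
-        result = conn.execute("SELECT doc FROM my_docs WHERE id = 1")
-        row = result.fetchone()
-        # row[0] is a raw cx_Oracle.LOB, tied to the live cursor;
-        # read it before fetching further rows or closing the result
-        text = row[0].read()
-        result.close()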
-
-Two Phase Transaction Support
------------------------------
-
-Two Phase transactions are implemented using XA transactions, and are known
-to work in a rudimentary fashion with recent versions of cx_Oracle
-as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet
-considered to be robust and should still be regarded as experimental.
-
-In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
-two phase which prevents
-a particular DBAPI connection from being consistently usable in both
-prepared transactions as well as traditional DBAPI usage patterns; therefore
-once a particular connection is used via :meth:`.Connection.begin_prepared`,
-all subsequent usages of the underlying DBAPI connection must be within
-the context of prepared transactions.
-
-The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
-connections. Therefore, due to the above glitch, a DBAPI connection that has
-been used in a two-phase operation, and is then returned to the pool, will
-not be usable in a non-two-phase context. To avoid this situation,
-the application can make one of several choices:
-
-* Disable connection pooling using :class:`.NullPool`
-
-* Ensure that the particular :class:`.Engine` in use is only used
-  for two-phase operations. A :class:`.Engine` bound to an ORM
-  :class:`.Session` which includes ``twophase=True`` will consistently
-  use the two-phase transaction style.
-
-* For ad-hoc two-phase operations without disabling pooling, the DBAPI
-  connection in use can be evicted from the connection pool using the
-  :meth:`.Connection.detach` method.
-
-.. versionchanged:: 0.8.0b2,0.7.10
-   Support for cx_oracle prepared transactions has been implemented
-   and tested.
-
-.. _cx_oracle_numeric:
-
-Precision Numerics
-------------------
-
-The SQLAlchemy dialect goes through a lot of steps to ensure
-that decimal numbers are sent and received with full accuracy.
-An "outputtypehandler" callable is associated with each
-cx_oracle connection object which detects numeric types and
-receives them as string values, instead of receiving a Python
-``float`` directly, which is then passed to the Python
-``Decimal`` constructor. The :class:`.Numeric` and
-:class:`.Float` types under the cx_oracle dialect are aware of
-this behavior, and will coerce the ``Decimal`` to ``float`` if
-the ``asdecimal`` flag is ``False`` (default on :class:`.Float`,
-optional on :class:`.Numeric`).
-
-Because the handler coerces to ``Decimal`` in all cases first,
-the feature can detract significantly from performance.
-If precision numerics aren't required, the decimal handling
-can be disabled by passing the flag ``coerce_to_decimal=False``
-to :func:`.create_engine`::
-
-    engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
-
-.. versionadded:: 0.7.6
-   Add the ``coerce_to_decimal`` flag.
-
-Another alternative for performance is to use the
-`cdecimal `_ library;
-see :class:`.Numeric` for additional notes.
-
-The handler attempts to use the "precision" and "scale"
-attributes of the result set column to best determine if
-subsequent incoming values should be received as ``Decimal`` as
-opposed to int (in which case no processing is added). There are
-several scenarios where OCI_ does not provide unambiguous data
-as to the numeric type, including some situations where
-individual rows may return a combination of floating point and
-integer values. Certain values for "precision" and "scale" have
-been observed to determine this scenario.
When it occurs, the -outputtypehandler receives as string and then passes off to a -processing function which detects, for each returned value, if a -decimal point is present, and if so converts to ``Decimal``, -otherwise to int. The intention is that simple int-based -statements like "SELECT my_seq.nextval() FROM DUAL" continue to -return ints and not ``Decimal`` objects, and that any kind of -floating point value is received as a string so that there is no -floating point loss of precision. - -The "decimal point is present" logic itself is also sensitive to -locale. Under OCI_, this is controlled by the NLS_LANG -environment variable. Upon first connection, the dialect runs a -test to determine the current "decimal" character, which can be -a comma "," for European locales. From that point forward the -outputtypehandler uses that character to represent a decimal -point. Note that cx_oracle 5.0.3 or greater is required -when dealing with numerics with locale settings that don't use -a period "." as the decimal character. - -.. versionchanged:: 0.6.6 - The outputtypehandler supports the case where the locale uses a - comma "," character to represent a decimal point. - -.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html - -""" - -from __future__ import absolute_import - -from .base import OracleCompiler, OracleDialect, OracleExecutionContext -from . import base as oracle -from ...engine import result as _result -from sqlalchemy import types as sqltypes, util, exc, processors -from sqlalchemy import util -import random -import collections -import decimal -import re - - -class _OracleNumeric(sqltypes.Numeric): - def bind_processor(self, dialect): - # cx_oracle accepts Decimal objects and floats - return None - - def result_processor(self, dialect, coltype): - # we apply a cx_oracle type handler to all connections - # that converts floating point strings to Decimal(). - # However, in some subquery situations, Oracle doesn't - # give us enough information to determine int or Decimal. - # It could even be int/Decimal differently on each row, - # regardless of the scale given for the originating type. - # So we still need an old school isinstance() handler - # here for decimals. - - if dialect.supports_native_decimal: - if self.asdecimal: - fstring = "%%.%df" % self._effective_decimal_return_scale - - def to_decimal(value): - if value is None: - return None - elif isinstance(value, decimal.Decimal): - return value - else: - return decimal.Decimal(fstring % value) - - return to_decimal - else: - if self.precision is None and self.scale is None: - return processors.to_float - elif not getattr(self, '_is_oracle_number', False) \ - and self.scale is not None: - return processors.to_float - else: - return None - else: - # cx_oracle 4 behavior, will assume - # floats - return super(_OracleNumeric, self).\ - result_processor(dialect, coltype) - - -class _OracleDate(sqltypes.Date): - def bind_processor(self, dialect): - return None - - def result_processor(self, dialect, coltype): - def process(value): - if value is not None: - return value.date() - else: - return value - return process - - -class _LOBMixin(object): - def result_processor(self, dialect, coltype): - if not dialect.auto_convert_lobs: - # return the cx_oracle.LOB directly. 
- return None - - def process(value): - if value is not None: - return value.read() - else: - return value - return process - - -class _NativeUnicodeMixin(object): - if util.py2k: - def bind_processor(self, dialect): - if dialect._cx_oracle_with_unicode: - def process(value): - if value is None: - return value - else: - return unicode(value) - return process - else: - return super( - _NativeUnicodeMixin, self).bind_processor(dialect) - - # we apply a connection output handler that returns - # unicode in all cases, so the "native_unicode" flag - # will be set for the default String.result_processor. - - -class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR): - def get_dbapi_type(self, dbapi): - return dbapi.FIXED_CHAR - - -class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR): - def get_dbapi_type(self, dbapi): - return getattr(dbapi, 'UNICODE', dbapi.STRING) - - -class _OracleText(_LOBMixin, sqltypes.Text): - def get_dbapi_type(self, dbapi): - return dbapi.CLOB - - -class _OracleLong(oracle.LONG): - # a raw LONG is a text type, but does *not* - # get the LobMixin with cx_oracle. - - def get_dbapi_type(self, dbapi): - return dbapi.LONG_STRING - - -class _OracleString(_NativeUnicodeMixin, sqltypes.String): - pass - - -class _OracleUnicodeText( - _LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText): - def get_dbapi_type(self, dbapi): - return dbapi.NCLOB - - def result_processor(self, dialect, coltype): - lob_processor = _LOBMixin.result_processor(self, dialect, coltype) - if lob_processor is None: - return None - - string_processor = sqltypes.UnicodeText.result_processor( - self, dialect, coltype) - - if string_processor is None: - return lob_processor - else: - def process(value): - return string_processor(lob_processor(value)) - return process - - -class _OracleInteger(sqltypes.Integer): - def result_processor(self, dialect, coltype): - def to_int(val): - if val is not None: - val = int(val) - return val - return to_int - - -class _OracleBinary(_LOBMixin, sqltypes.LargeBinary): - def get_dbapi_type(self, dbapi): - return dbapi.BLOB - - def bind_processor(self, dialect): - return None - - -class _OracleInterval(oracle.INTERVAL): - def get_dbapi_type(self, dbapi): - return dbapi.INTERVAL - - -class _OracleRaw(oracle.RAW): - pass - - -class _OracleRowid(oracle.ROWID): - def get_dbapi_type(self, dbapi): - return dbapi.ROWID - - -class OracleCompiler_cx_oracle(OracleCompiler): - def bindparam_string(self, name, **kw): - quote = getattr(name, 'quote', None) - if quote is True or quote is not False and \ - self.preparer._bindparam_requires_quotes(name): - quoted_name = '"%s"' % name - self._quoted_bind_names[name] = quoted_name - return OracleCompiler.bindparam_string(self, quoted_name, **kw) - else: - return OracleCompiler.bindparam_string(self, name, **kw) - - -class OracleExecutionContext_cx_oracle(OracleExecutionContext): - - def pre_exec(self): - quoted_bind_names = \ - getattr(self.compiled, '_quoted_bind_names', None) - if quoted_bind_names: - if not self.dialect.supports_unicode_statements: - # if DBAPI doesn't accept unicode statements, - # keys in self.parameters would have been encoded - # here. so convert names in quoted_bind_names - # to encoded as well. 
- quoted_bind_names = \ - dict( - (fromname.encode(self.dialect.encoding), - toname.encode(self.dialect.encoding)) - for fromname, toname in - quoted_bind_names.items() - ) - for param in self.parameters: - for fromname, toname in quoted_bind_names.items(): - param[toname] = param[fromname] - del param[fromname] - - if self.dialect.auto_setinputsizes: - # cx_oracle really has issues when you setinputsizes - # on String, including that outparams/RETURNING - # breaks for varchars - self.set_input_sizes( - quoted_bind_names, - exclude_types=self.dialect.exclude_setinputsizes - ) - - # if a single execute, check for outparams - if len(self.compiled_parameters) == 1: - for bindparam in self.compiled.binds.values(): - if bindparam.isoutparam: - dbtype = bindparam.type.dialect_impl(self.dialect).\ - get_dbapi_type(self.dialect.dbapi) - if not hasattr(self, 'out_parameters'): - self.out_parameters = {} - if dbtype is None: - raise exc.InvalidRequestError( - "Cannot create out parameter for parameter " - "%r - its type %r is not supported by" - " cx_oracle" % - (bindparam.key, bindparam.type) - ) - name = self.compiled.bind_names[bindparam] - self.out_parameters[name] = self.cursor.var(dbtype) - self.parameters[0][quoted_bind_names.get(name, name)] = \ - self.out_parameters[name] - - def create_cursor(self): - c = self._dbapi_connection.cursor() - if self.dialect.arraysize: - c.arraysize = self.dialect.arraysize - - return c - - def get_result_proxy(self): - if hasattr(self, 'out_parameters') and self.compiled.returning: - returning_params = dict( - (k, v.getvalue()) - for k, v in self.out_parameters.items() - ) - return ReturningResultProxy(self, returning_params) - - result = None - if self.cursor.description is not None: - for column in self.cursor.description: - type_code = column[1] - if type_code in self.dialect._cx_oracle_binary_types: - result = _result.BufferedColumnResultProxy(self) - - if result is None: - result = _result.ResultProxy(self) - - if hasattr(self, 'out_parameters'): - if self.compiled_parameters is not None and \ - len(self.compiled_parameters) == 1: - result.out_parameters = out_parameters = {} - - for bind, name in self.compiled.bind_names.items(): - if name in self.out_parameters: - type = bind.type - impl_type = type.dialect_impl(self.dialect) - dbapi_type = impl_type.get_dbapi_type( - self.dialect.dbapi) - result_processor = impl_type.\ - result_processor(self.dialect, - dbapi_type) - if result_processor is not None: - out_parameters[name] = \ - result_processor( - self.out_parameters[name].getvalue()) - else: - out_parameters[name] = self.out_parameters[ - name].getvalue() - else: - result.out_parameters = dict( - (k, v.getvalue()) - for k, v in self.out_parameters.items() - ) - - return result - - -class OracleExecutionContext_cx_oracle_with_unicode( - OracleExecutionContext_cx_oracle): - """Support WITH_UNICODE in Python 2.xx. - - WITH_UNICODE allows cx_Oracle's Python 3 unicode handling - behavior under Python 2.x. This mode in some cases disallows - and in other cases silently passes corrupted data when - non-Python-unicode strings (a.k.a. plain old Python strings) - are passed as arguments to connect(), the statement sent to execute(), - or any of the bind parameter keys or values sent to execute(). - This optional context therefore ensures that all statements are - passed as Python unicode objects. 
- - """ - - def __init__(self, *arg, **kw): - OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw) - self.statement = util.text_type(self.statement) - - def _execute_scalar(self, stmt): - return super(OracleExecutionContext_cx_oracle_with_unicode, self).\ - _execute_scalar(util.text_type(stmt)) - - -class ReturningResultProxy(_result.FullyBufferedResultProxy): - """Result proxy which stuffs the _returning clause + outparams - into the fetch.""" - - def __init__(self, context, returning_params): - self._returning_params = returning_params - super(ReturningResultProxy, self).__init__(context) - - def _cursor_description(self): - returning = self.context.compiled.returning - return [ - ("ret_%d" % i, None) - for i, col in enumerate(returning) - ] - - def _buffer_rows(self): - return collections.deque( - [tuple(self._returning_params["ret_%d" % i] - for i, c in enumerate(self._returning_params))] - ) - - -class OracleDialect_cx_oracle(OracleDialect): - execution_ctx_cls = OracleExecutionContext_cx_oracle - statement_compiler = OracleCompiler_cx_oracle - - driver = "cx_oracle" - - colspecs = colspecs = { - sqltypes.Numeric: _OracleNumeric, - # generic type, assume datetime.date is desired - sqltypes.Date: _OracleDate, - sqltypes.LargeBinary: _OracleBinary, - sqltypes.Boolean: oracle._OracleBoolean, - sqltypes.Interval: _OracleInterval, - oracle.INTERVAL: _OracleInterval, - sqltypes.Text: _OracleText, - sqltypes.String: _OracleString, - sqltypes.UnicodeText: _OracleUnicodeText, - sqltypes.CHAR: _OracleChar, - - # a raw LONG is a text type, but does *not* - # get the LobMixin with cx_oracle. - oracle.LONG: _OracleLong, - - # this is only needed for OUT parameters. - # it would be nice if we could not use it otherwise. - sqltypes.Integer: _OracleInteger, - - oracle.RAW: _OracleRaw, - sqltypes.Unicode: _OracleNVarChar, - sqltypes.NVARCHAR: _OracleNVarChar, - oracle.ROWID: _OracleRowid, - } - - execute_sequence_format = list - - def __init__(self, - auto_setinputsizes=True, - exclude_setinputsizes=("STRING", "UNICODE"), - auto_convert_lobs=True, - threaded=True, - allow_twophase=True, - coerce_to_decimal=True, - coerce_to_unicode=False, - arraysize=50, **kwargs): - OracleDialect.__init__(self, **kwargs) - self.threaded = threaded - self.arraysize = arraysize - self.allow_twophase = allow_twophase - self.supports_timestamp = self.dbapi is None or \ - hasattr(self.dbapi, 'TIMESTAMP') - self.auto_setinputsizes = auto_setinputsizes - self.auto_convert_lobs = auto_convert_lobs - - if hasattr(self.dbapi, 'version'): - self.cx_oracle_ver = tuple([int(x) for x in - self.dbapi.version.split('.')]) - else: - self.cx_oracle_ver = (0, 0, 0) - - def types(*names): - return set( - getattr(self.dbapi, name, None) for name in names - ).difference([None]) - - self.exclude_setinputsizes = types(*(exclude_setinputsizes or ())) - self._cx_oracle_string_types = types("STRING", "UNICODE", - "NCLOB", "CLOB") - self._cx_oracle_unicode_types = types("UNICODE", "NCLOB") - self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB") - self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0) - - self.coerce_to_unicode = ( - self.cx_oracle_ver >= (5, 0) and - coerce_to_unicode - ) - - self.supports_native_decimal = ( - self.cx_oracle_ver >= (5, 0) and - coerce_to_decimal - ) - - self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0) - - if self.cx_oracle_ver is None: - # this occurs in tests with mock DBAPIs - self._cx_oracle_string_types = set() - self._cx_oracle_with_unicode = False - elif util.py3k 
or ( - self.cx_oracle_ver >= (5,) and not \ - hasattr(self.dbapi, 'UNICODE') - ): - # cx_Oracle WITH_UNICODE mode. *only* python - # unicode objects accepted for anything - self.supports_unicode_statements = True - self.supports_unicode_binds = True - self._cx_oracle_with_unicode = True - - if util.py2k: - # There's really no reason to run with WITH_UNICODE under - # Python 2.x. Give the user a hint. - util.warn( - "cx_Oracle is compiled under Python 2.xx using the " - "WITH_UNICODE flag. Consider recompiling cx_Oracle " - "without this flag, which is in no way necessary for " - "full support of Unicode. Otherwise, all string-holding " - "bind parameters must be explicitly typed using " - "SQLAlchemy's String type or one of its subtypes," - "or otherwise be passed as Python unicode. " - "Plain Python strings passed as bind parameters will be " - "silently corrupted by cx_Oracle." - ) - self.execution_ctx_cls = \ - OracleExecutionContext_cx_oracle_with_unicode - else: - self._cx_oracle_with_unicode = False - - if self.cx_oracle_ver is None or \ - not self.auto_convert_lobs or \ - not hasattr(self.dbapi, 'CLOB'): - self.dbapi_type_map = {} - else: - # only use this for LOB objects. using it for strings, dates - # etc. leads to a little too much magic, reflection doesn't know - # if it should expect encoded strings or unicodes, etc. - self.dbapi_type_map = { - self.dbapi.CLOB: oracle.CLOB(), - self.dbapi.NCLOB: oracle.NCLOB(), - self.dbapi.BLOB: oracle.BLOB(), - self.dbapi.BINARY: oracle.RAW(), - } - - @classmethod - def dbapi(cls): - import cx_Oracle - return cx_Oracle - - def initialize(self, connection): - super(OracleDialect_cx_oracle, self).initialize(connection) - if self._is_oracle_8: - self.supports_unicode_binds = False - self._detect_decimal_char(connection) - - def _detect_decimal_char(self, connection): - """detect if the decimal separator character is not '.', as - is the case with European locale settings for NLS_LANG. - - cx_oracle itself uses similar logic when it formats Python - Decimal objects to strings on the bind side (as of 5.0.3), - as Oracle sends/receives string numerics only in the - current locale. - - """ - if self.cx_oracle_ver < (5,): - # no output type handlers before version 5 - return - - cx_Oracle = self.dbapi - conn = connection.connection - - # override the output_type_handler that's - # on the cx_oracle connection with a plain - # one on the cursor - - def output_type_handler(cursor, name, defaultType, - size, precision, scale): - return cursor.var( - cx_Oracle.STRING, - 255, arraysize=cursor.arraysize) - - cursor = conn.cursor() - cursor.outputtypehandler = output_type_handler - cursor.execute("SELECT 0.1 FROM DUAL") - val = cursor.fetchone()[0] - cursor.close() - char = re.match(r"([\.,])", val).group(1) - if char != '.': - _detect_decimal = self._detect_decimal - self._detect_decimal = \ - lambda value: _detect_decimal(value.replace(char, '.')) - self._to_decimal = \ - lambda value: decimal.Decimal(value.replace(char, '.')) - - def _detect_decimal(self, value): - if "." in value: - return decimal.Decimal(value) - else: - return int(value) - - _to_decimal = decimal.Decimal - - def on_connect(self): - if self.cx_oracle_ver < (5,): - # no output type handlers before version 5 - return - - cx_Oracle = self.dbapi - - def output_type_handler(cursor, name, defaultType, - size, precision, scale): - # convert all NUMBER with precision + positive scale to Decimal - # this almost allows "native decimal" mode. 
- if self.supports_native_decimal and \ - defaultType == cx_Oracle.NUMBER and \ - precision and scale > 0: - return cursor.var( - cx_Oracle.STRING, - 255, - outconverter=self._to_decimal, - arraysize=cursor.arraysize) - # if NUMBER with zero precision and 0 or neg scale, this appears - # to indicate "ambiguous". Use a slower converter that will - # make a decision based on each value received - the type - # may change from row to row (!). This kills - # off "native decimal" mode, handlers still needed. - elif self.supports_native_decimal and \ - defaultType == cx_Oracle.NUMBER \ - and not precision and scale <= 0: - return cursor.var( - cx_Oracle.STRING, - 255, - outconverter=self._detect_decimal, - arraysize=cursor.arraysize) - # allow all strings to come back natively as Unicode - elif self.coerce_to_unicode and \ - defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR): - return cursor.var(util.text_type, size, cursor.arraysize) - - def on_connect(conn): - conn.outputtypehandler = output_type_handler - - return on_connect - - def create_connect_args(self, url): - dialect_opts = dict(url.query) - for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs', - 'threaded', 'allow_twophase'): - if opt in dialect_opts: - util.coerce_kw_type(dialect_opts, opt, bool) - setattr(self, opt, dialect_opts[opt]) - - database = url.database - service_name = dialect_opts.get('service_name', None) - if database or service_name: - # if we have a database, then we have a remote host - port = url.port - if port: - port = int(port) - else: - port = 1521 - - if database and service_name: - raise exc.InvalidRequestError( - '"service_name" option shouldn\'t ' - 'be used with a "database" part of the url') - if database: - makedsn_kwargs = {'sid': database} - if service_name: - makedsn_kwargs = {'service_name': service_name} - - dsn = self.dbapi.makedsn(url.host, port, **makedsn_kwargs) - else: - # we have a local tnsname - dsn = url.host - - opts = dict( - user=url.username, - password=url.password, - dsn=dsn, - threaded=self.threaded, - twophase=self.allow_twophase, - ) - - if util.py2k: - if self._cx_oracle_with_unicode: - for k, v in opts.items(): - if isinstance(v, str): - opts[k] = unicode(v) - else: - for k, v in opts.items(): - if isinstance(v, unicode): - opts[k] = str(v) - - if 'mode' in url.query: - opts['mode'] = url.query['mode'] - if isinstance(opts['mode'], util.string_types): - mode = opts['mode'].upper() - if mode == 'SYSDBA': - opts['mode'] = self.dbapi.SYSDBA - elif mode == 'SYSOPER': - opts['mode'] = self.dbapi.SYSOPER - else: - util.coerce_kw_type(opts, 'mode', int) - return ([], opts) - - def _get_server_version_info(self, connection): - return tuple( - int(x) - for x in connection.connection.version.split('.') - ) - - def is_disconnect(self, e, connection, cursor): - error, = e.args - if isinstance(e, self.dbapi.InterfaceError): - return "not connected" in str(e) - elif hasattr(error, 'code'): - # ORA-00028: your session has been killed - # ORA-03114: not connected to ORACLE - # ORA-03113: end-of-file on communication channel - # ORA-03135: connection lost contact - # ORA-01033: ORACLE initialization or shutdown in progress - # ORA-02396: exceeded maximum idle time, please connect again - # TODO: Others ? - return error.code in (28, 3114, 3113, 3135, 1033, 2396) - else: - return False - - def create_xid(self): - """create a two-phase transaction ID. - - this id will be passed to do_begin_twophase(), do_rollback_twophase(), - do_commit_twophase(). 
its format is unspecified.""" - - id = random.randint(0, 2 ** 128) - return (0x1234, "%032x" % id, "%032x" % 9) - - def do_executemany(self, cursor, statement, parameters, context=None): - if isinstance(parameters, tuple): - parameters = list(parameters) - cursor.executemany(statement, parameters) - - def do_begin_twophase(self, connection, xid): - connection.connection.begin(*xid) - - def do_prepare_twophase(self, connection, xid): - result = connection.connection.prepare() - connection.info['cx_oracle_prepared'] = result - - def do_rollback_twophase(self, connection, xid, is_prepared=True, - recover=False): - self.do_rollback(connection.connection) - - def do_commit_twophase(self, connection, xid, is_prepared=True, - recover=False): - if not is_prepared: - self.do_commit(connection.connection) - else: - oci_prepared = connection.info['cx_oracle_prepared'] - if oci_prepared: - self.do_commit(connection.connection) - - def do_recover_twophase(self, connection): - connection.info.pop('cx_oracle_prepared', None) - -dialect = OracleDialect_cx_oracle diff --git a/python/sqlalchemy/dialects/oracle/zxjdbc.py b/python/sqlalchemy/dialects/oracle/zxjdbc.py deleted file mode 100644 index ab1ade04..00000000 --- a/python/sqlalchemy/dialects/oracle/zxjdbc.py +++ /dev/null @@ -1,237 +0,0 @@ -# oracle/zxjdbc.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: oracle+zxjdbc - :name: zxJDBC for Jython - :dbapi: zxjdbc - :connectstring: oracle+zxjdbc://user:pass@host/dbname - :driverurl: http://www.oracle.com/technology/software/tech/java/\ -sqlj_jdbc/index.html. - - .. note:: Jython is not supported by current versions of SQLAlchemy. The - zxjdbc dialect should be considered as experimental. - -""" -import decimal -import re - -from sqlalchemy import sql, types as sqltypes, util -from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector -from sqlalchemy.dialects.oracle.base import (OracleCompiler, - OracleDialect, - OracleExecutionContext) -from sqlalchemy.engine import result as _result -from sqlalchemy.sql import expression -import collections - -SQLException = zxJDBC = None - - -class _ZxJDBCDate(sqltypes.Date): - - def result_processor(self, dialect, coltype): - def process(value): - if value is None: - return None - else: - return value.date() - return process - - -class _ZxJDBCNumeric(sqltypes.Numeric): - - def result_processor(self, dialect, coltype): - # XXX: does the dialect return Decimal or not??? 
- # if it does (in all cases), we could use a None processor as well as - # the to_float generic processor - if self.asdecimal: - def process(value): - if isinstance(value, decimal.Decimal): - return value - else: - return decimal.Decimal(str(value)) - else: - def process(value): - if isinstance(value, decimal.Decimal): - return float(value) - else: - return value - return process - - -class OracleCompiler_zxjdbc(OracleCompiler): - - def returning_clause(self, stmt, returning_cols): - self.returning_cols = list( - expression._select_iterables(returning_cols)) - - # within_columns_clause=False so that labels (foo AS bar) don't render - columns = [self.process(c, within_columns_clause=False, - result_map=self.result_map) - for c in self.returning_cols] - - if not hasattr(self, 'returning_parameters'): - self.returning_parameters = [] - - binds = [] - for i, col in enumerate(self.returning_cols): - dbtype = col.type.dialect_impl( - self.dialect).get_dbapi_type(self.dialect.dbapi) - self.returning_parameters.append((i + 1, dbtype)) - - bindparam = sql.bindparam( - "ret_%d" % i, value=ReturningParam(dbtype)) - self.binds[bindparam.key] = bindparam - binds.append( - self.bindparam_string(self._truncate_bindparam(bindparam))) - - return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) - - -class OracleExecutionContext_zxjdbc(OracleExecutionContext): - - def pre_exec(self): - if hasattr(self.compiled, 'returning_parameters'): - # prepare a zxJDBC statement so we can grab its underlying - # OraclePreparedStatement's getReturnResultSet later - self.statement = self.cursor.prepare(self.statement) - - def get_result_proxy(self): - if hasattr(self.compiled, 'returning_parameters'): - rrs = None - try: - try: - rrs = self.statement.__statement__.getReturnResultSet() - next(rrs) - except SQLException as sqle: - msg = '%s [SQLCode: %d]' % ( - sqle.getMessage(), sqle.getErrorCode()) - if sqle.getSQLState() is not None: - msg += ' [SQLState: %s]' % sqle.getSQLState() - raise zxJDBC.Error(msg) - else: - row = tuple( - self.cursor.datahandler.getPyObject( - rrs, index, dbtype) - for index, dbtype in - self.compiled.returning_parameters) - return ReturningResultProxy(self, row) - finally: - if rrs is not None: - try: - rrs.close() - except SQLException: - pass - self.statement.close() - - return _result.ResultProxy(self) - - def create_cursor(self): - cursor = self._dbapi_connection.cursor() - cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) - return cursor - - -class ReturningResultProxy(_result.FullyBufferedResultProxy): - - """ResultProxy backed by the RETURNING ResultSet results.""" - - def __init__(self, context, returning_row): - self._returning_row = returning_row - super(ReturningResultProxy, self).__init__(context) - - def _cursor_description(self): - ret = [] - for c in self.context.compiled.returning_cols: - if hasattr(c, 'name'): - ret.append((c.name, c.type)) - else: - ret.append((c.anon_label, c.type)) - return ret - - def _buffer_rows(self): - return collections.deque([self._returning_row]) - - -class ReturningParam(object): - - """A bindparam value representing a RETURNING parameter. - - Specially handled by OracleReturningDataHandler. 
- """ - - def __init__(self, type): - self.type = type - - def __eq__(self, other): - if isinstance(other, ReturningParam): - return self.type == other.type - return NotImplemented - - def __ne__(self, other): - if isinstance(other, ReturningParam): - return self.type != other.type - return NotImplemented - - def __repr__(self): - kls = self.__class__ - return '<%s.%s object at 0x%x type=%s>' % ( - kls.__module__, kls.__name__, id(self), self.type) - - -class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect): - jdbc_db_name = 'oracle' - jdbc_driver_name = 'oracle.jdbc.OracleDriver' - - statement_compiler = OracleCompiler_zxjdbc - execution_ctx_cls = OracleExecutionContext_zxjdbc - - colspecs = util.update_copy( - OracleDialect.colspecs, - { - sqltypes.Date: _ZxJDBCDate, - sqltypes.Numeric: _ZxJDBCNumeric - } - ) - - def __init__(self, *args, **kwargs): - super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs) - global SQLException, zxJDBC - from java.sql import SQLException - from com.ziclix.python.sql import zxJDBC - from com.ziclix.python.sql.handler import OracleDataHandler - - class OracleReturningDataHandler(OracleDataHandler): - """zxJDBC DataHandler that specially handles ReturningParam.""" - - def setJDBCObject(self, statement, index, object, dbtype=None): - if type(object) is ReturningParam: - statement.registerReturnParameter(index, object.type) - elif dbtype is None: - OracleDataHandler.setJDBCObject( - self, statement, index, object) - else: - OracleDataHandler.setJDBCObject( - self, statement, index, object, dbtype) - self.DataHandler = OracleReturningDataHandler - - def initialize(self, connection): - super(OracleDialect_zxjdbc, self).initialize(connection) - self.implicit_returning = \ - connection.connection.driverversion >= '10.2' - - def _create_jdbc_url(self, url): - return 'jdbc:oracle:thin:@%s:%s:%s' % ( - url.host, url.port or 1521, url.database) - - def _get_server_version_info(self, connection): - version = re.search( - r'Release ([\d\.]+)', connection.connection.dbversion).group(1) - return tuple(int(x) for x in version.split('.')) - -dialect = OracleDialect_zxjdbc diff --git a/python/sqlalchemy/dialects/postgres.py b/python/sqlalchemy/dialects/postgres.py deleted file mode 100644 index 3335333e..00000000 --- a/python/sqlalchemy/dialects/postgres.py +++ /dev/null @@ -1,18 +0,0 @@ -# dialects/postgres.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -# backwards compat with the old name -from sqlalchemy.util import warn_deprecated - -warn_deprecated( - "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to " - "'postgresql'. The new URL format is " - "postgresql[+driver]://:@/" -) - -from sqlalchemy.dialects.postgresql import * -from sqlalchemy.dialects.postgresql import base diff --git a/python/sqlalchemy/dialects/postgresql/__init__.py b/python/sqlalchemy/dialects/postgresql/__init__.py deleted file mode 100644 index 98fe6f08..00000000 --- a/python/sqlalchemy/dialects/postgresql/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# postgresql/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from . 
-
-base.dialect = psycopg2.dialect
-
-from .base import \
-    INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
-    INET, CIDR, UUID, BIT, MACADDR, OID, DOUBLE_PRECISION, TIMESTAMP, TIME, \
-    DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All, \
-    TSVECTOR, DropEnumType
-from .constraints import ExcludeConstraint
-from .hstore import HSTORE, hstore
-from .json import JSON, JSONElement, JSONB
-from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
-    TSTZRANGE
-
-__all__ = (
-    'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
-    'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'OID',
-    'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
-    'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE',
-    'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
-    'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'JSONElement',
-    'DropEnumType'
-)
diff --git a/python/sqlalchemy/dialects/postgresql/base.py b/python/sqlalchemy/dialects/postgresql/base.py
deleted file mode 100644
index fd525256..00000000
--- a/python/sqlalchemy/dialects/postgresql/base.py
+++ /dev/null
@@ -1,2934 +0,0 @@
-# postgresql/base.py
-# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-.. dialect:: postgresql
-    :name: PostgreSQL
-
-
-Sequences/SERIAL
-----------------
-
-PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
-of creating new primary key values for integer-based primary key columns. When
-creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
-integer-based primary key columns, which generates a sequence and server side
-default corresponding to the column.
-
-To specify a particular named sequence to be used for primary key generation,
-use the :func:`~sqlalchemy.schema.Sequence` construct::
-
-    Table('sometable', metadata,
-          Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
-    )
-
-When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
-having the "last insert identifier" available, a RETURNING clause is added to
-the INSERT statement which specifies the primary key columns should be
-returned after the statement completes. The RETURNING functionality only takes
-place if Postgresql 8.2 or later is in use. As a fallback approach, the
-sequence, whether specified explicitly or implicitly via ``SERIAL``, is
-executed independently beforehand, the returned value to be used in the
-subsequent insert. Note that when an
-:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
-"executemany" semantics, the "last inserted identifier" functionality does not
-apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
-case.
-
-To turn off the use of RETURNING by default, specify the flag
-``implicit_returning=False`` to :func:`.create_engine`.
-
-.. _postgresql_isolation_level:
-
-Transaction Isolation Level
----------------------------
-
-All Postgresql dialects support setting of transaction isolation level
-both via a dialect-specific parameter
-:paramref:`.create_engine.isolation_level` accepted by :func:`.create_engine`,
-as well as the ``isolation_level`` argument as passed to
-:meth:`.Connection.execution_options`. When using a non-psycopg2 dialect,
-this feature works by issuing the command
-``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for
-each new connection.
-
-To set isolation level using :func:`.create_engine`::
-
-    engine = create_engine(
-        "postgresql+pg8000://scott:tiger@localhost/test",
-        isolation_level="READ UNCOMMITTED"
-    )
-
-To set using per-connection execution options::
-
-    connection = engine.connect()
-    connection = connection.execution_options(
-        isolation_level="READ COMMITTED"
-    )
-
-Valid values for ``isolation_level`` include:
-
-* ``READ COMMITTED``
-* ``READ UNCOMMITTED``
-* ``REPEATABLE READ``
-* ``SERIALIZABLE``
-
-The :mod:`~sqlalchemy.dialects.postgresql.psycopg2` and
-:mod:`~sqlalchemy.dialects.postgresql.pg8000` dialects also offer the
-special level ``AUTOCOMMIT``.
-
-.. seealso::
-
-    :ref:`psycopg2_isolation_level`
-
-    :ref:`pg8000_isolation_level`
-
-.. _postgresql_schema_reflection:
-
-Remote-Schema Table Introspection and Postgresql search_path
-------------------------------------------------------------
-
-The Postgresql dialect can reflect tables from any schema. The
-:paramref:`.Table.schema` argument, or alternatively the
-:paramref:`.MetaData.reflect.schema` argument determines which schema will
-be searched for the table or tables. The reflected :class:`.Table` objects
-will in all cases retain this ``.schema`` attribute as was specified.
-However, with regards to tables which these :class:`.Table` objects refer to
-via foreign key constraint, a decision must be made as to how the ``.schema``
-is represented in those remote tables, in the case where that remote
-schema name is also a member of the current
-`Postgresql search path
-`_.
-
-By default, the Postgresql dialect mimics the behavior encouraged by
-Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function
-returns a sample definition for a particular foreign key constraint,
-omitting the referenced schema name from that definition when the name is
-also in the Postgresql schema search path. The interaction below
-illustrates this behavior::

    test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
    CREATE TABLE
    test=> CREATE TABLE referring(
    test(>         id INTEGER PRIMARY KEY,
    test(>         referred_id INTEGER REFERENCES test_schema.referred(id));
    CREATE TABLE
    test=> SET search_path TO public, test_schema;
    test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
    test->  pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
    test->  ON n.oid = c.relnamespace
    test->  JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
    test->  WHERE c.relname='referring' AND r.contype = 'f'
    test->  ;
                   pg_get_constraintdef
    ---------------------------------------------------
     FOREIGN KEY (referred_id) REFERENCES referred(id)
    (1 row)

-Above, we created a table ``referred`` as a member of the remote schema
-``test_schema``, however when we added ``test_schema`` to the
-PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
-``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
-the function.
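Because the schema name that reflection reports depends on the active
``search_path``, it can be worth confirming what the session actually has set
before reflecting; a small sketch (the connect string is illustrative)::

    from sqlalchemy import create_engine

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    with engine.connect() as conn:
        # the search_path under which the reflection queries will run
        print(conn.execute("SHOW search_path").scalar())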
- -On the other hand, if we set the search path back to the typical default -of ``public``:: - - test=> SET search_path TO public; - SET - -The same query against ``pg_get_constraintdef()`` now returns the fully -schema-qualified name for us:: - - test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM - test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n - test-> ON n.oid = c.relnamespace - test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid - test-> WHERE c.relname='referring' AND r.contype = 'f'; - pg_get_constraintdef - --------------------------------------------------------------- - FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id) - (1 row) - -SQLAlchemy will by default use the return value of ``pg_get_constraintdef()`` -in order to determine the remote schema name. That is, if our ``search_path`` -were set to include ``test_schema``, and we invoked a table -reflection process as follows:: - - >>> from sqlalchemy import Table, MetaData, create_engine - >>> engine = create_engine("postgresql://scott:tiger@localhost/test") - >>> with engine.connect() as conn: - ... conn.execute("SET search_path TO test_schema, public") - ... meta = MetaData() - ... referring = Table('referring', meta, - ... autoload=True, autoload_with=conn) - ... - - -The above process would deliver to the :attr:`.MetaData.tables` collection -``referred`` table named **without** the schema:: - - >>> meta.tables['referred'].schema is None - True - -To alter the behavior of reflection such that the referred schema is -maintained regardless of the ``search_path`` setting, use the -``postgresql_ignore_search_path`` option, which can be specified as a -dialect-specific argument to both :class:`.Table` as well as -:meth:`.MetaData.reflect`:: - - >>> with engine.connect() as conn: - ... conn.execute("SET search_path TO test_schema, public") - ... meta = MetaData() - ... referring = Table('referring', meta, autoload=True, - ... autoload_with=conn, - ... postgresql_ignore_search_path=True) - ... - - -We will now have ``test_schema.referred`` stored as schema-qualified:: - - >>> meta.tables['test_schema.referred'].schema - 'test_schema' - -.. sidebar:: Best Practices for Postgresql Schema reflection - - The description of Postgresql schema reflection behavior is complex, and - is the product of many years of dealing with widely varied use cases and - user preferences. But in fact, there's no need to understand any of it if - you just stick to the simplest use pattern: leave the ``search_path`` set - to its default of ``public`` only, never refer to the name ``public`` as - an explicit schema name otherwise, and refer to all other schema names - explicitly when building up a :class:`.Table` object. The options - described here are only for those users who can't, or prefer not to, stay - within these guidelines. - -Note that **in all cases**, the "default" schema is always reflected as -``None``. The "default" schema on Postgresql is that which is returned by the -Postgresql ``current_schema()`` function. On a typical Postgresql -installation, this is the name ``public``. So a table that refers to another -which is in the ``public`` (i.e. default) schema will always have the -``.schema`` attribute set to ``None``. - -.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path`` - dialect-level option accepted by :class:`.Table` and - :meth:`.MetaData.reflect`. - - -.. seealso:: - - `The Schema Search Path - `_ - - on the Postgresql website. 
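In line with the sidebar above, the simplest arrangement skips
``search_path`` manipulation entirely and names the schema explicitly at
reflection time; a sketch, reusing the ``test_schema.referred`` table from
the examples (the connect string is illustrative)::

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    meta = MetaData()

    # name the schema explicitly; no SET search_path required
    referred = Table('referred', meta, schema='test_schema',
                     autoload=True, autoload_with=engine)
    print(referred.schema)  # 'test_schema'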
-
-INSERT/UPDATE...RETURNING
--------------------------
-
-The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
-``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
-for single-row INSERT statements in order to fetch newly generated
-primary key identifiers. To specify an explicit ``RETURNING`` clause,
-use the :meth:`._UpdateBase.returning` method on a per-statement basis::
-
-    # INSERT..RETURNING
-    result = table.insert().returning(table.c.col1, table.c.col2).\\
-        values(name='foo')
-    print result.fetchall()
-
-    # UPDATE..RETURNING
-    result = table.update().returning(table.c.col1, table.c.col2).\\
-        where(table.c.name=='foo').values(name='bar')
-    print result.fetchall()
-
-    # DELETE..RETURNING
-    result = table.delete().returning(table.c.col1, table.c.col2).\\
-        where(table.c.name=='foo')
-    print result.fetchall()
-
-.. _postgresql_match:
-
-Full Text Search
-----------------
-
-SQLAlchemy makes available the Postgresql ``@@`` operator via the
-:meth:`.ColumnElement.match` method on any textual column expression.
-On a Postgresql dialect, an expression like the following::
-
-    select([sometable.c.text.match("search string")])
-
-will emit to the database::
-
-    SELECT text @@ to_tsquery('search string') FROM table
-
-The Postgresql text search functions such as ``to_tsquery()``
-and ``to_tsvector()`` are available
-explicitly using the standard :data:`.func` construct.  For example::
-
-    select([
-        func.to_tsvector('fat cats ate rats').match('cat & rat')
-    ])
-
-Emits the equivalent of::
-
-    SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat')
-
-The :class:`.postgresql.TSVECTOR` type can provide for explicit CAST::
-
-    from sqlalchemy.dialects.postgresql import TSVECTOR
-    from sqlalchemy import select, cast
-    select([cast("some text", TSVECTOR)])
-
-produces a statement equivalent to::
-
-    SELECT CAST('some text' AS TSVECTOR) AS anon_1
-
-Full Text Searches in Postgresql are influenced by a combination of: the
-PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
-to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
-during a query.
-
-When performing a Full Text Search against a column that has a GIN or
-GiST index that is already pre-computed (which is common on full text
-searches) one may need to explicitly pass in a particular PostgreSQL
-``regconfig`` value to ensure the query-planner utilizes the index and does
-not re-compute the column on demand.
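As a concrete illustration, such a pre-computed index is typically built over
the very ``to_tsvector()`` expression, with an explicit ``regconfig``, that
queries will later match against; a sketch only, with illustrative table and
index names::

    from sqlalchemy import (Column, Index, Integer, MetaData, Table, Text,
                            func)

    metadata = MetaData()
    doc = Table('doc', metadata,
                Column('id', Integer, primary_key=True),
                Column('body', Text))

    # index the same expression, with the same 'english' regconfig,
    # that queries are expected to use
    Index('ix_doc_body_tsv',
          func.to_tsvector('english', doc.c.body),
          postgresql_using='gin')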
-
-In order to provide for this explicit query planning, or to use different
-search strategies, the ``match`` method accepts a ``postgresql_regconfig``
-keyword argument::
-
-    select([mytable.c.id]).where(
-        mytable.c.title.match('somestring', postgresql_regconfig='english')
-    )
-
-Emits the equivalent of::
-
-    SELECT mytable.id FROM mytable
-    WHERE mytable.title @@ to_tsquery('english', 'somestring')
-
-One can also specifically pass in a ``regconfig`` value to the
-``to_tsvector()`` command as the initial argument::
-
-    select([mytable.c.id]).where(
-        func.to_tsvector('english', mytable.c.title)\
-            .match('somestring', postgresql_regconfig='english')
-    )
-
-produces a statement equivalent to::
-
-    SELECT mytable.id FROM mytable
-    WHERE to_tsvector('english', mytable.title) @@
-        to_tsquery('english', 'somestring')
-
-It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
-PostgreSQL to ensure that you are generating queries with SQLAlchemy that
-take full advantage of any indexes you may have created for full text search.
-
-FROM ONLY ...
--------------
-
-The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
-table in an inheritance hierarchy. This can be used to produce the
-``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
-syntaxes. It uses SQLAlchemy's hints mechanism::
-
-    # SELECT ... FROM ONLY ...
-    result = table.select().with_hint(table, 'ONLY', 'postgresql')
-    print result.fetchall()
-
-    # UPDATE ONLY ...
-    table.update(values=dict(foo='bar')).with_hint('ONLY',
-                                                   dialect_name='postgresql')
-
-    # DELETE FROM ONLY ...
-    table.delete().with_hint('ONLY', dialect_name='postgresql')
-
-.. _postgresql_indexes:
-
-Postgresql-Specific Index Options
----------------------------------
-
-Several extensions to the :class:`.Index` construct are available, specific
-to the PostgreSQL dialect.
-
-Partial Indexes
-^^^^^^^^^^^^^^^
-
-Partial indexes add criteria to the index definition so that the index is
-applied to a subset of rows. These can be specified on :class:`.Index`
-using the ``postgresql_where`` keyword argument::
-
-    Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10)
-
-Operator Classes
-^^^^^^^^^^^^^^^^
-
-PostgreSQL allows the specification of an *operator class* for each column of
-an index (see
-http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
-The :class:`.Index` construct allows these to be specified via the
-``postgresql_ops`` keyword argument::
-
-    Index('my_index', my_table.c.id, my_table.c.data,
-          postgresql_ops={
-              'data': 'text_pattern_ops',
-              'id': 'int4_ops'
-          })
-
-.. versionadded:: 0.7.2
-    ``postgresql_ops`` keyword argument to :class:`.Index` construct.
-
-Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
-the :class:`.Column`, i.e. the name used to access it from the ``.c``
-collection of :class:`.Table`, which can be configured to be different than
-the actual name of the column as expressed in the database.
-
-Index Types
-^^^^^^^^^^^
-
-PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
-as the ability for users to create their own (see
-http://www.postgresql.org/docs/8.3/static/indexes-types.html). 
These can be -specified on :class:`.Index` using the ``postgresql_using`` keyword argument:: - - Index('my_index', my_table.c.data, postgresql_using='gin') - -The value passed to the keyword argument will be simply passed through to the -underlying CREATE INDEX command, so it *must* be a valid index type for your -version of PostgreSQL. - -.. _postgresql_index_storage: - -Index Storage Parameters -^^^^^^^^^^^^^^^^^^^^^^^^ - -PostgreSQL allows storage parameters to be set on indexes. The storage -parameters available depend on the index method used by the index. Storage -parameters can be specified on :class:`.Index` using the ``postgresql_with`` -keyword argument:: - - Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50}) - -.. versionadded:: 1.0.6 - -.. _postgresql_index_concurrently: - -Indexes with CONCURRENTLY -^^^^^^^^^^^^^^^^^^^^^^^^^ - -The Postgresql index option CONCURRENTLY is supported by passing the -flag ``postgresql_concurrently`` to the :class:`.Index` construct:: - - tbl = Table('testtbl', m, Column('data', Integer)) - - idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True) - -The above index construct will render SQL as:: - - CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data) - -.. versionadded:: 0.9.9 - -.. _postgresql_index_reflection: - -Postgresql Index Reflection ---------------------------- - -The Postgresql database creates a UNIQUE INDEX implicitly whenever the -UNIQUE CONSTRAINT construct is used. When inspecting a table using -:class:`.Inspector`, the :meth:`.Inspector.get_indexes` -and the :meth:`.Inspector.get_unique_constraints` will report on these -two constructs distinctly; in the case of the index, the key -``duplicates_constraint`` will be present in the index entry if it is -detected as mirroring a constraint. When performing reflection using -``Table(..., autoload=True)``, the UNIQUE INDEX is **not** returned -in :attr:`.Table.indexes` when it is detected as mirroring a -:class:`.UniqueConstraint` in the :attr:`.Table.constraints` collection. - -.. versionchanged:: 1.0.0 - :class:`.Table` reflection now includes - :class:`.UniqueConstraint` objects present in the :attr:`.Table.constraints` - collection; the Postgresql backend will no longer include a "mirrored" - :class:`.Index` construct in :attr:`.Table.indexes` if it is detected - as corresponding to a unique constraint. - -Special Reflection Options --------------------------- - -The :class:`.Inspector` used for the Postgresql backend is an instance -of :class:`.PGInspector`, which offers additional methods:: - - from sqlalchemy import create_engine, inspect - - engine = create_engine("postgresql+psycopg2://localhost/test") - insp = inspect(engine) # will be a PGInspector - - print(insp.get_enums()) - -.. autoclass:: PGInspector - :members: - -.. 
_postgresql_table_options:
-
-PostgreSQL Table Options
-------------------------
-
-Several options for CREATE TABLE are supported directly by the PostgreSQL
-dialect in conjunction with the :class:`.Table` construct:
-
-* ``TABLESPACE``::
-
-    Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
-
-* ``ON COMMIT``::
-
-    Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
-
-* ``WITH OIDS``::
-
-    Table("some_table", metadata, ..., postgresql_with_oids=True)
-
-* ``WITHOUT OIDS``::
-
-    Table("some_table", metadata, ..., postgresql_with_oids=False)
-
-* ``INHERITS``::
-
-    Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
-
-    Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
-
-.. versionadded:: 1.0.0
-
-.. seealso::
-
-    `Postgresql CREATE TABLE options
-    `_
-
-ENUM Types
-----------
-
-Postgresql has an independently creatable TYPE structure which is used
-to implement an enumerated type. This approach introduces significant
-complexity on the SQLAlchemy side in terms of when this type should be
-CREATED and DROPPED. The type object is also an independently reflectable
-entity. The following sections should be consulted:
-
-* :class:`.postgresql.ENUM` - DDL and typing support for ENUM.
-
-* :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types
-
-* :meth:`.postgresql.ENUM.create`, :meth:`.postgresql.ENUM.drop` - individual
-  CREATE and DROP commands for ENUM.
-
-.. _postgresql_array_of_enum:
-
-Using ENUM with ARRAY
-^^^^^^^^^^^^^^^^^^^^^
-
-The combination of ENUM and ARRAY is not directly supported by backend
-DBAPIs at this time. In order to send and receive an ARRAY of ENUM,
-use the following workaround type::
-
-    class ArrayOfEnum(ARRAY):
-
-        def bind_expression(self, bindvalue):
-            return sa.cast(bindvalue, self)
-
-        def result_processor(self, dialect, coltype):
-            super_rp = super(ArrayOfEnum, self).result_processor(
-                dialect, coltype)
-
-            def handle_raw_string(value):
-                inner = re.match(r"^{(.*)}$", value).group(1)
-                return inner.split(",")
-
-            def process(value):
-                if value is None:
-                    return None
-                return super_rp(handle_raw_string(value))
-            return process
-
-E.g.::
-
-    Table(
-        'mydata', metadata,
-        Column('id', Integer, primary_key=True),
-        Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
-    )
-
-This type is not included as a built-in type as it would be incompatible
-with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
-a new version.
-
-"""
-from collections import defaultdict
-import re
-
-from ... import sql, schema, exc, util
-from ...engine import default, reflection
-from ...sql import compiler, expression, operators, default_comparator
-from ... 
import types as sqltypes - -try: - from uuid import UUID as _python_UUID -except ImportError: - _python_UUID = None - -from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \ - CHAR, TEXT, FLOAT, NUMERIC, \ - DATE, BOOLEAN, REAL - -RESERVED_WORDS = set( - ["all", "analyse", "analyze", "and", "any", "array", "as", "asc", - "asymmetric", "both", "case", "cast", "check", "collate", "column", - "constraint", "create", "current_catalog", "current_date", - "current_role", "current_time", "current_timestamp", "current_user", - "default", "deferrable", "desc", "distinct", "do", "else", "end", - "except", "false", "fetch", "for", "foreign", "from", "grant", "group", - "having", "in", "initially", "intersect", "into", "leading", "limit", - "localtime", "localtimestamp", "new", "not", "null", "of", "off", - "offset", "old", "on", "only", "or", "order", "placing", "primary", - "references", "returning", "select", "session_user", "some", "symmetric", - "table", "then", "to", "trailing", "true", "union", "unique", "user", - "using", "variadic", "when", "where", "window", "with", "authorization", - "between", "binary", "cross", "current_schema", "freeze", "full", - "ilike", "inner", "is", "isnull", "join", "left", "like", "natural", - "notnull", "outer", "over", "overlaps", "right", "similar", "verbose" - ]) - -_DECIMAL_TYPES = (1231, 1700) -_FLOAT_TYPES = (700, 701, 1021, 1022) -_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016) - - -class BYTEA(sqltypes.LargeBinary): - __visit_name__ = 'BYTEA' - - -class DOUBLE_PRECISION(sqltypes.Float): - __visit_name__ = 'DOUBLE_PRECISION' - - -class INET(sqltypes.TypeEngine): - __visit_name__ = "INET" -PGInet = INET - - -class CIDR(sqltypes.TypeEngine): - __visit_name__ = "CIDR" -PGCidr = CIDR - - -class MACADDR(sqltypes.TypeEngine): - __visit_name__ = "MACADDR" -PGMacAddr = MACADDR - - -class OID(sqltypes.TypeEngine): - - """Provide the Postgresql OID type. - - .. versionadded:: 0.9.5 - - """ - __visit_name__ = "OID" - - -class TIMESTAMP(sqltypes.TIMESTAMP): - - def __init__(self, timezone=False, precision=None): - super(TIMESTAMP, self).__init__(timezone=timezone) - self.precision = precision - - -class TIME(sqltypes.TIME): - - def __init__(self, timezone=False, precision=None): - super(TIME, self).__init__(timezone=timezone) - self.precision = precision - - -class INTERVAL(sqltypes.TypeEngine): - - """Postgresql INTERVAL type. - - The INTERVAL type may not be supported on all DBAPIs. - It is known to work on psycopg2 and not pg8000 or zxjdbc. - - """ - __visit_name__ = 'INTERVAL' - - def __init__(self, precision=None): - self.precision = precision - - @classmethod - def _adapt_from_generic_interval(cls, interval): - return INTERVAL(precision=interval.second_precision) - - @property - def _type_affinity(self): - return sqltypes.Interval - -PGInterval = INTERVAL - - -class BIT(sqltypes.TypeEngine): - __visit_name__ = 'BIT' - - def __init__(self, length=None, varying=False): - if not varying: - # BIT without VARYING defaults to length 1 - self.length = length or 1 - else: - # but BIT VARYING can be unlimited-length, so no default - self.length = length - self.varying = varying - -PGBit = BIT - - -class UUID(sqltypes.TypeEngine): - - """Postgresql UUID type. - - Represents the UUID column type, interpreting - data either as natively returned by the DBAPI - or as Python uuid objects. - - The UUID type may not be supported on all DBAPIs. - It is known to work on psycopg2 and not pg8000. 
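A short usage sketch with ``as_uuid=True``, under which values round-trip as
``uuid.UUID`` objects (the table and column names are illustrative)::

    import uuid

    from sqlalchemy import Column, MetaData, Table
    from sqlalchemy.dialects.postgresql import UUID

    metadata = MetaData()
    items = Table('items', metadata,
                  Column('guid', UUID(as_uuid=True), primary_key=True))

    # bind_processor() stringifies uuid.UUID values on the way in;
    # result_processor() converts strings back to uuid.UUID on the way out
    new_key = uuid.uuid4()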
- - """ - __visit_name__ = 'UUID' - - def __init__(self, as_uuid=False): - """Construct a UUID type. - - - :param as_uuid=False: if True, values will be interpreted - as Python uuid objects, converting to/from string via the - DBAPI. - - """ - if as_uuid and _python_UUID is None: - raise NotImplementedError( - "This version of Python does not support " - "the native UUID type." - ) - self.as_uuid = as_uuid - - def bind_processor(self, dialect): - if self.as_uuid: - def process(value): - if value is not None: - value = util.text_type(value) - return value - return process - else: - return None - - def result_processor(self, dialect, coltype): - if self.as_uuid: - def process(value): - if value is not None: - value = _python_UUID(value) - return value - return process - else: - return None - -PGUuid = UUID - - -class TSVECTOR(sqltypes.TypeEngine): - - """The :class:`.postgresql.TSVECTOR` type implements the Postgresql - text search type TSVECTOR. - - It can be used to do full text queries on natural language - documents. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :ref:`postgresql_match` - - """ - __visit_name__ = 'TSVECTOR' - - -class _Slice(expression.ColumnElement): - __visit_name__ = 'slice' - type = sqltypes.NULLTYPE - - def __init__(self, slice_, source_comparator): - self.start = default_comparator._check_literal( - source_comparator.expr, - operators.getitem, slice_.start) - self.stop = default_comparator._check_literal( - source_comparator.expr, - operators.getitem, slice_.stop) - - -class Any(expression.ColumnElement): - - """Represent the clause ``left operator ANY (right)``. ``right`` must be - an array expression. - - .. seealso:: - - :class:`.postgresql.ARRAY` - - :meth:`.postgresql.ARRAY.Comparator.any` - ARRAY-bound method - - """ - __visit_name__ = 'any' - - def __init__(self, left, right, operator=operators.eq): - self.type = sqltypes.Boolean() - self.left = expression._literal_as_binds(left) - self.right = right - self.operator = operator - - -class All(expression.ColumnElement): - - """Represent the clause ``left operator ALL (right)``. ``right`` must be - an array expression. - - .. seealso:: - - :class:`.postgresql.ARRAY` - - :meth:`.postgresql.ARRAY.Comparator.all` - ARRAY-bound method - - """ - __visit_name__ = 'all' - - def __init__(self, left, right, operator=operators.eq): - self.type = sqltypes.Boolean() - self.left = expression._literal_as_binds(left) - self.right = right - self.operator = operator - - -class array(expression.Tuple): - - """A Postgresql ARRAY literal. - - This is used to produce ARRAY literals in SQL expressions, e.g.:: - - from sqlalchemy.dialects.postgresql import array - from sqlalchemy.dialects import postgresql - from sqlalchemy import select, func - - stmt = select([ - array([1,2]) + array([3,4,5]) - ]) - - print stmt.compile(dialect=postgresql.dialect()) - - Produces the SQL:: - - SELECT ARRAY[%(param_1)s, %(param_2)s] || - ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1 - - An instance of :class:`.array` will always have the datatype - :class:`.ARRAY`. The "inner" type of the array is inferred from - the values present, unless the ``type_`` keyword argument is passed:: - - array(['foo', 'bar'], type_=CHAR) - - .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type. 
- - See also: - - :class:`.postgresql.ARRAY` - - """ - __visit_name__ = 'array' - - def __init__(self, clauses, **kw): - super(array, self).__init__(*clauses, **kw) - self.type = ARRAY(self.type) - - def _bind_param(self, operator, obj): - return array([ - expression.BindParameter(None, o, _compared_to_operator=operator, - _compared_to_type=self.type, unique=True) - for o in obj - ]) - - def self_group(self, against=None): - return self - - -class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): - - """Postgresql ARRAY type. - - Represents values as Python lists. - - An :class:`.ARRAY` type is constructed given the "type" - of element:: - - mytable = Table("mytable", metadata, - Column("data", ARRAY(Integer)) - ) - - The above type represents an N-dimensional array, - meaning Postgresql will interpret values with any number - of dimensions automatically. To produce an INSERT - construct that passes in a 1-dimensional array of integers:: - - connection.execute( - mytable.insert(), - data=[1,2,3] - ) - - The :class:`.ARRAY` type can be constructed given a fixed number - of dimensions:: - - mytable = Table("mytable", metadata, - Column("data", ARRAY(Integer, dimensions=2)) - ) - - This has the effect of the :class:`.ARRAY` type - specifying that number of bracketed blocks when a :class:`.Table` - is used in a CREATE TABLE statement, or when the type is used - within a :func:`.expression.cast` construct; it also causes - the bind parameter and result set processing of the type - to optimize itself to expect exactly that number of dimensions. - Note that Postgresql itself still allows N dimensions with such a type. - - SQL expressions of type :class:`.ARRAY` have support for "index" and - "slice" behavior. The Python ``[]`` operator works normally here, given - integer indexes or slices. Note that Postgresql arrays default - to 1-based indexing. The operator produces binary expression - constructs which will produce the appropriate SQL, both for - SELECT statements:: - - select([mytable.c.data[5], mytable.c.data[2:7]]) - - as well as UPDATE statements when the :meth:`.Update.values` method - is used:: - - mytable.update().values({ - mytable.c.data[5]: 7, - mytable.c.data[2:7]: [1, 2, 3] - }) - - .. note:: - - Multi-dimensional support for the ``[]`` operator is not supported - in SQLAlchemy 1.0. Please use the :func:`.type_coerce` function - to cast an intermediary expression to ARRAY again as a workaround:: - - expr = type_coerce(my_array_column[5], ARRAY(Integer))[6] - - Multi-dimensional support will be provided in a future release. - - :class:`.ARRAY` provides special methods for containment operations, - e.g.:: - - mytable.c.data.contains([1, 2]) - - For a full list of special methods see :class:`.ARRAY.Comparator`. - - .. versionadded:: 0.8 Added support for index and slice operations - to the :class:`.ARRAY` type, including support for UPDATE - statements, and special array containment operations. - - The :class:`.ARRAY` type may not be supported on all DBAPIs. - It is known to work on psycopg2 and not pg8000. - - Additionally, the :class:`.ARRAY` type does not work directly in - conjunction with the :class:`.ENUM` type. For a workaround, see the - special type at :ref:`postgresql_array_of_enum`. - - See also: - - :class:`.postgresql.array` - produce a literal array value. 
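A compact sketch of the containment methods described above, which render as
the ``@>``, ``<@`` and ``&&`` operators respectively (the table name is
illustrative)::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.postgresql import ARRAY

    metadata = MetaData()
    mytable = Table('mytable', metadata, Column('data', ARRAY(Integer)))

    # WHERE data @> ARRAY[...]   (superset test)
    supersets = select([mytable]).where(mytable.c.data.contains([1, 2]))

    # WHERE data && ARRAY[...]   (overlap test)
    overlapping = select([mytable]).where(mytable.c.data.overlap([3, 4]))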
- - """ - __visit_name__ = 'ARRAY' - - class Comparator(sqltypes.Concatenable.Comparator): - - """Define comparison operations for :class:`.ARRAY`.""" - - def __getitem__(self, index): - shift_indexes = 1 if self.expr.type.zero_indexes else 0 - if isinstance(index, slice): - if shift_indexes: - index = slice( - index.start + shift_indexes, - index.stop + shift_indexes, - index.step - ) - index = _Slice(index, self) - return_type = self.type - else: - index += shift_indexes - return_type = self.type.item_type - - return default_comparator._binary_operate( - self.expr, operators.getitem, index, - result_type=return_type) - - def any(self, other, operator=operators.eq): - """Return ``other operator ANY (array)`` clause. - - Argument places are switched, because ANY requires array - expression to be on the right hand-side. - - E.g.:: - - from sqlalchemy.sql import operators - - conn.execute( - select([table.c.data]).where( - table.c.data.any(7, operator=operators.lt) - ) - ) - - :param other: expression to be compared - :param operator: an operator object from the - :mod:`sqlalchemy.sql.operators` - package, defaults to :func:`.operators.eq`. - - .. seealso:: - - :class:`.postgresql.Any` - - :meth:`.postgresql.ARRAY.Comparator.all` - - """ - return Any(other, self.expr, operator=operator) - - def all(self, other, operator=operators.eq): - """Return ``other operator ALL (array)`` clause. - - Argument places are switched, because ALL requires array - expression to be on the right hand-side. - - E.g.:: - - from sqlalchemy.sql import operators - - conn.execute( - select([table.c.data]).where( - table.c.data.all(7, operator=operators.lt) - ) - ) - - :param other: expression to be compared - :param operator: an operator object from the - :mod:`sqlalchemy.sql.operators` - package, defaults to :func:`.operators.eq`. - - .. seealso:: - - :class:`.postgresql.All` - - :meth:`.postgresql.ARRAY.Comparator.any` - - """ - return All(other, self.expr, operator=operator) - - def contains(self, other, **kwargs): - """Boolean expression. Test if elements are a superset of the - elements of the argument array expression. - """ - return self.expr.op('@>')(other) - - def contained_by(self, other): - """Boolean expression. Test if elements are a proper subset of the - elements of the argument array expression. - """ - return self.expr.op('<@')(other) - - def overlap(self, other): - """Boolean expression. Test if array has elements in common with - an argument array expression. - """ - return self.expr.op('&&')(other) - - def _adapt_expression(self, op, other_comparator): - if isinstance(op, operators.custom_op): - if op.opstring in ['@>', '<@', '&&']: - return op, sqltypes.Boolean - return sqltypes.Concatenable.Comparator.\ - _adapt_expression(self, op, other_comparator) - - comparator_factory = Comparator - - def __init__(self, item_type, as_tuple=False, dimensions=None, - zero_indexes=False): - """Construct an ARRAY. - - E.g.:: - - Column('myarray', ARRAY(Integer)) - - Arguments are: - - :param item_type: The data type of items of this array. Note that - dimensionality is irrelevant here, so multi-dimensional arrays like - ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as - ``ARRAY(ARRAY(Integer))`` or such. - - :param as_tuple=False: Specify whether return results - should be converted to tuples from lists. DBAPIs such - as psycopg2 return lists by default. When tuples are - returned, the results are hashable. - - :param dimensions: if non-None, the ARRAY will assume a fixed - number of dimensions. 
This will cause the DDL emitted for this - ARRAY to include the exact number of bracket clauses ``[]``, - and will also optimize the performance of the type overall. - Note that PG arrays are always implicitly "non-dimensioned", - meaning they can store any number of dimensions no matter how - they were declared. - - :param zero_indexes=False: when True, index values will be converted - between Python zero-based and Postgresql one-based indexes, e.g. - a value of one will be added to all index values before passing - to the database. - - .. versionadded:: 0.9.5 - - """ - if isinstance(item_type, ARRAY): - raise ValueError("Do not nest ARRAY types; ARRAY(basetype) " - "handles multi-dimensional arrays of basetype") - if isinstance(item_type, type): - item_type = item_type() - self.item_type = item_type - self.as_tuple = as_tuple - self.dimensions = dimensions - self.zero_indexes = zero_indexes - - @property - def python_type(self): - return list - - def compare_values(self, x, y): - return x == y - - def _proc_array(self, arr, itemproc, dim, collection): - if dim is None: - arr = list(arr) - if dim == 1 or dim is None and ( - # this has to be (list, tuple), or at least - # not hasattr('__iter__'), since Py3K strings - # etc. have __iter__ - not arr or not isinstance(arr[0], (list, tuple))): - if itemproc: - return collection(itemproc(x) for x in arr) - else: - return collection(arr) - else: - return collection( - self._proc_array( - x, itemproc, - dim - 1 if dim is not None else None, - collection) - for x in arr - ) - - def bind_processor(self, dialect): - item_proc = self.item_type.\ - dialect_impl(dialect).\ - bind_processor(dialect) - - def process(value): - if value is None: - return value - else: - return self._proc_array( - value, - item_proc, - self.dimensions, - list) - return process - - def result_processor(self, dialect, coltype): - item_proc = self.item_type.\ - dialect_impl(dialect).\ - result_processor(dialect, coltype) - - def process(value): - if value is None: - return value - else: - return self._proc_array( - value, - item_proc, - self.dimensions, - tuple if self.as_tuple else list) - return process - -PGArray = ARRAY - - -class ENUM(sqltypes.Enum): - - """Postgresql ENUM type. - - This is a subclass of :class:`.types.Enum` which includes - support for PG's ``CREATE TYPE`` and ``DROP TYPE``. - - When the builtin type :class:`.types.Enum` is used and the - :paramref:`.Enum.native_enum` flag is left at its default of - True, the Postgresql backend will use a :class:`.postgresql.ENUM` - type as the implementation, so the special create/drop rules - will be used. - - The create/drop behavior of ENUM is necessarily intricate, due to the - awkward relationship the ENUM type has in relationship to the - parent table, in that it may be "owned" by just a single table, or - may be shared among many tables. 
- - When using :class:`.types.Enum` or :class:`.postgresql.ENUM` - in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` is emitted - corresponding to when the :meth:`.Table.create` and :meth:`.Table.drop` - methods are called:: - - table = Table('sometable', metadata, - Column('some_enum', ENUM('a', 'b', 'c', name='myenum')) - ) - - table.create(engine) # will emit CREATE ENUM and CREATE TABLE - table.drop(engine) # will emit DROP TABLE and DROP ENUM - - To use a common enumerated type between multiple tables, the best - practice is to declare the :class:`.types.Enum` or - :class:`.postgresql.ENUM` independently, and associate it with the - :class:`.MetaData` object itself:: - - my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata) - - t1 = Table('sometable_one', metadata, - Column('some_enum', myenum) - ) - - t2 = Table('sometable_two', metadata, - Column('some_enum', myenum) - ) - - When this pattern is used, care must still be taken at the level - of individual table creates. Emitting CREATE TABLE without also - specifying ``checkfirst=True`` will still cause issues:: - - t1.create(engine) # will fail: no such type 'myenum' - - If we specify ``checkfirst=True``, the individual table-level create - operation will check for the ``ENUM`` and create if not exists:: - - # will check if enum exists, and emit CREATE TYPE if not - t1.create(engine, checkfirst=True) - - When using a metadata-level ENUM type, the type will always be created - and dropped if either the metadata-wide create/drop is called:: - - metadata.create_all(engine) # will emit CREATE TYPE - metadata.drop_all(engine) # will emit DROP TYPE - - The type can also be created and dropped directly:: - - my_enum.create(engine) - my_enum.drop(engine) - - .. versionchanged:: 1.0.0 The Postgresql :class:`.postgresql.ENUM` type - now behaves more strictly with regards to CREATE/DROP. A metadata-level - ENUM type will only be created and dropped at the metadata level, - not the table level, with the exception of - ``table.create(checkfirst=True)``. - The ``table.drop()`` call will now emit a DROP TYPE for a table-level - enumerated type. - - """ - - def __init__(self, *enums, **kw): - """Construct an :class:`~.postgresql.ENUM`. - - Arguments are the same as that of - :class:`.types.Enum`, but also including - the following parameters. - - :param create_type: Defaults to True. - Indicates that ``CREATE TYPE`` should be - emitted, after optionally checking for the - presence of the type, when the parent - table is being created; and additionally - that ``DROP TYPE`` is called when the table - is dropped. When ``False``, no check - will be performed and no ``CREATE TYPE`` - or ``DROP TYPE`` is emitted, unless - :meth:`~.postgresql.ENUM.create` - or :meth:`~.postgresql.ENUM.drop` - are called directly. - Setting to ``False`` is helpful - when invoking a creation scheme to a SQL file - without access to the actual database - - the :meth:`~.postgresql.ENUM.create` and - :meth:`~.postgresql.ENUM.drop` methods can - be used to emit SQL to a target bind. - - .. versionadded:: 0.7.4 - - """ - self.create_type = kw.pop("create_type", True) - super(ENUM, self).__init__(*enums, **kw) - - def create(self, bind=None, checkfirst=True): - """Emit ``CREATE TYPE`` for this - :class:`~.postgresql.ENUM`. - - If the underlying dialect does not support - Postgresql CREATE TYPE, no action is taken. - - :param bind: a connectable :class:`.Engine`, - :class:`.Connection`, or similar object to emit - SQL. 
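
A hedged sketch of the ``create_type=False`` workflow described above;
``metadata`` and ``engine`` are placeholders::

    status = ENUM('new', 'done', name='job_status', create_type=False)
    jobs = Table('jobs', metadata, Column('status', status))

    status.create(engine, checkfirst=True)  # explicit CREATE TYPE
    jobs.create(engine)                     # emits no implicit CREATE TYPE
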
- :param checkfirst: if ``True``, a query against - the PG catalog will be first performed to see - if the type does not exist already before - creating. - - """ - if not bind.dialect.supports_native_enum: - return - - if not checkfirst or \ - not bind.dialect.has_type( - bind, self.name, schema=self.schema): - bind.execute(CreateEnumType(self)) - - def drop(self, bind=None, checkfirst=True): - """Emit ``DROP TYPE`` for this - :class:`~.postgresql.ENUM`. - - If the underlying dialect does not support - Postgresql DROP TYPE, no action is taken. - - :param bind: a connectable :class:`.Engine`, - :class:`.Connection`, or similar object to emit - SQL. - :param checkfirst: if ``True``, a query against - the PG catalog will be first performed to see - if the type actually exists before dropping. - - """ - if not bind.dialect.supports_native_enum: - return - - if not checkfirst or \ - bind.dialect.has_type(bind, self.name, schema=self.schema): - bind.execute(DropEnumType(self)) - - def _check_for_name_in_memos(self, checkfirst, kw): - """Look in the 'ddl runner' for 'memos', then - note our name in that collection. - - This to ensure a particular named enum is operated - upon only once within any kind of create/drop - sequence without relying upon "checkfirst". - - """ - if not self.create_type: - return True - if '_ddl_runner' in kw: - ddl_runner = kw['_ddl_runner'] - if '_pg_enums' in ddl_runner.memo: - pg_enums = ddl_runner.memo['_pg_enums'] - else: - pg_enums = ddl_runner.memo['_pg_enums'] = set() - present = self.name in pg_enums - pg_enums.add(self.name) - return present - else: - return False - - def _on_table_create(self, target, bind, checkfirst, **kw): - if checkfirst or ( - not self.metadata and - not kw.get('_is_metadata_operation', False)) and \ - not self._check_for_name_in_memos(checkfirst, kw): - self.create(bind=bind, checkfirst=checkfirst) - - def _on_table_drop(self, target, bind, checkfirst, **kw): - if not self.metadata and \ - not kw.get('_is_metadata_operation', False) and \ - not self._check_for_name_in_memos(checkfirst, kw): - self.drop(bind=bind, checkfirst=checkfirst) - - def _on_metadata_create(self, target, bind, checkfirst, **kw): - if not self._check_for_name_in_memos(checkfirst, kw): - self.create(bind=bind, checkfirst=checkfirst) - - def _on_metadata_drop(self, target, bind, checkfirst, **kw): - if not self._check_for_name_in_memos(checkfirst, kw): - self.drop(bind=bind, checkfirst=checkfirst) - -colspecs = { - sqltypes.Interval: INTERVAL, - sqltypes.Enum: ENUM, -} - -ischema_names = { - 'integer': INTEGER, - 'bigint': BIGINT, - 'smallint': SMALLINT, - 'character varying': VARCHAR, - 'character': CHAR, - '"char"': sqltypes.String, - 'name': sqltypes.String, - 'text': TEXT, - 'numeric': NUMERIC, - 'float': FLOAT, - 'real': REAL, - 'inet': INET, - 'cidr': CIDR, - 'uuid': UUID, - 'bit': BIT, - 'bit varying': BIT, - 'macaddr': MACADDR, - 'oid': OID, - 'double precision': DOUBLE_PRECISION, - 'timestamp': TIMESTAMP, - 'timestamp with time zone': TIMESTAMP, - 'timestamp without time zone': TIMESTAMP, - 'time with time zone': TIME, - 'time without time zone': TIME, - 'date': DATE, - 'time': TIME, - 'bytea': BYTEA, - 'boolean': BOOLEAN, - 'interval': INTERVAL, - 'interval year to month': INTERVAL, - 'interval day to second': INTERVAL, - 'tsvector': TSVECTOR -} - - -class PGCompiler(compiler.SQLCompiler): - - def visit_array(self, element, **kw): - return "ARRAY[%s]" % self.visit_clauselist(element, **kw) - - def visit_slice(self, element, **kw): - return "%s:%s" % ( - 
self.process(element.start, **kw), - self.process(element.stop, **kw), - ) - - def visit_any(self, element, **kw): - return "%s%sANY (%s)" % ( - self.process(element.left, **kw), - compiler.OPERATORS[element.operator], - self.process(element.right, **kw) - ) - - def visit_all(self, element, **kw): - return "%s%sALL (%s)" % ( - self.process(element.left, **kw), - compiler.OPERATORS[element.operator], - self.process(element.right, **kw) - ) - - def visit_getitem_binary(self, binary, operator, **kw): - return "%s[%s]" % ( - self.process(binary.left, **kw), - self.process(binary.right, **kw) - ) - - def visit_match_op_binary(self, binary, operator, **kw): - if "postgresql_regconfig" in binary.modifiers: - regconfig = self.render_literal_value( - binary.modifiers['postgresql_regconfig'], - sqltypes.STRINGTYPE) - if regconfig: - return "%s @@ to_tsquery(%s, %s)" % ( - self.process(binary.left, **kw), - regconfig, - self.process(binary.right, **kw) - ) - return "%s @@ to_tsquery(%s)" % ( - self.process(binary.left, **kw), - self.process(binary.right, **kw) - ) - - def visit_ilike_op_binary(self, binary, operator, **kw): - escape = binary.modifiers.get("escape", None) - - return '%s ILIKE %s' % \ - (self.process(binary.left, **kw), - self.process(binary.right, **kw)) \ - + ( - ' ESCAPE ' + - self.render_literal_value(escape, sqltypes.STRINGTYPE) - if escape else '' - ) - - def visit_notilike_op_binary(self, binary, operator, **kw): - escape = binary.modifiers.get("escape", None) - return '%s NOT ILIKE %s' % \ - (self.process(binary.left, **kw), - self.process(binary.right, **kw)) \ - + ( - ' ESCAPE ' + - self.render_literal_value(escape, sqltypes.STRINGTYPE) - if escape else '' - ) - - def render_literal_value(self, value, type_): - value = super(PGCompiler, self).render_literal_value(value, type_) - - if self.dialect._backslash_escapes: - value = value.replace('\\', '\\\\') - return value - - def visit_sequence(self, seq): - return "nextval('%s')" % self.preparer.format_sequence(seq) - - def limit_clause(self, select, **kw): - text = "" - if select._limit_clause is not None: - text += " \n LIMIT " + self.process(select._limit_clause, **kw) - if select._offset_clause is not None: - if select._limit_clause is None: - text += " \n LIMIT ALL" - text += " OFFSET " + self.process(select._offset_clause, **kw) - return text - - def format_from_hint_text(self, sqltext, table, hint, iscrud): - if hint.upper() != 'ONLY': - raise exc.CompileError("Unrecognized hint: %r" % hint) - return "ONLY " + sqltext - - def get_select_precolumns(self, select, **kw): - if select._distinct is not False: - if select._distinct is True: - return "DISTINCT " - elif isinstance(select._distinct, (list, tuple)): - return "DISTINCT ON (" + ', '.join( - [self.process(col) for col in select._distinct] - ) + ") " - else: - return "DISTINCT ON (" + \ - self.process(select._distinct, **kw) + ") " - else: - return "" - - def for_update_clause(self, select, **kw): - - if select._for_update_arg.read: - tmp = " FOR SHARE" - else: - tmp = " FOR UPDATE" - - if select._for_update_arg.of: - tables = util.OrderedSet( - c.table if isinstance(c, expression.ColumnClause) - else c for c in select._for_update_arg.of) - tmp += " OF " + ", ".join( - self.process(table, ashint=True, **kw) - for table in tables - ) - - if select._for_update_arg.nowait: - tmp += " NOWAIT" - - return tmp - - def returning_clause(self, stmt, returning_cols): - - columns = [ - self._label_select_column(None, c, True, False, {}) - for c in 
expression._select_iterables(returning_cols) - ] - - return 'RETURNING ' + ', '.join(columns) - - def visit_substring_func(self, func, **kw): - s = self.process(func.clauses.clauses[0], **kw) - start = self.process(func.clauses.clauses[1], **kw) - if len(func.clauses.clauses) > 2: - length = self.process(func.clauses.clauses[2], **kw) - return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length) - else: - return "SUBSTRING(%s FROM %s)" % (s, start) - - -class PGDDLCompiler(compiler.DDLCompiler): - - def get_column_specification(self, column, **kwargs): - - colspec = self.preparer.format_column(column) - impl_type = column.type.dialect_impl(self.dialect) - if column.primary_key and \ - column is column.table._autoincrement_column and \ - ( - self.dialect.supports_smallserial or - not isinstance(impl_type, sqltypes.SmallInteger) - ) and ( - column.default is None or - ( - isinstance(column.default, schema.Sequence) and - column.default.optional - )): - if isinstance(impl_type, sqltypes.BigInteger): - colspec += " BIGSERIAL" - elif isinstance(impl_type, sqltypes.SmallInteger): - colspec += " SMALLSERIAL" - else: - colspec += " SERIAL" - else: - colspec += " " + self.dialect.type_compiler.process(column.type, - type_expression=column) - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - if not column.nullable: - colspec += " NOT NULL" - return colspec - - def visit_create_enum_type(self, create): - type_ = create.element - - return "CREATE TYPE %s AS ENUM (%s)" % ( - self.preparer.format_type(type_), - ", ".join( - self.sql_compiler.process(sql.literal(e), literal_binds=True) - for e in type_.enums) - ) - - def visit_drop_enum_type(self, drop): - type_ = drop.element - - return "DROP TYPE %s" % ( - self.preparer.format_type(type_) - ) - - def visit_create_index(self, create): - preparer = self.preparer - index = create.element - self._verify_index_table(index) - text = "CREATE " - if index.unique: - text += "UNIQUE " - text += "INDEX " - - concurrently = index.dialect_options['postgresql']['concurrently'] - if concurrently: - text += "CONCURRENTLY " - - text += "%s ON %s " % ( - self._prepared_index_name(index, - include_schema=False), - preparer.format_table(index.table) - ) - - using = index.dialect_options['postgresql']['using'] - if using: - text += "USING %s " % preparer.quote(using) - - ops = index.dialect_options["postgresql"]["ops"] - text += "(%s)" \ - % ( - ', '.join([ - self.sql_compiler.process( - expr.self_group() - if not isinstance(expr, expression.ColumnClause) - else expr, - include_table=False, literal_binds=True) + - ( - (' ' + ops[expr.key]) - if hasattr(expr, 'key') - and expr.key in ops else '' - ) - for expr in index.expressions - ]) - ) - - withclause = index.dialect_options['postgresql']['with'] - - if withclause: - text += " WITH (%s)" % (', '.join( - ['%s = %s' % storage_parameter - for storage_parameter in withclause.items()])) - - whereclause = index.dialect_options["postgresql"]["where"] - - if whereclause is not None: - where_compiled = self.sql_compiler.process( - whereclause, include_table=False, - literal_binds=True) - text += " WHERE " + where_compiled - return text - - def visit_exclude_constraint(self, constraint, **kw): - text = "" - if constraint.name is not None: - text += "CONSTRAINT %s " % \ - self.preparer.format_constraint(constraint) - elements = [] - for expr, name, op in constraint._render_exprs: - kw['include_table'] = False - elements.append( - "%s WITH %s" % 
(self.sql_compiler.process(expr, **kw), op) - ) - text += "EXCLUDE USING %s (%s)" % (constraint.using, - ', '.join(elements)) - if constraint.where is not None: - text += ' WHERE (%s)' % self.sql_compiler.process( - constraint.where, - literal_binds=True) - text += self.define_constraint_deferrability(constraint) - return text - - def post_create_table(self, table): - table_opts = [] - pg_opts = table.dialect_options['postgresql'] - - inherits = pg_opts.get('inherits') - if inherits is not None: - if not isinstance(inherits, (list, tuple)): - inherits = (inherits, ) - table_opts.append( - '\n INHERITS ( ' + - ', '.join(self.preparer.quote(name) for name in inherits) + - ' )') - - if pg_opts['with_oids'] is True: - table_opts.append('\n WITH OIDS') - elif pg_opts['with_oids'] is False: - table_opts.append('\n WITHOUT OIDS') - - if pg_opts['on_commit']: - on_commit_options = pg_opts['on_commit'].replace("_", " ").upper() - table_opts.append('\n ON COMMIT %s' % on_commit_options) - - if pg_opts['tablespace']: - tablespace_name = pg_opts['tablespace'] - table_opts.append( - '\n TABLESPACE %s' % self.preparer.quote(tablespace_name) - ) - - return ''.join(table_opts) - - -class PGTypeCompiler(compiler.GenericTypeCompiler): - def visit_TSVECTOR(self, type, **kw): - return "TSVECTOR" - - def visit_INET(self, type_, **kw): - return "INET" - - def visit_CIDR(self, type_, **kw): - return "CIDR" - - def visit_MACADDR(self, type_, **kw): - return "MACADDR" - - def visit_OID(self, type_, **kw): - return "OID" - - def visit_FLOAT(self, type_, **kw): - if not type_.precision: - return "FLOAT" - else: - return "FLOAT(%(precision)s)" % {'precision': type_.precision} - - def visit_DOUBLE_PRECISION(self, type_, **kw): - return "DOUBLE PRECISION" - - def visit_BIGINT(self, type_, **kw): - return "BIGINT" - - def visit_HSTORE(self, type_, **kw): - return "HSTORE" - - def visit_JSON(self, type_, **kw): - return "JSON" - - def visit_JSONB(self, type_, **kw): - return "JSONB" - - def visit_INT4RANGE(self, type_, **kw): - return "INT4RANGE" - - def visit_INT8RANGE(self, type_, **kw): - return "INT8RANGE" - - def visit_NUMRANGE(self, type_, **kw): - return "NUMRANGE" - - def visit_DATERANGE(self, type_, **kw): - return "DATERANGE" - - def visit_TSRANGE(self, type_, **kw): - return "TSRANGE" - - def visit_TSTZRANGE(self, type_, **kw): - return "TSTZRANGE" - - def visit_datetime(self, type_, **kw): - return self.visit_TIMESTAMP(type_, **kw) - - def visit_enum(self, type_, **kw): - if not type_.native_enum or not self.dialect.supports_native_enum: - return super(PGTypeCompiler, self).visit_enum(type_, **kw) - else: - return self.visit_ENUM(type_, **kw) - - def visit_ENUM(self, type_, **kw): - return self.dialect.identifier_preparer.format_type(type_) - - def visit_TIMESTAMP(self, type_, **kw): - return "TIMESTAMP%s %s" % ( - getattr(type_, 'precision', None) and "(%d)" % - type_.precision or "", - (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" - ) - - def visit_TIME(self, type_, **kw): - return "TIME%s %s" % ( - getattr(type_, 'precision', None) and "(%d)" % - type_.precision or "", - (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" - ) - - def visit_INTERVAL(self, type_, **kw): - if type_.precision is not None: - return "INTERVAL(%d)" % type_.precision - else: - return "INTERVAL" - - def visit_BIT(self, type_, **kw): - if type_.varying: - compiled = "BIT VARYING" - if type_.length is not None: - compiled += "(%d)" % type_.length - else: - compiled = "BIT(%d)" % type_.length - return compiled - - def 
visit_UUID(self, type_, **kw): - return "UUID" - - def visit_large_binary(self, type_, **kw): - return self.visit_BYTEA(type_, **kw) - - def visit_BYTEA(self, type_, **kw): - return "BYTEA" - - def visit_ARRAY(self, type_, **kw): - return self.process(type_.item_type) + ('[]' * (type_.dimensions - if type_.dimensions - is not None else 1)) - - -class PGIdentifierPreparer(compiler.IdentifierPreparer): - - reserved_words = RESERVED_WORDS - - def _unquote_identifier(self, value): - if value[0] == self.initial_quote: - value = value[1:-1].\ - replace(self.escape_to_quote, self.escape_quote) - return value - - def format_type(self, type_, use_schema=True): - if not type_.name: - raise exc.CompileError("Postgresql ENUM type requires a name.") - - name = self.quote(type_.name) - if not self.omit_schema and use_schema and type_.schema is not None: - name = self.quote_schema(type_.schema) + "." + name - return name - - -class PGInspector(reflection.Inspector): - - def __init__(self, conn): - reflection.Inspector.__init__(self, conn) - - def get_table_oid(self, table_name, schema=None): - """Return the OID for the given table name.""" - - return self.dialect.get_table_oid(self.bind, table_name, schema, - info_cache=self.info_cache) - - def get_enums(self, schema=None): - """Return a list of ENUM objects. - - Each member is a dictionary containing these fields: - - * name - name of the enum - * schema - the schema name for the enum. - * visible - boolean, whether or not this enum is visible - in the default search path. - * labels - a list of string labels that apply to the enum. - - :param schema: schema name. If None, the default schema - (typically 'public') is used. May also be set to '*' to - indicate load enums for all schemas. - - .. versionadded:: 1.0.0 - - """ - schema = schema or self.default_schema_name - return self.dialect._load_enums(self.bind, schema) - - def get_foreign_table_names(self, schema=None): - """Return a list of FOREIGN TABLE names. - - Behavior is similar to that of :meth:`.Inspector.get_table_names`, - except that the list is limited to those tables tha report a - ``relkind`` value of ``f``. - - .. versionadded:: 1.0.0 - - """ - schema = schema or self.default_schema_name - return self.dialect._get_foreign_table_names(self.bind, schema) - - -class CreateEnumType(schema._CreateDropBase): - __visit_name__ = "create_enum_type" - - -class DropEnumType(schema._CreateDropBase): - __visit_name__ = "drop_enum_type" - - -class PGExecutionContext(default.DefaultExecutionContext): - - def fire_sequence(self, seq, type_): - return self._execute_scalar(( - "select nextval('%s')" % - self.dialect.identifier_preparer.format_sequence(seq)), type_) - - def get_insert_default(self, column): - if column.primary_key and \ - column is column.table._autoincrement_column: - if column.server_default and column.server_default.has_argument: - - # pre-execute passive defaults on primary key columns - return self._execute_scalar("select %s" % - column.server_default.arg, - column.type) - - elif (column.default is None or - (column.default.is_sequence and - column.default.optional)): - - # execute the sequence associated with a SERIAL primary - # key column. for non-primary-key SERIAL, the ID just - # generates server side. 
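
For context, a hedged sketch of the case handled just below: a SERIAL
primary key whose next value is pre-fetched so it can be returned to the
application; the table and engine are illustrative::

    items = Table('items', metadata,
                  Column('id', Integer, primary_key=True),  # SERIAL
                  Column('name', String(40)))

    # before the INSERT, the dialect runs approximately:
    #   select nextval('items_id_seq')
    engine.execute(items.insert(), name='widget')
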
- - try: - seq_name = column._postgresql_seq_name - except AttributeError: - tab = column.table.name - col = column.name - tab = tab[0:29 + max(0, (29 - len(col)))] - col = col[0:29 + max(0, (29 - len(tab)))] - name = "%s_%s_seq" % (tab, col) - column._postgresql_seq_name = seq_name = name - - sch = column.table.schema - if sch is not None: - exc = "select nextval('\"%s\".\"%s\"')" % \ - (sch, seq_name) - else: - exc = "select nextval('\"%s\"')" % \ - (seq_name, ) - - return self._execute_scalar(exc, column.type) - - return super(PGExecutionContext, self).get_insert_default(column) - - -class PGDialect(default.DefaultDialect): - name = 'postgresql' - supports_alter = True - max_identifier_length = 63 - supports_sane_rowcount = True - - supports_native_enum = True - supports_native_boolean = True - supports_smallserial = True - - supports_sequences = True - sequences_optional = True - preexecute_autoincrement_sequences = True - postfetch_lastrowid = False - - supports_default_values = True - supports_empty_insert = False - supports_multivalues_insert = True - default_paramstyle = 'pyformat' - ischema_names = ischema_names - colspecs = colspecs - - statement_compiler = PGCompiler - ddl_compiler = PGDDLCompiler - type_compiler = PGTypeCompiler - preparer = PGIdentifierPreparer - execution_ctx_cls = PGExecutionContext - inspector = PGInspector - isolation_level = None - - construct_arguments = [ - (schema.Index, { - "using": False, - "where": None, - "ops": {}, - "concurrently": False, - "with": {} - }), - (schema.Table, { - "ignore_search_path": False, - "tablespace": None, - "with_oids": None, - "on_commit": None, - "inherits": None - }) - ] - - reflection_options = ('postgresql_ignore_search_path', ) - - _backslash_escapes = True - - def __init__(self, isolation_level=None, json_serializer=None, - json_deserializer=None, **kwargs): - default.DefaultDialect.__init__(self, **kwargs) - self.isolation_level = isolation_level - self._json_deserializer = json_deserializer - self._json_serializer = json_serializer - - def initialize(self, connection): - super(PGDialect, self).initialize(connection) - self.implicit_returning = self.server_version_info > (8, 2) and \ - self.__dict__.get('implicit_returning', True) - self.supports_native_enum = self.server_version_info >= (8, 3) - if not self.supports_native_enum: - self.colspecs = self.colspecs.copy() - # pop base Enum type - self.colspecs.pop(sqltypes.Enum, None) - # psycopg2, others may have placed ENUM here as well - self.colspecs.pop(ENUM, None) - - # http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689 - self.supports_smallserial = self.server_version_info >= (9, 2) - - self._backslash_escapes = self.server_version_info < (8, 2) or \ - connection.scalar( - "show standard_conforming_strings" - ) == 'off' - - def on_connect(self): - if self.isolation_level is not None: - def connect(conn): - self.set_isolation_level(conn, self.isolation_level) - return connect - else: - return None - - _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', - 'READ COMMITTED', 'REPEATABLE READ']) - - def set_isolation_level(self, connection, level): - level = level.replace('_', ' ') - if level not in self._isolation_lookup: - raise exc.ArgumentError( - "Invalid value '%s' for isolation_level. 
" - "Valid isolation levels for %s are %s" % - (level, self.name, ", ".join(self._isolation_lookup)) - ) - cursor = connection.cursor() - cursor.execute( - "SET SESSION CHARACTERISTICS AS TRANSACTION " - "ISOLATION LEVEL %s" % level) - cursor.execute("COMMIT") - cursor.close() - - def get_isolation_level(self, connection): - cursor = connection.cursor() - cursor.execute('show transaction isolation level') - val = cursor.fetchone()[0] - cursor.close() - return val.upper() - - def do_begin_twophase(self, connection, xid): - self.do_begin(connection.connection) - - def do_prepare_twophase(self, connection, xid): - connection.execute("PREPARE TRANSACTION '%s'" % xid) - - def do_rollback_twophase(self, connection, xid, - is_prepared=True, recover=False): - if is_prepared: - if recover: - # FIXME: ugly hack to get out of transaction - # context when committing recoverable transactions - # Must find out a way how to make the dbapi not - # open a transaction. - connection.execute("ROLLBACK") - connection.execute("ROLLBACK PREPARED '%s'" % xid) - connection.execute("BEGIN") - self.do_rollback(connection.connection) - else: - self.do_rollback(connection.connection) - - def do_commit_twophase(self, connection, xid, - is_prepared=True, recover=False): - if is_prepared: - if recover: - connection.execute("ROLLBACK") - connection.execute("COMMIT PREPARED '%s'" % xid) - connection.execute("BEGIN") - self.do_rollback(connection.connection) - else: - self.do_commit(connection.connection) - - def do_recover_twophase(self, connection): - resultset = connection.execute( - sql.text("SELECT gid FROM pg_prepared_xacts")) - return [row[0] for row in resultset] - - def _get_default_schema_name(self, connection): - return connection.scalar("select current_schema()") - - def has_schema(self, connection, schema): - query = ("select nspname from pg_namespace " - "where lower(nspname)=:schema") - cursor = connection.execute( - sql.text( - query, - bindparams=[ - sql.bindparam( - 'schema', util.text_type(schema.lower()), - type_=sqltypes.Unicode)] - ) - ) - - return bool(cursor.first()) - - def has_table(self, connection, table_name, schema=None): - # seems like case gets folded in pg_class... 
- if schema is None: - cursor = connection.execute( - sql.text( - "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where " - "pg_catalog.pg_table_is_visible(c.oid) " - "and relname=:name", - bindparams=[ - sql.bindparam('name', util.text_type(table_name), - type_=sqltypes.Unicode)] - ) - ) - else: - cursor = connection.execute( - sql.text( - "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where n.nspname=:schema and " - "relname=:name", - bindparams=[ - sql.bindparam('name', - util.text_type(table_name), - type_=sqltypes.Unicode), - sql.bindparam('schema', - util.text_type(schema), - type_=sqltypes.Unicode)] - ) - ) - return bool(cursor.first()) - - def has_sequence(self, connection, sequence_name, schema=None): - if schema is None: - cursor = connection.execute( - sql.text( - "SELECT relname FROM pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where relkind='S' and " - "n.nspname=current_schema() " - "and relname=:name", - bindparams=[ - sql.bindparam('name', util.text_type(sequence_name), - type_=sqltypes.Unicode) - ] - ) - ) - else: - cursor = connection.execute( - sql.text( - "SELECT relname FROM pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where relkind='S' and " - "n.nspname=:schema and relname=:name", - bindparams=[ - sql.bindparam('name', util.text_type(sequence_name), - type_=sqltypes.Unicode), - sql.bindparam('schema', - util.text_type(schema), - type_=sqltypes.Unicode) - ] - ) - ) - - return bool(cursor.first()) - - def has_type(self, connection, type_name, schema=None): - if schema is not None: - query = """ - SELECT EXISTS ( - SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n - WHERE t.typnamespace = n.oid - AND t.typname = :typname - AND n.nspname = :nspname - ) - """ - query = sql.text(query) - else: - query = """ - SELECT EXISTS ( - SELECT * FROM pg_catalog.pg_type t - WHERE t.typname = :typname - AND pg_type_is_visible(t.oid) - ) - """ - query = sql.text(query) - query = query.bindparams( - sql.bindparam('typname', - util.text_type(type_name), type_=sqltypes.Unicode), - ) - if schema is not None: - query = query.bindparams( - sql.bindparam('nspname', - util.text_type(schema), type_=sqltypes.Unicode), - ) - cursor = connection.execute(query) - return bool(cursor.scalar()) - - def _get_server_version_info(self, connection): - v = connection.execute("select version()").scalar() - m = re.match( - '.*(?:PostgreSQL|EnterpriseDB) ' - '(\d+)\.(\d+)(?:\.(\d+))?(?:\.\d+)?(?:devel)?', - v) - if not m: - raise AssertionError( - "Could not determine version from string '%s'" % v) - return tuple([int(x) for x in m.group(1, 2, 3) if x is not None]) - - @reflection.cache - def get_table_oid(self, connection, table_name, schema=None, **kw): - """Fetch the oid for schema.table_name. - - Several reflection methods require the table oid. The idea for using - this method is that it can be fetched one time and cached for - subsequent calls. - - """ - table_oid = None - if schema is not None: - schema_where_clause = "n.nspname = :schema" - else: - schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)" - query = """ - SELECT c.oid - FROM pg_catalog.pg_class c - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE (%s) - AND c.relname = :table_name AND c.relkind in ('r', 'v', 'm', 'f') - """ % schema_where_clause - # Since we're binding to unicode, table_name and schema_name must be - # unicode. 
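
A hedged sketch of the per-table oid caching described above; ``engine`` is
a placeholder::

    from sqlalchemy import inspect

    insp = inspect(engine)
    insp.get_columns('mytable')   # resolves and caches the table oid
    insp.get_indexes('mytable')   # reuses the oid via the shared info_cache
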
- table_name = util.text_type(table_name) - if schema is not None: - schema = util.text_type(schema) - s = sql.text(query).bindparams(table_name=sqltypes.Unicode) - s = s.columns(oid=sqltypes.Integer) - if schema: - s = s.bindparams(sql.bindparam('schema', type_=sqltypes.Unicode)) - c = connection.execute(s, table_name=table_name, schema=schema) - table_oid = c.scalar() - if table_oid is None: - raise exc.NoSuchTableError(table_name) - return table_oid - - @reflection.cache - def get_schema_names(self, connection, **kw): - s = """ - SELECT nspname - FROM pg_namespace - ORDER BY nspname - """ - rp = connection.execute(s) - # what about system tables? - - if util.py2k: - schema_names = [row[0].decode(self.encoding) for row in rp - if not row[0].startswith('pg_')] - else: - schema_names = [row[0] for row in rp - if not row[0].startswith('pg_')] - return schema_names - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - - result = connection.execute( - sql.text("SELECT relname FROM pg_class c " - "WHERE relkind = 'r' " - "AND '%s' = (select nspname from pg_namespace n " - "where n.oid = c.relnamespace) " % - current_schema, - typemap={'relname': sqltypes.Unicode} - ) - ) - return [row[0] for row in result] - - @reflection.cache - def _get_foreign_table_names(self, connection, schema=None, **kw): - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - - result = connection.execute( - sql.text("SELECT relname FROM pg_class c " - "WHERE relkind = 'f' " - "AND '%s' = (select nspname from pg_namespace n " - "where n.oid = c.relnamespace) " % - current_schema, - typemap={'relname': sqltypes.Unicode} - ) - ) - return [row[0] for row in result] - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - s = """ - SELECT relname - FROM pg_class c - WHERE relkind IN ('m', 'v') - AND '%(schema)s' = (select nspname from pg_namespace n - where n.oid = c.relnamespace) - """ % dict(schema=current_schema) - - if util.py2k: - view_names = [row[0].decode(self.encoding) - for row in connection.execute(s)] - else: - view_names = [row[0] for row in connection.execute(s)] - return view_names - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, **kw): - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - s = """ - SELECT definition FROM pg_views - WHERE schemaname = :schema - AND viewname = :view_name - """ - rp = connection.execute(sql.text(s), - view_name=view_name, schema=current_schema) - if rp: - if util.py2k: - view_def = rp.scalar().decode(self.encoding) - else: - view_def = rp.scalar() - return view_def - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - SQL_COLS = """ - SELECT a.attname, - pg_catalog.format_type(a.atttypid, a.atttypmod), - (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) - FROM pg_catalog.pg_attrdef d - WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum - AND a.atthasdef) - AS DEFAULT, - a.attnotnull, a.attnum, a.attrelid as table_oid - FROM pg_catalog.pg_attribute a - WHERE a.attrelid = :table_oid - AND a.attnum > 0 AND NOT a.attisdropped - ORDER BY a.attnum 
- """ - s = sql.text(SQL_COLS, - bindparams=[ - sql.bindparam('table_oid', type_=sqltypes.Integer)], - typemap={ - 'attname': sqltypes.Unicode, - 'default': sqltypes.Unicode} - ) - c = connection.execute(s, table_oid=table_oid) - rows = c.fetchall() - domains = self._load_domains(connection) - enums = dict( - ( - "%s.%s" % (rec['schema'], rec['name']) - if not rec['visible'] else rec['name'], rec) for rec in - self._load_enums(connection, schema='*') - ) - - # format columns - columns = [] - for name, format_type, default, notnull, attnum, table_oid in rows: - column_info = self._get_column_info( - name, format_type, default, notnull, domains, enums, schema) - columns.append(column_info) - return columns - - def _get_column_info(self, name, format_type, default, - notnull, domains, enums, schema): - # strip (*) from character varying(5), timestamp(5) - # with time zone, geometry(POLYGON), etc. - attype = re.sub(r'\(.*\)', '', format_type) - - # strip '[]' from integer[], etc. - attype = re.sub(r'\[\]', '', attype) - - nullable = not notnull - is_array = format_type.endswith('[]') - charlen = re.search('\(([\d,]+)\)', format_type) - if charlen: - charlen = charlen.group(1) - args = re.search('\((.*)\)', format_type) - if args and args.group(1): - args = tuple(re.split('\s*,\s*', args.group(1))) - else: - args = () - kwargs = {} - - if attype == 'numeric': - if charlen: - prec, scale = charlen.split(',') - args = (int(prec), int(scale)) - else: - args = () - elif attype == 'double precision': - args = (53, ) - elif attype == 'integer': - args = () - elif attype in ('timestamp with time zone', - 'time with time zone'): - kwargs['timezone'] = True - if charlen: - kwargs['precision'] = int(charlen) - args = () - elif attype in ('timestamp without time zone', - 'time without time zone', 'time'): - kwargs['timezone'] = False - if charlen: - kwargs['precision'] = int(charlen) - args = () - elif attype == 'bit varying': - kwargs['varying'] = True - if charlen: - args = (int(charlen),) - else: - args = () - elif attype in ('interval', 'interval year to month', - 'interval day to second'): - if charlen: - kwargs['precision'] = int(charlen) - args = () - elif charlen: - args = (int(charlen),) - - while True: - if attype in self.ischema_names: - coltype = self.ischema_names[attype] - break - elif attype in enums: - enum = enums[attype] - coltype = ENUM - kwargs['name'] = enum['name'] - if not enum['visible']: - kwargs['schema'] = enum['schema'] - args = tuple(enum['labels']) - break - elif attype in domains: - domain = domains[attype] - attype = domain['attype'] - # A table can't override whether the domain is nullable. - nullable = domain['nullable'] - if domain['default'] and not default: - # It can, however, override the default - # value, but can't set it to null. - default = domain['default'] - continue - else: - coltype = None - break - - if coltype: - coltype = coltype(*args, **kwargs) - if is_array: - coltype = ARRAY(coltype) - else: - util.warn("Did not recognize type '%s' of column '%s'" % - (attype, name)) - coltype = sqltypes.NULLTYPE - # adjust the default value - autoincrement = False - if default is not None: - match = re.search(r"""(nextval\(')([^']+)('.*$)""", default) - if match is not None: - autoincrement = True - # the default is related to a Sequence - sch = schema - if '.' not in match.group(2) and sch is not None: - # unconditionally quote the schema name. 
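
A hedged illustration of the type-string parsing above, reusing the same
regular expressions on a sample value::

    import re

    format_type = 'character varying(30)[]'
    attype = re.sub(r'\(.*\)', '', format_type)
    attype = re.sub(r'\[\]', '', attype)     # -> 'character varying'
    charlen = re.search(r'\(([\d,]+)\)', format_type).group(1)   # -> '30'
    is_array = format_type.endswith('[]')    # -> True, wrapped in ARRAY()
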
this could - # later be enhanced to obey quoting rules / - # "quote schema" - default = match.group(1) + \ - ('"%s"' % sch) + '.' + \ - match.group(2) + match.group(3) - - column_info = dict(name=name, type=coltype, nullable=nullable, - default=default, autoincrement=autoincrement) - return column_info - - @reflection.cache - def get_pk_constraint(self, connection, table_name, schema=None, **kw): - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - - if self.server_version_info < (8, 4): - PK_SQL = """ - SELECT a.attname - FROM - pg_class t - join pg_index ix on t.oid = ix.indrelid - join pg_attribute a - on t.oid=a.attrelid AND %s - WHERE - t.oid = :table_oid and ix.indisprimary = 't' - ORDER BY a.attnum - """ % self._pg_index_any("a.attnum", "ix.indkey") - - else: - # unnest() and generate_subscripts() both introduced in - # version 8.4 - PK_SQL = """ - SELECT a.attname - FROM pg_attribute a JOIN ( - SELECT unnest(ix.indkey) attnum, - generate_subscripts(ix.indkey, 1) ord - FROM pg_index ix - WHERE ix.indrelid = :table_oid AND ix.indisprimary - ) k ON a.attnum=k.attnum - WHERE a.attrelid = :table_oid - ORDER BY k.ord - """ - t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode}) - c = connection.execute(t, table_oid=table_oid) - cols = [r[0] for r in c.fetchall()] - - PK_CONS_SQL = """ - SELECT conname - FROM pg_catalog.pg_constraint r - WHERE r.conrelid = :table_oid AND r.contype = 'p' - ORDER BY 1 - """ - t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode}) - c = connection.execute(t, table_oid=table_oid) - name = c.scalar() - - return {'constrained_columns': cols, 'name': name} - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, - postgresql_ignore_search_path=False, **kw): - preparer = self.identifier_preparer - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - - FK_SQL = """ - SELECT r.conname, - pg_catalog.pg_get_constraintdef(r.oid, true) as condef, - n.nspname as conschema - FROM pg_catalog.pg_constraint r, - pg_namespace n, - pg_class c - - WHERE r.conrelid = :table AND - r.contype = 'f' AND - c.oid = confrelid AND - n.oid = c.relnamespace - ORDER BY 1 - """ - # http://www.postgresql.org/docs/9.0/static/sql-createtable.html - FK_REGEX = re.compile( - r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)' - r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?' - r'[\s]?(ON UPDATE ' - r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' - r'[\s]?(ON DELETE ' - r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' - r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?' - r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?' 
- ) - - t = sql.text(FK_SQL, typemap={ - 'conname': sqltypes.Unicode, - 'condef': sqltypes.Unicode}) - c = connection.execute(t, table=table_oid) - fkeys = [] - for conname, condef, conschema in c.fetchall(): - m = re.search(FK_REGEX, condef).groups() - - constrained_columns, referred_schema, \ - referred_table, referred_columns, \ - _, match, _, onupdate, _, ondelete, \ - deferrable, _, initially = m - - if deferrable is not None: - deferrable = True if deferrable == 'DEFERRABLE' else False - constrained_columns = [preparer._unquote_identifier(x) - for x in re.split( - r'\s*,\s*', constrained_columns)] - - if postgresql_ignore_search_path: - # when ignoring search path, we use the actual schema - # provided it isn't the "default" schema - if conschema != self.default_schema_name: - referred_schema = conschema - else: - referred_schema = schema - elif referred_schema: - # referred_schema is the schema that we regexp'ed from - # pg_get_constraintdef(). If the schema is in the search - # path, pg_get_constraintdef() will give us None. - referred_schema = \ - preparer._unquote_identifier(referred_schema) - elif schema is not None and schema == conschema: - # If the actual schema matches the schema of the table - # we're reflecting, then we will use that. - referred_schema = schema - - referred_table = preparer._unquote_identifier(referred_table) - referred_columns = [preparer._unquote_identifier(x) - for x in - re.split(r'\s*,\s', referred_columns)] - fkey_d = { - 'name': conname, - 'constrained_columns': constrained_columns, - 'referred_schema': referred_schema, - 'referred_table': referred_table, - 'referred_columns': referred_columns, - 'options': { - 'onupdate': onupdate, - 'ondelete': ondelete, - 'deferrable': deferrable, - 'initially': initially, - 'match': match - } - } - fkeys.append(fkey_d) - return fkeys - - def _pg_index_any(self, col, compare_to): - if self.server_version_info < (8, 1): - # http://www.postgresql.org/message-id/10279.1124395722@sss.pgh.pa.us - # "In CVS tip you could replace this with "attnum = ANY (indkey)". - # Unfortunately, most array support doesn't work on int2vector in - # pre-8.1 releases, so I think you're kinda stuck with the above - # for now. - # regards, tom lane" - return "(%s)" % " OR ".join( - "%s[%d] = %s" % (compare_to, ind, col) - for ind in range(0, 10) - ) - else: - return "%s = ANY(%s)" % (col, compare_to) - - @reflection.cache - def get_indexes(self, connection, table_name, schema, **kw): - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - - # cast indkey as varchar since it's an int2vector, - # returned as a list by some drivers such as pypostgresql - - if self.server_version_info < (8, 5): - IDX_SQL = """ - SELECT - i.relname as relname, - ix.indisunique, ix.indexprs, ix.indpred, - a.attname, a.attnum, NULL, ix.indkey%s, - %s, am.amname - FROM - pg_class t - join pg_index ix on t.oid = ix.indrelid - join pg_class i on i.oid = ix.indexrelid - left outer join - pg_attribute a - on t.oid = a.attrelid and %s - left outer join - pg_am am - on i.relam = am.oid - WHERE - t.relkind IN ('r', 'v', 'f', 'm') - and t.oid = :table_oid - and ix.indisprimary = 'f' - ORDER BY - t.relname, - i.relname - """ % ( - # version 8.3 here was based on observing the - # cast does not work in PG 8.2.4, does work in 8.3.0. - # nothing in PG changelogs regarding this. 
- "::varchar" if self.server_version_info >= (8, 3) else "", - "i.reloptions" if self.server_version_info >= (8, 2) - else "NULL", - self._pg_index_any("a.attnum", "ix.indkey") - ) - else: - IDX_SQL = """ - SELECT - i.relname as relname, - ix.indisunique, ix.indexprs, ix.indpred, - a.attname, a.attnum, c.conrelid, ix.indkey::varchar, - i.reloptions, am.amname - FROM - pg_class t - join pg_index ix on t.oid = ix.indrelid - join pg_class i on i.oid = ix.indexrelid - left outer join - pg_attribute a - on t.oid = a.attrelid and a.attnum = ANY(ix.indkey) - left outer join - pg_constraint c - on (ix.indrelid = c.conrelid and - ix.indexrelid = c.conindid and - c.contype in ('p', 'u', 'x')) - left outer join - pg_am am - on i.relam = am.oid - WHERE - t.relkind IN ('r', 'v', 'f', 'm') - and t.oid = :table_oid - and ix.indisprimary = 'f' - ORDER BY - t.relname, - i.relname - """ - - t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode}) - c = connection.execute(t, table_oid=table_oid) - - indexes = defaultdict(lambda: defaultdict(dict)) - - sv_idx_name = None - for row in c.fetchall(): - (idx_name, unique, expr, prd, col, - col_num, conrelid, idx_key, options, amname) = row - - if expr: - if idx_name != sv_idx_name: - util.warn( - "Skipped unsupported reflection of " - "expression-based index %s" - % idx_name) - sv_idx_name = idx_name - continue - - if prd and not idx_name == sv_idx_name: - util.warn( - "Predicate of partial index %s ignored during reflection" - % idx_name) - sv_idx_name = idx_name - - has_idx = idx_name in indexes - index = indexes[idx_name] - if col is not None: - index['cols'][col_num] = col - if not has_idx: - index['key'] = [int(k.strip()) for k in idx_key.split()] - index['unique'] = unique - if conrelid is not None: - index['duplicates_constraint'] = idx_name - if options: - index['options'] = dict( - [option.split("=") for option in options]) - - # it *might* be nice to include that this is 'btree' in the - # reflection info. But we don't want an Index object - # to have a ``postgresql_using`` in it that is just the - # default, so for the moment leaving this out. 
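
For orientation, a rough sketch of what the result assembly just below
produces for a simple index; all names are illustrative::

    # CREATE INDEX ix_ab ON t (a, b) WITH (fillfactor = 70)
    # reflects approximately as:
    #   {'name': 'ix_ab',
    #    'unique': False,
    #    'column_names': ['a', 'b'],
    #    'dialect_options': {'postgresql_with': {'fillfactor': '70'}}}
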
- if amname and amname != 'btree': - index['amname'] = amname - - result = [] - for name, idx in indexes.items(): - entry = { - 'name': name, - 'unique': idx['unique'], - 'column_names': [idx['cols'][i] for i in idx['key']] - } - if 'duplicates_constraint' in idx: - entry['duplicates_constraint'] = idx['duplicates_constraint'] - if 'options' in idx: - entry.setdefault( - 'dialect_options', {})["postgresql_with"] = idx['options'] - if 'amname' in idx: - entry.setdefault( - 'dialect_options', {})["postgresql_using"] = idx['amname'] - result.append(entry) - return result - - @reflection.cache - def get_unique_constraints(self, connection, table_name, - schema=None, **kw): - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - - UNIQUE_SQL = """ - SELECT - cons.conname as name, - cons.conkey as key, - a.attnum as col_num, - a.attname as col_name - FROM - pg_catalog.pg_constraint cons - join pg_attribute a - on cons.conrelid = a.attrelid AND - a.attnum = ANY(cons.conkey) - WHERE - cons.conrelid = :table_oid AND - cons.contype = 'u' - """ - - t = sql.text(UNIQUE_SQL, typemap={'col_name': sqltypes.Unicode}) - c = connection.execute(t, table_oid=table_oid) - - uniques = defaultdict(lambda: defaultdict(dict)) - for row in c.fetchall(): - uc = uniques[row.name] - uc["key"] = row.key - uc["cols"][row.col_num] = row.col_name - - return [ - {'name': name, - 'column_names': [uc["cols"][i] for i in uc["key"]]} - for name, uc in uniques.items() - ] - - def _load_enums(self, connection, schema=None): - schema = schema or self.default_schema_name - if not self.supports_native_enum: - return {} - - # Load data types for enums: - SQL_ENUMS = """ - SELECT t.typname as "name", - -- no enum defaults in 8.4 at least - -- t.typdefault as "default", - pg_catalog.pg_type_is_visible(t.oid) as "visible", - n.nspname as "schema", - e.enumlabel as "label" - FROM pg_catalog.pg_type t - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace - LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid - WHERE t.typtype = 'e' - """ - - if schema != '*': - SQL_ENUMS += "AND n.nspname = :schema " - - # e.oid gives us label order within an enum - SQL_ENUMS += 'ORDER BY "schema", "name", e.oid' - - s = sql.text(SQL_ENUMS, typemap={ - 'attname': sqltypes.Unicode, - 'label': sqltypes.Unicode}) - - if schema != '*': - s = s.bindparams(schema=schema) - - c = connection.execute(s) - - enums = [] - enum_by_name = {} - for enum in c.fetchall(): - key = (enum['schema'], enum['name']) - if key in enum_by_name: - enum_by_name[key]['labels'].append(enum['label']) - else: - enum_by_name[key] = enum_rec = { - 'name': enum['name'], - 'schema': enum['schema'], - 'visible': enum['visible'], - 'labels': [enum['label']], - } - enums.append(enum_rec) - - return enums - - def _load_domains(self, connection): - # Load data types for domains: - SQL_DOMAINS = """ - SELECT t.typname as "name", - pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype", - not t.typnotnull as "nullable", - t.typdefault as "default", - pg_catalog.pg_type_is_visible(t.oid) as "visible", - n.nspname as "schema" - FROM pg_catalog.pg_type t - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace - WHERE t.typtype = 'd' - """ - - s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode}) - c = connection.execute(s) - - domains = {} - for domain in c.fetchall(): - # strip (30) from character varying(30) - attype = re.search('([^\(]+)', domain['attype']).group(1) - if domain['visible']: - # 'visible' just 
means whether or not the domain is in a - # schema that's on the search path -- or not overridden by - # a schema with higher precedence. If it's not visible, - # it will be prefixed with the schema-name when it's used. - name = domain['name'] - else: - name = "%s.%s" % (domain['schema'], domain['name']) - - domains[name] = { - 'attype': attype, - 'nullable': domain['nullable'], - 'default': domain['default'] - } - - return domains diff --git a/python/sqlalchemy/dialects/postgresql/constraints.py b/python/sqlalchemy/dialects/postgresql/constraints.py deleted file mode 100644 index 4cfc050d..00000000 --- a/python/sqlalchemy/dialects/postgresql/constraints.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (C) 2013-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -from ...sql.schema import ColumnCollectionConstraint -from ...sql import expression -from ... import util - - -class ExcludeConstraint(ColumnCollectionConstraint): - """A table-level EXCLUDE constraint. - - Defines an EXCLUDE constraint as described in the `postgres - documentation`__. - - __ http://www.postgresql.org/docs/9.0/\ -static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE - """ - - __visit_name__ = 'exclude_constraint' - - where = None - - def __init__(self, *elements, **kw): - """ - :param \*elements: - A sequence of two tuples of the form ``(column, operator)`` where - column must be a column name or Column object and operator must - be a string containing the operator to use. - - :param name: - Optional, the in-database name of this constraint. - - :param deferrable: - Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when - issuing DDL for this constraint. - - :param initially: - Optional string. If set, emit INITIALLY when issuing DDL - for this constraint. - - :param using: - Optional string. If set, emit USING when issuing DDL - for this constraint. Defaults to 'gist'. - - :param where: - Optional string. If set, emit WHERE when issuing DDL - for this constraint. 
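
A hedged usage sketch; the table, columns and chosen operators are
illustrative::

    from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE

    bookings = Table('bookings', metadata,
                     Column('room', Integer),
                     Column('during', TSRANGE),
                     ExcludeConstraint(('room', '='), ('during', '&&'),
                                       using='gist'))
    # emits roughly:
    #   EXCLUDE USING gist (room WITH =, during WITH &&)
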
- - """ - columns = [] - render_exprs = [] - self.operators = {} - - expressions, operators = zip(*elements) - - for (expr, column, strname, add_element), operator in zip( - self._extract_col_expression_collection(expressions), - operators - ): - if add_element is not None: - columns.append(add_element) - - name = column.name if column is not None else strname - - if name is not None: - # backwards compat - self.operators[name] = operator - - expr = expression._literal_as_text(expr) - - render_exprs.append( - (expr, name, operator) - ) - - self._render_exprs = render_exprs - ColumnCollectionConstraint.__init__( - self, - *columns, - name=kw.get('name'), - deferrable=kw.get('deferrable'), - initially=kw.get('initially') - ) - self.using = kw.get('using', 'gist') - where = kw.get('where') - if where: - self.where = expression._literal_as_text(where) - - def copy(self, **kw): - elements = [(col, self.operators[col]) - for col in self.columns.keys()] - c = self.__class__(*elements, - name=self.name, - deferrable=self.deferrable, - initially=self.initially) - c.dispatch._update(self.dispatch) - return c diff --git a/python/sqlalchemy/dialects/postgresql/hstore.py b/python/sqlalchemy/dialects/postgresql/hstore.py deleted file mode 100644 index 9f369cb5..00000000 --- a/python/sqlalchemy/dialects/postgresql/hstore.py +++ /dev/null @@ -1,376 +0,0 @@ -# postgresql/hstore.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import re - -from .base import ARRAY, ischema_names -from ... import types as sqltypes -from ...sql import functions as sqlfunc -from ...sql.operators import custom_op -from ... import util - -__all__ = ('HSTORE', 'hstore') - -# My best guess at the parsing rules of hstore literals, since no formal -# grammar is given. This is mostly reverse engineered from PG's input parser -# behavior. -HSTORE_PAIR_RE = re.compile(r""" -( - "(?P (\\ . | [^"])* )" # Quoted key -) -[ ]* => [ ]* # Pair operator, optional adjoining whitespace -( - (?P NULL ) # NULL value - | "(?P (\\ . | [^"])* )" # Quoted value -) -""", re.VERBOSE) - -HSTORE_DELIMITER_RE = re.compile(r""" -[ ]* , [ ]* -""", re.VERBOSE) - - -def _parse_error(hstore_str, pos): - """format an unmarshalling error.""" - - ctx = 20 - hslen = len(hstore_str) - - parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)] - residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)] - - if len(parsed_tail) > ctx: - parsed_tail = '[...]' + parsed_tail[1:] - if len(residual) > ctx: - residual = residual[:-1] + '[...]' - - return "After %r, could not parse residual at position %d: %r" % ( - parsed_tail, pos, residual) - - -def _parse_hstore(hstore_str): - """Parse an hstore from its literal string representation. - - Attempts to approximate PG's hstore input parsing rules as closely as - possible. Although currently this is not strictly necessary, since the - current implementation of hstore's output syntax is stricter than what it - accepts as input, the documentation makes no guarantees that will always - be the case. 
- - - - """ - result = {} - pos = 0 - pair_match = HSTORE_PAIR_RE.match(hstore_str) - - while pair_match is not None: - key = pair_match.group('key').replace(r'\"', '"').replace( - "\\\\", "\\") - if pair_match.group('value_null'): - value = None - else: - value = pair_match.group('value').replace( - r'\"', '"').replace("\\\\", "\\") - result[key] = value - - pos += pair_match.end() - - delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:]) - if delim_match is not None: - pos += delim_match.end() - - pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:]) - - if pos != len(hstore_str): - raise ValueError(_parse_error(hstore_str, pos)) - - return result - - -def _serialize_hstore(val): - """Serialize a dictionary into an hstore literal. Keys and values must - both be strings (except None for values). - - """ - def esc(s, position): - if position == 'value' and s is None: - return 'NULL' - elif isinstance(s, util.string_types): - return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"') - else: - raise ValueError("%r in %s position is not a string." % - (s, position)) - - return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value')) - for k, v in val.items()) - - -class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine): - """Represent the Postgresql HSTORE type. - - The :class:`.HSTORE` type stores dictionaries containing strings, e.g.:: - - data_table = Table('data_table', metadata, - Column('id', Integer, primary_key=True), - Column('data', HSTORE) - ) - - with engine.connect() as conn: - conn.execute( - data_table.insert(), - data = {"key1": "value1", "key2": "value2"} - ) - - :class:`.HSTORE` provides for a wide range of operations, including: - - * Index operations:: - - data_table.c.data['some key'] == 'some value' - - * Containment operations:: - - data_table.c.data.has_key('some key') - - data_table.c.data.has_all(['one', 'two', 'three']) - - * Concatenation:: - - data_table.c.data + {"k1": "v1"} - - For a full list of special methods see - :class:`.HSTORE.comparator_factory`. - - For usage with the SQLAlchemy ORM, it may be desirable to combine - the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary - now part of the :mod:`sqlalchemy.ext.mutable` - extension. This extension will allow "in-place" changes to the - dictionary, e.g. addition of new keys or replacement/removal of existing - keys to/from the current dictionary, to produce events which will be - detected by the unit of work:: - - from sqlalchemy.ext.mutable import MutableDict - - class MyClass(Base): - __tablename__ = 'data_table' - - id = Column(Integer, primary_key=True) - data = Column(MutableDict.as_mutable(HSTORE)) - - my_object = session.query(MyClass).one() - - # in-place mutation, requires Mutable extension - # in order for the ORM to detect - my_object.data['some_key'] = 'some value' - - session.commit() - - When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM - will not be alerted to any changes to the contents of an existing - dictionary, unless that dictionary value is re-assigned to the - HSTORE-attribute itself, thus generating a change event. - - .. versionadded:: 0.8 - - .. seealso:: - - :class:`.hstore` - render the Postgresql ``hstore()`` function. - - - """ - - __visit_name__ = 'HSTORE' - hashable = False - - class comparator_factory(sqltypes.Concatenable.Comparator): - """Define comparison operations for :class:`.HSTORE`.""" - - def has_key(self, other): - """Boolean expression. Test for presence of a key. Note that the - key may be a SQLA expression. 
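
A hedged round-trip sketch of the two parsing/serialization helpers above
(pair order in the serialized form may vary)::

    _serialize_hstore({'k1': 'v1', 'k2': None})
    # -> '"k1"=>"v1", "k2"=>NULL'

    _parse_hstore('"k1"=>"v1", "k2"=>NULL')
    # -> {'k1': 'v1', 'k2': None}
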
- """ - return self.expr.op('?')(other) - - def has_all(self, other): - """Boolean expression. Test for presence of all keys in the PG - array. - """ - return self.expr.op('?&')(other) - - def has_any(self, other): - """Boolean expression. Test for presence of any key in the PG - array. - """ - return self.expr.op('?|')(other) - - def defined(self, key): - """Boolean expression. Test for presence of a non-NULL value for - the key. Note that the key may be a SQLA expression. - """ - return _HStoreDefinedFunction(self.expr, key) - - def contains(self, other, **kwargs): - """Boolean expression. Test if keys are a superset of the keys of - the argument hstore expression. - """ - return self.expr.op('@>')(other) - - def contained_by(self, other): - """Boolean expression. Test if keys are a proper subset of the - keys of the argument hstore expression. - """ - return self.expr.op('<@')(other) - - def __getitem__(self, other): - """Text expression. Get the value at a given key. Note that the - key may be a SQLA expression. - """ - return self.expr.op('->', precedence=5)(other) - - def delete(self, key): - """HStore expression. Returns the contents of this hstore with the - given key deleted. Note that the key may be a SQLA expression. - """ - if isinstance(key, dict): - key = _serialize_hstore(key) - return _HStoreDeleteFunction(self.expr, key) - - def slice(self, array): - """HStore expression. Returns a subset of an hstore defined by - array of keys. - """ - return _HStoreSliceFunction(self.expr, array) - - def keys(self): - """Text array expression. Returns array of keys.""" - return _HStoreKeysFunction(self.expr) - - def vals(self): - """Text array expression. Returns array of values.""" - return _HStoreValsFunction(self.expr) - - def array(self): - """Text array expression. Returns array of alternating keys and - values. - """ - return _HStoreArrayFunction(self.expr) - - def matrix(self): - """Text array expression. Returns array of [key, value] pairs.""" - return _HStoreMatrixFunction(self.expr) - - def _adapt_expression(self, op, other_comparator): - if isinstance(op, custom_op): - if op.opstring in ['?', '?&', '?|', '@>', '<@']: - return op, sqltypes.Boolean - elif op.opstring == '->': - return op, sqltypes.Text - return sqltypes.Concatenable.Comparator.\ - _adapt_expression(self, op, other_comparator) - - def bind_processor(self, dialect): - if util.py2k: - encoding = dialect.encoding - - def process(value): - if isinstance(value, dict): - return _serialize_hstore(value).encode(encoding) - else: - return value - else: - def process(value): - if isinstance(value, dict): - return _serialize_hstore(value) - else: - return value - return process - - def result_processor(self, dialect, coltype): - if util.py2k: - encoding = dialect.encoding - - def process(value): - if value is not None: - return _parse_hstore(value.decode(encoding)) - else: - return value - else: - def process(value): - if value is not None: - return _parse_hstore(value) - else: - return value - return process - - -ischema_names['hstore'] = HSTORE - - -class hstore(sqlfunc.GenericFunction): - """Construct an hstore value within a SQL expression using the - Postgresql ``hstore()`` function. - - The :class:`.hstore` function accepts one or two arguments as described - in the Postgresql documentation. - - E.g.:: - - from sqlalchemy.dialects.postgresql import array, hstore - - select([hstore('key1', 'value1')]) - - select([ - hstore( - array(['key1', 'key2', 'key3']), - array(['value1', 'value2', 'value3']) - ) - ]) - - .. 
versionadded:: 0.8 - - .. seealso:: - - :class:`.HSTORE` - the Postgresql ``HSTORE`` datatype. - - """ - type = HSTORE - name = 'hstore' - - -class _HStoreDefinedFunction(sqlfunc.GenericFunction): - type = sqltypes.Boolean - name = 'defined' - - -class _HStoreDeleteFunction(sqlfunc.GenericFunction): - type = HSTORE - name = 'delete' - - -class _HStoreSliceFunction(sqlfunc.GenericFunction): - type = HSTORE - name = 'slice' - - -class _HStoreKeysFunction(sqlfunc.GenericFunction): - type = ARRAY(sqltypes.Text) - name = 'akeys' - - -class _HStoreValsFunction(sqlfunc.GenericFunction): - type = ARRAY(sqltypes.Text) - name = 'avals' - - -class _HStoreArrayFunction(sqlfunc.GenericFunction): - type = ARRAY(sqltypes.Text) - name = 'hstore_to_array' - - -class _HStoreMatrixFunction(sqlfunc.GenericFunction): - type = ARRAY(sqltypes.Text) - name = 'hstore_to_matrix' diff --git a/python/sqlalchemy/dialects/postgresql/json.py b/python/sqlalchemy/dialects/postgresql/json.py deleted file mode 100644 index 13ebc4af..00000000 --- a/python/sqlalchemy/dialects/postgresql/json.py +++ /dev/null @@ -1,358 +0,0 @@ -# postgresql/json.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -from __future__ import absolute_import - -import json - -from .base import ischema_names -from ... import types as sqltypes -from ...sql.operators import custom_op -from ... import sql -from ...sql import elements, default_comparator -from ... import util - -__all__ = ('JSON', 'JSONElement', 'JSONB') - - -class JSONElement(elements.BinaryExpression): - """Represents accessing an element of a :class:`.JSON` value. - - The :class:`.JSONElement` is produced whenever using the Python index - operator on an expression that has the type :class:`.JSON`:: - - expr = mytable.c.json_data['some_key'] - - The expression typically compiles to a JSON access such as ``col -> key``. - Modifiers are then available for typing behavior, including - :meth:`.JSONElement.cast` and :attr:`.JSONElement.astext`. - - """ - - def __init__(self, left, right, astext=False, - opstring=None, result_type=None): - self._astext = astext - if opstring is None: - if hasattr(right, '__iter__') and \ - not isinstance(right, util.string_types): - opstring = "#>" - right = "{%s}" % ( - ", ".join(util.text_type(elem) for elem in right)) - else: - opstring = "->" - - self._json_opstring = opstring - operator = custom_op(opstring, precedence=5) - right = default_comparator._check_literal( - left, operator, right) - super(JSONElement, self).__init__( - left, right, operator, type_=result_type) - - @property - def astext(self): - """Convert this :class:`.JSONElement` to use the 'astext' operator - when evaluated. - - E.g.:: - - select([data_table.c.data['some key'].astext]) - - .. seealso:: - - :meth:`.JSONElement.cast` - - """ - if self._astext: - return self - else: - return JSONElement( - self.left, - self.right, - astext=True, - opstring=self._json_opstring + ">", - result_type=sqltypes.String(convert_unicode=True) - ) - - def cast(self, type_): - """Convert this :class:`.JSONElement` to apply both the 'astext' operator - as well as an explicit type cast when evaluated. - - E.g.:: - - select([data_table.c.data['some key'].cast(Integer)]) - - .. 
seealso:: - - :attr:`.JSONElement.astext` - - """ - if not self._astext: - return self.astext.cast(type_) - else: - return sql.cast(self, type_) - - -class JSON(sqltypes.TypeEngine): - """Represent the Postgresql JSON type. - - The :class:`.JSON` type stores arbitrary JSON format data, e.g.:: - - data_table = Table('data_table', metadata, - Column('id', Integer, primary_key=True), - Column('data', JSON) - ) - - with engine.connect() as conn: - conn.execute( - data_table.insert(), - data = {"key1": "value1", "key2": "value2"} - ) - - :class:`.JSON` provides several operations: - - * Index operations:: - - data_table.c.data['some key'] - - * Index operations returning text (required for text comparison):: - - data_table.c.data['some key'].astext == 'some value' - - * Index operations with a built-in CAST call:: - - data_table.c.data['some key'].cast(Integer) == 5 - - * Path index operations:: - - data_table.c.data[('key_1', 'key_2', ..., 'key_n')] - - * Path index operations returning text (required for text comparison):: - - data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\ - 'some value' - - Index operations return an instance of :class:`.JSONElement`, which - represents an expression such as ``column -> index``. This element then - defines methods such as :attr:`.JSONElement.astext` and - :meth:`.JSONElement.cast` for setting up type behavior. - - The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not - detect in-place mutations to the structure. In order to detect these, the - :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will - allow "in-place" changes to the datastructure to produce events which - will be detected by the unit of work. See the example at :class:`.HSTORE` - for a simple example involving a dictionary. - - Custom serializers and deserializers are specified at the dialect level, - that is using :func:`.create_engine`. The reason for this is that when - using psycopg2, the DBAPI only allows serializers at the per-cursor - or per-connection level. E.g.:: - - engine = create_engine("postgresql://scott:tiger@localhost/test", - json_serializer=my_serialize_fn, - json_deserializer=my_deserialize_fn - ) - - When using the psycopg2 dialect, the json_deserializer is registered - against the database using ``psycopg2.extras.register_default_json``. - - .. versionadded:: 0.9 - - """ - - __visit_name__ = 'JSON' - - def __init__(self, none_as_null=False): - """Construct a :class:`.JSON` type. - - :param none_as_null: if True, persist the value ``None`` as a - SQL NULL value, not the JSON encoding of ``null``. Note that - when this flag is False, the :func:`.null` construct can still - be used to persist a NULL value:: - - from sqlalchemy import null - conn.execute(table.insert(), data=null()) - - .. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null` - is now supported in order to persist a NULL value. 
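A short sketch of the difference (``data_table`` is assumed to have a
``JSON`` column named ``data``)::

    from sqlalchemy import null

    # with none_as_null=False (the default), None round-trips
    # as the JSON encoding of 'null'
    conn.execute(data_table.insert(), data=None)

    # null() always persists SQL NULL, regardless of the flag
    conn.execute(data_table.insert(), data=null())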
- - """ - self.none_as_null = none_as_null - - class comparator_factory(sqltypes.Concatenable.Comparator): - """Define comparison operations for :class:`.JSON`.""" - - def __getitem__(self, other): - """Get the value at a given key.""" - - return JSONElement(self.expr, other) - - def _adapt_expression(self, op, other_comparator): - if isinstance(op, custom_op): - if op.opstring == '->': - return op, sqltypes.Text - return sqltypes.Concatenable.Comparator.\ - _adapt_expression(self, op, other_comparator) - - def bind_processor(self, dialect): - json_serializer = dialect._json_serializer or json.dumps - if util.py2k: - encoding = dialect.encoding - - def process(value): - if isinstance(value, elements.Null) or ( - value is None and self.none_as_null - ): - return None - return json_serializer(value).encode(encoding) - else: - def process(value): - if isinstance(value, elements.Null) or ( - value is None and self.none_as_null - ): - return None - return json_serializer(value) - return process - - def result_processor(self, dialect, coltype): - json_deserializer = dialect._json_deserializer or json.loads - if util.py2k: - encoding = dialect.encoding - - def process(value): - if value is None: - return None - return json_deserializer(value.decode(encoding)) - else: - def process(value): - if value is None: - return None - return json_deserializer(value) - return process - - -ischema_names['json'] = JSON - - -class JSONB(JSON): - """Represent the Postgresql JSONB type. - - The :class:`.JSONB` type stores arbitrary JSONB format data, e.g.:: - - data_table = Table('data_table', metadata, - Column('id', Integer, primary_key=True), - Column('data', JSONB) - ) - - with engine.connect() as conn: - conn.execute( - data_table.insert(), - data = {"key1": "value1", "key2": "value2"} - ) - - :class:`.JSONB` provides several operations: - - * Index operations:: - - data_table.c.data['some key'] - - * Index operations returning text (required for text comparison):: - - data_table.c.data['some key'].astext == 'some value' - - * Index operations with a built-in CAST call:: - - data_table.c.data['some key'].cast(Integer) == 5 - - * Path index operations:: - - data_table.c.data[('key_1', 'key_2', ..., 'key_n')] - - * Path index operations returning text (required for text comparison):: - - data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\ - 'some value' - - Index operations return an instance of :class:`.JSONElement`, which - represents an expression such as ``column -> index``. This element then - defines methods such as :attr:`.JSONElement.astext` and - :meth:`.JSONElement.cast` for setting up type behavior. - - The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not - detect in-place mutations to the structure. In order to detect these, the - :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will - allow "in-place" changes to the datastructure to produce events which - will be detected by the unit of work. See the example at :class:`.HSTORE` - for a simple example involving a dictionary. - - Custom serializers and deserializers are specified at the dialect level, - that is using :func:`.create_engine`. The reason for this is that when - using psycopg2, the DBAPI only allows serializers at the per-cursor - or per-connection level. 
E.g.:: - - engine = create_engine("postgresql://scott:tiger@localhost/test", - json_serializer=my_serialize_fn, - json_deserializer=my_deserialize_fn - ) - - When using the psycopg2 dialect, the json_deserializer is registered - against the database using ``psycopg2.extras.register_default_json``. - - .. versionadded:: 0.9.7 - - """ - - __visit_name__ = 'JSONB' - hashable = False - - class comparator_factory(sqltypes.Concatenable.Comparator): - """Define comparison operations for :class:`.JSON`.""" - - def __getitem__(self, other): - """Get the value at a given key.""" - - return JSONElement(self.expr, other) - - def _adapt_expression(self, op, other_comparator): - # How does one do equality?? jsonb also has "=" eg. - # '[1,2,3]'::jsonb = '[1,2,3]'::jsonb - if isinstance(op, custom_op): - if op.opstring in ['?', '?&', '?|', '@>', '<@']: - return op, sqltypes.Boolean - if op.opstring == '->': - return op, sqltypes.Text - return sqltypes.Concatenable.Comparator.\ - _adapt_expression(self, op, other_comparator) - - def has_key(self, other): - """Boolean expression. Test for presence of a key. Note that the - key may be a SQLA expression. - """ - return self.expr.op('?')(other) - - def has_all(self, other): - """Boolean expression. Test for presence of all keys in jsonb - """ - return self.expr.op('?&')(other) - - def has_any(self, other): - """Boolean expression. Test for presence of any key in jsonb - """ - return self.expr.op('?|')(other) - - def contains(self, other, **kwargs): - """Boolean expression. Test if keys (or array) are a superset of/contained - the keys of the argument jsonb expression. - """ - return self.expr.op('@>')(other) - - def contained_by(self, other): - """Boolean expression. Test if keys are a proper subset of the - keys of the argument jsonb expression. - """ - return self.expr.op('<@')(other) - -ischema_names['jsonb'] = JSONB diff --git a/python/sqlalchemy/dialects/postgresql/pg8000.py b/python/sqlalchemy/dialects/postgresql/pg8000.py deleted file mode 100644 index c71f689a..00000000 --- a/python/sqlalchemy/dialects/postgresql/pg8000.py +++ /dev/null @@ -1,264 +0,0 @@ -# postgresql/pg8000.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: postgresql+pg8000 - :name: pg8000 - :dbapi: pg8000 - :connectstring: \ -postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...] - :url: https://pythonhosted.org/pg8000/ - - -.. _pg8000_unicode: - -Unicode -------- - -pg8000 will encode / decode string values between it and the server using the -PostgreSQL ``client_encoding`` parameter; by default this is the value in -the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. -Typically, this can be changed to ``utf-8``, as a more useful default:: - - #client_encoding = sql_ascii # actually, defaults to database - # encoding - client_encoding = utf8 - -The ``client_encoding`` can be overriden for a session by executing the SQL: - -SET CLIENT_ENCODING TO 'utf8'; - -SQLAlchemy will execute this SQL on all new connections based on the value -passed to :func:`.create_engine` using the ``client_encoding`` parameter:: - - engine = create_engine( - "postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8') - - -.. 
_pg8000_isolation_level: - -pg8000 Transaction Isolation Level -------------------------------------- - -The pg8000 dialect offers the same isolation level settings as that -of the :ref:`psycopg2 ` dialect: - -* ``READ COMMITTED`` -* ``READ UNCOMMITTED`` -* ``REPEATABLE READ`` -* ``SERIALIZABLE`` -* ``AUTOCOMMIT`` - -.. versionadded:: 0.9.5 support for AUTOCOMMIT isolation level when using - pg8000. - -.. seealso:: - - :ref:`postgresql_isolation_level` - - :ref:`psycopg2_isolation_level` - - -""" -from ... import util, exc -import decimal -from ... import processors -from ... import types as sqltypes -from .base import ( - PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext, - _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES) -import re -from sqlalchemy.dialects.postgresql.json import JSON - - -class _PGNumeric(sqltypes.Numeric): - def result_processor(self, dialect, coltype): - if self.asdecimal: - if coltype in _FLOAT_TYPES: - return processors.to_decimal_processor_factory( - decimal.Decimal, self._effective_decimal_return_scale) - elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: - # pg8000 returns Decimal natively for 1700 - return None - else: - raise exc.InvalidRequestError( - "Unknown PG numeric type: %d" % coltype) - else: - if coltype in _FLOAT_TYPES: - # pg8000 returns float natively for 701 - return None - elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: - return processors.to_float - else: - raise exc.InvalidRequestError( - "Unknown PG numeric type: %d" % coltype) - - -class _PGNumericNoBind(_PGNumeric): - def bind_processor(self, dialect): - return None - - -class _PGJSON(JSON): - - def result_processor(self, dialect, coltype): - if dialect._dbapi_version > (1, 10, 1): - return None # Has native JSON - else: - return super(_PGJSON, self).result_processor(dialect, coltype) - - -class PGExecutionContext_pg8000(PGExecutionContext): - pass - - -class PGCompiler_pg8000(PGCompiler): - def visit_mod_binary(self, binary, operator, **kw): - return self.process(binary.left, **kw) + " %% " + \ - self.process(binary.right, **kw) - - def post_process_text(self, text): - if '%%' in text: - util.warn("The SQLAlchemy postgresql dialect " - "now automatically escapes '%' in text() " - "expressions to '%%'.") - return text.replace('%', '%%') - - -class PGIdentifierPreparer_pg8000(PGIdentifierPreparer): - def _escape_identifier(self, value): - value = value.replace(self.escape_quote, self.escape_to_quote) - return value.replace('%', '%%') - - -class PGDialect_pg8000(PGDialect): - driver = 'pg8000' - - supports_unicode_statements = True - - supports_unicode_binds = True - - default_paramstyle = 'format' - supports_sane_multi_rowcount = True - execution_ctx_cls = PGExecutionContext_pg8000 - statement_compiler = PGCompiler_pg8000 - preparer = PGIdentifierPreparer_pg8000 - description_encoding = 'use_encoding' - - colspecs = util.update_copy( - PGDialect.colspecs, - { - sqltypes.Numeric: _PGNumericNoBind, - sqltypes.Float: _PGNumeric, - JSON: _PGJSON, - } - ) - - def __init__(self, client_encoding=None, **kwargs): - PGDialect.__init__(self, **kwargs) - self.client_encoding = client_encoding - - def initialize(self, connection): - self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14) - super(PGDialect_pg8000, self).initialize(connection) - - @util.memoized_property - def _dbapi_version(self): - if self.dbapi and hasattr(self.dbapi, '__version__'): - return tuple( - [ - int(x) for x in re.findall( - r'(\d+)(?:[-\.]?|$)', self.dbapi.__version__)]) - else: - return 
(99, 99, 99) - - @classmethod - def dbapi(cls): - return __import__('pg8000') - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - if 'port' in opts: - opts['port'] = int(opts['port']) - opts.update(url.query) - return ([], opts) - - def is_disconnect(self, e, connection, cursor): - return "connection is closed" in str(e) - - def set_isolation_level(self, connection, level): - level = level.replace('_', ' ') - - # adjust for ConnectionFairy possibly being present - if hasattr(connection, 'connection'): - connection = connection.connection - - if level == 'AUTOCOMMIT': - connection.autocommit = True - elif level in self._isolation_lookup: - connection.autocommit = False - cursor = connection.cursor() - cursor.execute( - "SET SESSION CHARACTERISTICS AS TRANSACTION " - "ISOLATION LEVEL %s" % level) - cursor.execute("COMMIT") - cursor.close() - else: - raise exc.ArgumentError( - "Invalid value '%s' for isolation_level. " - "Valid isolation levels for %s are %s or AUTOCOMMIT" % - (level, self.name, ", ".join(self._isolation_lookup)) - ) - - def set_client_encoding(self, connection, client_encoding): - # adjust for ConnectionFairy possibly being present - if hasattr(connection, 'connection'): - connection = connection.connection - - cursor = connection.cursor() - cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'") - cursor.execute("COMMIT") - cursor.close() - - def do_begin_twophase(self, connection, xid): - connection.connection.tpc_begin((0, xid, '')) - - def do_prepare_twophase(self, connection, xid): - connection.connection.tpc_prepare() - - def do_rollback_twophase( - self, connection, xid, is_prepared=True, recover=False): - connection.connection.tpc_rollback((0, xid, '')) - - def do_commit_twophase( - self, connection, xid, is_prepared=True, recover=False): - connection.connection.tpc_commit((0, xid, '')) - - def do_recover_twophase(self, connection): - return [row[1] for row in connection.connection.tpc_recover()] - - def on_connect(self): - fns = [] - if self.client_encoding is not None: - def on_connect(conn): - self.set_client_encoding(conn, self.client_encoding) - fns.append(on_connect) - - if self.isolation_level is not None: - def on_connect(conn): - self.set_isolation_level(conn, self.isolation_level) - fns.append(on_connect) - - if len(fns) > 0: - def on_connect(conn): - for fn in fns: - fn(conn) - return on_connect - else: - return None - -dialect = PGDialect_pg8000 diff --git a/python/sqlalchemy/dialects/postgresql/psycopg2.py b/python/sqlalchemy/dialects/postgresql/psycopg2.py deleted file mode 100644 index 36a9d7bf..00000000 --- a/python/sqlalchemy/dialects/postgresql/psycopg2.py +++ /dev/null @@ -1,726 +0,0 @@ -# postgresql/psycopg2.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: postgresql+psycopg2 - :name: psycopg2 - :dbapi: psycopg2 - :connectstring: postgresql+psycopg2://user:password@host:port/dbname\ -[?key=value&key=value...] - :url: http://pypi.python.org/pypi/psycopg2/ - -psycopg2 Connect Arguments ------------------------------------ - -psycopg2-specific keyword arguments which are accepted by -:func:`.create_engine()` are: - -* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL - statements which support this feature. 
What this essentially means from a - psycopg2 point of view is that the cursor is created using a name, e.g. - ``connection.cursor('some name')``, which has the effect that result rows - are not immediately pre-fetched and buffered after statement execution, but - are instead left on the server and only retrieved as needed. SQLAlchemy's - :class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering - behavior when this feature is enabled, such that groups of 100 rows at a - time are fetched over the wire to reduce conversational overhead. - Note that the ``stream_results=True`` execution option is a more targeted - way of enabling this mode on a per-execution basis. -* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode - per connection. True by default. - - .. seealso:: - - :ref:`psycopg2_disable_native_unicode` - -* ``isolation_level``: This option, available for all PostgreSQL dialects, - includes the ``AUTOCOMMIT`` isolation level when using the psycopg2 - dialect. - - .. seealso:: - - :ref:`psycopg2_isolation_level` - -* ``client_encoding``: sets the client encoding in a libpq-agnostic way, - using psycopg2's ``set_client_encoding()`` method. - - .. seealso:: - - :ref:`psycopg2_unicode` - -Unix Domain Connections ------------------------- - -psycopg2 supports connecting via Unix domain connections. When the ``host`` -portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2, -which specifies Unix-domain communication rather than TCP/IP communication:: - - create_engine("postgresql+psycopg2://user:password@/dbname") - -By default, the socket file used is to connect to a Unix-domain socket -in ``/tmp``, or whatever socket directory was specified when PostgreSQL -was built. This value can be overridden by passing a pathname to psycopg2, -using ``host`` as an additional keyword argument:: - - create_engine("postgresql+psycopg2://user:password@/dbname?\ -host=/var/lib/postgresql") - -See also: - -`PQconnectdbParams `_ - -.. _psycopg2_execution_options: - -Per-Statement/Connection Execution Options -------------------------------------------- - -The following DBAPI-specific options are respected when used with -:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`, -:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs: - -* ``isolation_level`` - Set the transaction isolation level for the lifespan of a - :class:`.Connection` (can only be set on a connection, not a statement - or query). See :ref:`psycopg2_isolation_level`. - -* ``stream_results`` - Enable or disable usage of psycopg2 server side cursors - - this feature makes use of "named" cursors in combination with special - result handling methods so that result rows are not fully buffered. - If ``None`` or not set, the ``server_side_cursors`` option of the - :class:`.Engine` is used. - -* ``max_row_buffer`` - when using ``stream_results``, an integer value that - specifies the maximum number of rows to buffer at a time. This is - interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the - buffer will grow to ultimately store 1000 rows at a time. - - .. versionadded:: 1.0.6 - -.. _psycopg2_unicode: - -Unicode with Psycopg2 ----------------------- - -By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE`` -extension, such that the DBAPI receives and returns all strings as Python -Unicode objects directly - SQLAlchemy passes these values through without -change. 
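For reference, the per-connection registration that this mode performs is
roughly equivalent to the following sketch using psycopg2 directly::

    import psycopg2.extensions

    def enable_native_unicode(dbapi_conn):
        # install the UNICODE / UNICODEARRAY typecasters on a raw
        # psycopg2 connection, as the dialect does by default
        psycopg2.extensions.register_type(
            psycopg2.extensions.UNICODE, dbapi_conn)
        psycopg2.extensions.register_type(
            psycopg2.extensions.UNICODEARRAY, dbapi_conn)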
Psycopg2 here will encode/decode string values based on the -current "client encoding" setting; by default this is the value in -the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. -Typically, this can be changed to ``utf8``, as a more useful default:: - - # postgresql.conf file - - # client_encoding = sql_ascii # actually, defaults to database - # encoding - client_encoding = utf8 - -A second way to affect the client encoding is to set it within Psycopg2 -locally. SQLAlchemy will call psycopg2's -:meth:`psycopg2:connection.set_client_encoding` method -on all new connections based on the value passed to -:func:`.create_engine` using the ``client_encoding`` parameter:: - - # set_client_encoding() setting; - # works for *all* Postgresql versions - engine = create_engine("postgresql://user:pass@host/dbname", - client_encoding='utf8') - -This overrides the encoding specified in the Postgresql client configuration. -When using the parameter in this way, the psycopg2 driver emits -``SET client_encoding TO 'utf8'`` on the connection explicitly, and works -in all Postgresql versions. - -Note that the ``client_encoding`` setting as passed to :func:`.create_engine` -is **not the same** as the more recently added ``client_encoding`` parameter -now supported by libpq directly. This is enabled when ``client_encoding`` -is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed -using the :paramref:`.create_engine.connect_args` parameter:: - - # libpq direct parameter setting; - # only works for Postgresql **9.1 and above** - engine = create_engine("postgresql://user:pass@host/dbname", - connect_args={'client_encoding': 'utf8'}) - - # using the query string is equivalent - engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8") - -The above parameter was only added to libpq as of version 9.1 of Postgresql, -so using the previous method is better for cross-version support. - -.. _psycopg2_disable_native_unicode: - -Disabling Native Unicode -^^^^^^^^^^^^^^^^^^^^^^^^ - -SQLAlchemy can also be instructed to skip the usage of the psycopg2 -``UNICODE`` extension and to instead utilize its own unicode encode/decode -services, which are normally reserved only for those DBAPIs that don't -fully support unicode directly. Passing ``use_native_unicode=False`` to -:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``. -SQLAlchemy will instead encode data itself into Python bytestrings on the way -in and coerce from bytes on the way back, -using the value of the :func:`.create_engine` ``encoding`` parameter, which -defaults to ``utf-8``. -SQLAlchemy's own unicode encode/decode functionality is steadily becoming -obsolete as most DBAPIs now support unicode fully. - -Bound Parameter Styles ----------------------- - -The default parameter style for the psycopg2 dialect is "pyformat", where -SQL is rendered using ``%(paramname)s`` style. This format has the limitation -that it does not accommodate the unusual case of parameter names that -actually contain percent or parenthesis symbols; as SQLAlchemy in many cases -generates bound parameter names based on the name of a column, the presence -of these characters in a column name can lead to problems. - -There are two solutions to the issue of a :class:`.schema.Column` that contains -one of these characters in its name. 
One is to specify the -:paramref:`.schema.Column.key` for columns that have such names:: - - measurement = Table('measurement', metadata, - Column('Size (meters)', Integer, key='size_meters') - ) - -Above, an INSERT statement such as ``measurement.insert()`` will use -``size_meters`` as the parameter name, and a SQL expression such as -``measurement.c.size_meters > 10`` will derive the bound parameter name -from the ``size_meters`` key as well. - -.. versionchanged:: 1.0.0 - SQL expressions will use :attr:`.Column.key` - as the source of naming when anonymous bound parameters are created - in SQL expressions; previously, this behavior only applied to - :meth:`.Table.insert` and :meth:`.Table.update` parameter names. - -The other solution is to use a positional format; psycopg2 allows use of the -"format" paramstyle, which can be passed to -:paramref:`.create_engine.paramstyle`:: - - engine = create_engine( - 'postgresql://scott:tiger@localhost:5432/test', paramstyle='format') - -With the above engine, instead of a statement like:: - - INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s) - {'Size (meters)': 1} - -we instead see:: - - INSERT INTO measurement ("Size (meters)") VALUES (%s) - (1, ) - -Where above, the dictionary style is converted into a tuple with positional -style. - - -Transactions ------------- - -The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations. - -.. _psycopg2_isolation_level: - -Psycopg2 Transaction Isolation Level -------------------------------------- - -As discussed in :ref:`postgresql_isolation_level`, -all Postgresql dialects support setting of transaction isolation level -both via the ``isolation_level`` parameter passed to :func:`.create_engine`, -as well as the ``isolation_level`` argument used by -:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these -options make use of psycopg2's ``set_isolation_level()`` connection method, -rather than emitting a Postgresql directive; this is because psycopg2's -API-level setting is always emitted at the start of each transaction in any -case. - -The psycopg2 dialect supports these constants for isolation level: - -* ``READ COMMITTED`` -* ``READ UNCOMMITTED`` -* ``REPEATABLE READ`` -* ``SERIALIZABLE`` -* ``AUTOCOMMIT`` - -.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using - psycopg2. - -.. seealso:: - - :ref:`postgresql_isolation_level` - - :ref:`pg8000_isolation_level` - - -NOTICE logging ---------------- - -The psycopg2 dialect will log Postgresql NOTICE messages via the -``sqlalchemy.dialects.postgresql`` logger:: - - import logging - logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) - -.. _psycopg2_hstore:: - -HSTORE type ------------- - -The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of -the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension -by default when psycopg2 version 2.4 or greater is used, and -it is detected that the target database has the HSTORE type set up for use. -In other words, when the dialect makes the first -connection, a sequence like the following is performed: - -1. Request the available HSTORE oids using - ``psycopg2.extras.HstoreAdapter.get_oids()``. - If this function returns a list of HSTORE identifiers, we then determine - that the ``HSTORE`` extension is present. - This function is **skipped** if the version of psycopg2 installed is - less than version 2.4. - -2. 
If the ``use_native_hstore`` flag is at its default of ``True``, and - we've detected that ``HSTORE`` oids are available, the - ``psycopg2.extensions.register_hstore()`` extension is invoked for all - connections. - -The ``register_hstore()`` extension has the effect of **all Python -dictionaries being accepted as parameters regardless of the type of target -column in SQL**. The dictionaries are converted by this extension into a -textual HSTORE expression. If this behavior is not desired, disable the -use of the hstore extension by setting ``use_native_hstore`` to ``False`` as -follows:: - - engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", - use_native_hstore=False) - -The ``HSTORE`` type is **still supported** when the -``psycopg2.extensions.register_hstore()`` extension is not used. It merely -means that the coercion between Python dictionaries and the HSTORE -string format, on both the parameter side and the result side, will take -place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` -which may be more performant. - -""" -from __future__ import absolute_import - -import re -import logging - -from ... import util, exc -import decimal -from ... import processors -from ...engine import result as _result -from ...sql import expression -from ... import types as sqltypes -from .base import PGDialect, PGCompiler, \ - PGIdentifierPreparer, PGExecutionContext, \ - ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\ - _INT_TYPES, UUID -from .hstore import HSTORE -from .json import JSON, JSONB - -try: - from uuid import UUID as _python_UUID -except ImportError: - _python_UUID = None - - -logger = logging.getLogger('sqlalchemy.dialects.postgresql') - - -class _PGNumeric(sqltypes.Numeric): - def bind_processor(self, dialect): - return None - - def result_processor(self, dialect, coltype): - if self.asdecimal: - if coltype in _FLOAT_TYPES: - return processors.to_decimal_processor_factory( - decimal.Decimal, - self._effective_decimal_return_scale) - elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: - # pg8000 returns Decimal natively for 1700 - return None - else: - raise exc.InvalidRequestError( - "Unknown PG numeric type: %d" % coltype) - else: - if coltype in _FLOAT_TYPES: - # pg8000 returns float natively for 701 - return None - elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: - return processors.to_float - else: - raise exc.InvalidRequestError( - "Unknown PG numeric type: %d" % coltype) - - -class _PGEnum(ENUM): - def result_processor(self, dialect, coltype): - if self.native_enum and util.py2k and self.convert_unicode is True: - # we can't easily use PG's extensions here because - # the OID is on the fly, and we need to give it a python - # function anyway - not really worth it. 
- self.convert_unicode = "force_nocheck" - return super(_PGEnum, self).result_processor(dialect, coltype) - - -class _PGHStore(HSTORE): - def bind_processor(self, dialect): - if dialect._has_native_hstore: - return None - else: - return super(_PGHStore, self).bind_processor(dialect) - - def result_processor(self, dialect, coltype): - if dialect._has_native_hstore: - return None - else: - return super(_PGHStore, self).result_processor(dialect, coltype) - - -class _PGJSON(JSON): - - def result_processor(self, dialect, coltype): - if dialect._has_native_json: - return None - else: - return super(_PGJSON, self).result_processor(dialect, coltype) - - -class _PGJSONB(JSONB): - - def result_processor(self, dialect, coltype): - if dialect._has_native_jsonb: - return None - else: - return super(_PGJSONB, self).result_processor(dialect, coltype) - - -class _PGUUID(UUID): - def bind_processor(self, dialect): - if not self.as_uuid and dialect.use_native_uuid: - nonetype = type(None) - - def process(value): - if value is not None: - value = _python_UUID(value) - return value - return process - - def result_processor(self, dialect, coltype): - if not self.as_uuid and dialect.use_native_uuid: - def process(value): - if value is not None: - value = str(value) - return value - return process - -# When we're handed literal SQL, ensure it's a SELECT query. Since -# 8.3, combining cursors and "FOR UPDATE" has been fine. -SERVER_SIDE_CURSOR_RE = re.compile( - r'\s*SELECT', - re.I | re.UNICODE) - -_server_side_id = util.counter() - - -class PGExecutionContext_psycopg2(PGExecutionContext): - def create_cursor(self): - # TODO: coverage for server side cursors + select.for_update() - - if self.dialect.server_side_cursors: - is_server_side = \ - self.execution_options.get('stream_results', True) and ( - (self.compiled and isinstance(self.compiled.statement, - expression.Selectable) - or - ( - (not self.compiled or - isinstance(self.compiled.statement, - expression.TextClause)) - and self.statement and SERVER_SIDE_CURSOR_RE.match( - self.statement)) - ) - ) - else: - is_server_side = \ - self.execution_options.get('stream_results', False) - - self.__is_server_side = is_server_side - if is_server_side: - # use server-side cursors: - # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html - ident = "c_%s_%s" % (hex(id(self))[2:], - hex(_server_side_id())[2:]) - return self._dbapi_connection.cursor(ident) - else: - return self._dbapi_connection.cursor() - - def get_result_proxy(self): - # TODO: ouch - if logger.isEnabledFor(logging.INFO): - self._log_notices(self.cursor) - - if self.__is_server_side: - return _result.BufferedRowResultProxy(self) - else: - return _result.ResultProxy(self) - - def _log_notices(self, cursor): - for notice in cursor.connection.notices: - # NOTICE messages have a - # newline character at the end - logger.info(notice.rstrip()) - - cursor.connection.notices[:] = [] - - -class PGCompiler_psycopg2(PGCompiler): - def visit_mod_binary(self, binary, operator, **kw): - return self.process(binary.left, **kw) + " %% " + \ - self.process(binary.right, **kw) - - def post_process_text(self, text): - return text.replace('%', '%%') - - -class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer): - def _escape_identifier(self, value): - value = value.replace(self.escape_quote, self.escape_to_quote) - return value.replace('%', '%%') - - -class PGDialect_psycopg2(PGDialect): - driver = 'psycopg2' - if util.py2k: - supports_unicode_statements = False - - default_paramstyle = 'pyformat' - # set to 
true based on psycopg2 version - supports_sane_multi_rowcount = False - execution_ctx_cls = PGExecutionContext_psycopg2 - statement_compiler = PGCompiler_psycopg2 - preparer = PGIdentifierPreparer_psycopg2 - psycopg2_version = (0, 0) - - FEATURE_VERSION_MAP = dict( - native_json=(2, 5), - native_jsonb=(2, 5, 4), - sane_multi_rowcount=(2, 0, 9), - array_oid=(2, 4, 3), - hstore_adapter=(2, 4) - ) - - _has_native_hstore = False - _has_native_json = False - _has_native_jsonb = False - - engine_config_types = PGDialect.engine_config_types.union([ - ('use_native_unicode', util.asbool), - ]) - - colspecs = util.update_copy( - PGDialect.colspecs, - { - sqltypes.Numeric: _PGNumeric, - ENUM: _PGEnum, # needs force_unicode - sqltypes.Enum: _PGEnum, # needs force_unicode - HSTORE: _PGHStore, - JSON: _PGJSON, - JSONB: _PGJSONB, - UUID: _PGUUID - } - ) - - def __init__(self, server_side_cursors=False, use_native_unicode=True, - client_encoding=None, - use_native_hstore=True, use_native_uuid=True, - **kwargs): - PGDialect.__init__(self, **kwargs) - self.server_side_cursors = server_side_cursors - self.use_native_unicode = use_native_unicode - self.use_native_hstore = use_native_hstore - self.use_native_uuid = use_native_uuid - self.supports_unicode_binds = use_native_unicode - self.client_encoding = client_encoding - if self.dbapi and hasattr(self.dbapi, '__version__'): - m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', - self.dbapi.__version__) - if m: - self.psycopg2_version = tuple( - int(x) - for x in m.group(1, 2, 3) - if x is not None) - - def initialize(self, connection): - super(PGDialect_psycopg2, self).initialize(connection) - self._has_native_hstore = self.use_native_hstore and \ - self._hstore_oids(connection.connection) \ - is not None - self._has_native_json = \ - self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_json'] - self._has_native_jsonb = \ - self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_jsonb'] - - # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9 - self.supports_sane_multi_rowcount = \ - self.psycopg2_version >= \ - self.FEATURE_VERSION_MAP['sane_multi_rowcount'] - - @classmethod - def dbapi(cls): - import psycopg2 - return psycopg2 - - @classmethod - def _psycopg2_extensions(cls): - from psycopg2 import extensions - return extensions - - @classmethod - def _psycopg2_extras(cls): - from psycopg2 import extras - return extras - - @util.memoized_property - def _isolation_lookup(self): - extensions = self._psycopg2_extensions() - return { - 'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT, - 'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED, - 'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED, - 'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ, - 'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE - } - - def set_isolation_level(self, connection, level): - try: - level = self._isolation_lookup[level.replace('_', ' ')] - except KeyError: - raise exc.ArgumentError( - "Invalid value '%s' for isolation_level. 
" - "Valid isolation levels for %s are %s" % - (level, self.name, ", ".join(self._isolation_lookup)) - ) - - connection.set_isolation_level(level) - - def on_connect(self): - extras = self._psycopg2_extras() - extensions = self._psycopg2_extensions() - - fns = [] - if self.client_encoding is not None: - def on_connect(conn): - conn.set_client_encoding(self.client_encoding) - fns.append(on_connect) - - if self.isolation_level is not None: - def on_connect(conn): - self.set_isolation_level(conn, self.isolation_level) - fns.append(on_connect) - - if self.dbapi and self.use_native_uuid: - def on_connect(conn): - extras.register_uuid(None, conn) - fns.append(on_connect) - - if self.dbapi and self.use_native_unicode: - def on_connect(conn): - extensions.register_type(extensions.UNICODE, conn) - extensions.register_type(extensions.UNICODEARRAY, conn) - fns.append(on_connect) - - if self.dbapi and self.use_native_hstore: - def on_connect(conn): - hstore_oids = self._hstore_oids(conn) - if hstore_oids is not None: - oid, array_oid = hstore_oids - kw = {'oid': oid} - if util.py2k: - kw['unicode'] = True - if self.psycopg2_version >= \ - self.FEATURE_VERSION_MAP['array_oid']: - kw['array_oid'] = array_oid - extras.register_hstore(conn, **kw) - fns.append(on_connect) - - if self.dbapi and self._json_deserializer: - def on_connect(conn): - if self._has_native_json: - extras.register_default_json( - conn, loads=self._json_deserializer) - if self._has_native_jsonb: - extras.register_default_jsonb( - conn, loads=self._json_deserializer) - fns.append(on_connect) - - if fns: - def on_connect(conn): - for fn in fns: - fn(conn) - return on_connect - else: - return None - - @util.memoized_instancemethod - def _hstore_oids(self, conn): - if self.psycopg2_version >= self.FEATURE_VERSION_MAP['hstore_adapter']: - extras = self._psycopg2_extras() - oids = extras.HstoreAdapter.get_oids(conn) - if oids is not None and oids[0]: - return oids[0:2] - return None - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - if 'port' in opts: - opts['port'] = int(opts['port']) - opts.update(url.query) - return ([], opts) - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, self.dbapi.Error): - # check the "closed" flag. this might not be - # present on old psycopg2 versions. Also, - # this flag doesn't actually help in a lot of disconnect - # situations, so don't rely on it. - if getattr(connection, 'closed', False): - return True - - # checks based on strings. in the case that .closed - # didn't cut it, fall back onto these. - str_e = str(e).partition("\n")[0] - for msg in [ - # these error messages from libpq: interfaces/libpq/fe-misc.c - # and interfaces/libpq/fe-secure.c. - 'terminating connection', - 'closed the connection', - 'connection not open', - 'could not receive data from server', - 'could not send data to server', - # psycopg2 client errors, psycopg2/conenction.h, - # psycopg2/cursor.h - 'connection already closed', - 'cursor already closed', - # not sure where this path is originally from, it may - # be obsolete. It really says "losed", not "closed". 
- 'losed the connection unexpectedly', - # these can occur in newer SSL - 'connection has been closed unexpectedly', - 'SSL SYSCALL error: Bad file descriptor', - 'SSL SYSCALL error: EOF detected', - ]: - idx = str_e.find(msg) - if idx >= 0 and '"' not in str_e[:idx]: - return True - return False - -dialect = PGDialect_psycopg2 diff --git a/python/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/python/sqlalchemy/dialects/postgresql/psycopg2cffi.py deleted file mode 100644 index 97f241d2..00000000 --- a/python/sqlalchemy/dialects/postgresql/psycopg2cffi.py +++ /dev/null @@ -1,61 +0,0 @@ -# testing/engines.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -""" -.. dialect:: postgresql+psycopg2cffi - :name: psycopg2cffi - :dbapi: psycopg2cffi - :connectstring: \ -postgresql+psycopg2cffi://user:password@host:port/dbname\ -[?key=value&key=value...] - :url: http://pypi.python.org/pypi/psycopg2cffi/ - -``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C -layer. This makes it suitable for use in e.g. PyPy. Documentation -is as per ``psycopg2``. - -.. versionadded:: 1.0.0 - -.. seealso:: - - :mod:`sqlalchemy.dialects.postgresql.psycopg2` - -""" -from .psycopg2 import PGDialect_psycopg2 - - -class PGDialect_psycopg2cffi(PGDialect_psycopg2): - driver = 'psycopg2cffi' - supports_unicode_statements = True - - # psycopg2cffi's first release is 2.5.0, but reports - # __version__ as 2.4.4. Subsequent releases seem to have - # fixed this. - - FEATURE_VERSION_MAP = dict( - native_json=(2, 4, 4), - native_jsonb=(2, 7, 1), - sane_multi_rowcount=(2, 4, 4), - array_oid=(2, 4, 4), - hstore_adapter=(2, 4, 4) - ) - - @classmethod - def dbapi(cls): - return __import__('psycopg2cffi') - - @classmethod - def _psycopg2_extensions(cls): - root = __import__('psycopg2cffi', fromlist=['extensions']) - return root.extensions - - @classmethod - def _psycopg2_extras(cls): - root = __import__('psycopg2cffi', fromlist=['extras']) - return root.extras - - -dialect = PGDialect_psycopg2cffi diff --git a/python/sqlalchemy/dialects/postgresql/pypostgresql.py b/python/sqlalchemy/dialects/postgresql/pypostgresql.py deleted file mode 100644 index db6d5e16..00000000 --- a/python/sqlalchemy/dialects/postgresql/pypostgresql.py +++ /dev/null @@ -1,97 +0,0 @@ -# postgresql/pypostgresql.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: postgresql+pypostgresql - :name: py-postgresql - :dbapi: pypostgresql - :connectstring: postgresql+pypostgresql://user:password@host:port/dbname\ -[?key=value&key=value...] - :url: http://python.projects.pgfoundry.org/ - - -""" -from ... import util -from ... import types as sqltypes -from .base import PGDialect, PGExecutionContext -from ... 
import processors - - -class PGNumeric(sqltypes.Numeric): - def bind_processor(self, dialect): - return processors.to_str - - def result_processor(self, dialect, coltype): - if self.asdecimal: - return None - else: - return processors.to_float - - -class PGExecutionContext_pypostgresql(PGExecutionContext): - pass - - -class PGDialect_pypostgresql(PGDialect): - driver = 'pypostgresql' - - supports_unicode_statements = True - supports_unicode_binds = True - description_encoding = None - default_paramstyle = 'pyformat' - - # requires trunk version to support sane rowcounts - # TODO: use dbapi version information to set this flag appropriately - supports_sane_rowcount = True - supports_sane_multi_rowcount = False - - execution_ctx_cls = PGExecutionContext_pypostgresql - colspecs = util.update_copy( - PGDialect.colspecs, - { - sqltypes.Numeric: PGNumeric, - - # prevents PGNumeric from being used - sqltypes.Float: sqltypes.Float, - } - ) - - @classmethod - def dbapi(cls): - from postgresql.driver import dbapi20 - return dbapi20 - - _DBAPI_ERROR_NAMES = [ - "Error", - "InterfaceError", "DatabaseError", "DataError", - "OperationalError", "IntegrityError", "InternalError", - "ProgrammingError", "NotSupportedError" - ] - - @util.memoized_property - def dbapi_exception_translation_map(self): - if self.dbapi is None: - return {} - - return dict( - (getattr(self.dbapi, name).__name__, name) - for name in self._DBAPI_ERROR_NAMES - ) - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - if 'port' in opts: - opts['port'] = int(opts['port']) - else: - opts['port'] = 5432 - opts.update(url.query) - return ([], opts) - - def is_disconnect(self, e, connection, cursor): - return "connection is closed" in str(e) - -dialect = PGDialect_pypostgresql diff --git a/python/sqlalchemy/dialects/postgresql/ranges.py b/python/sqlalchemy/dialects/postgresql/ranges.py deleted file mode 100644 index 59c35c87..00000000 --- a/python/sqlalchemy/dialects/postgresql/ranges.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (C) 2013-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .base import ischema_names -from ... import types as sqltypes - -__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE') - - -class RangeOperators(object): - """ - This mixin provides functionality for the Range Operators - listed in Table 9-44 of the `postgres documentation`__ for Range - Functions and Operators. It is used by all the range types - provided in the ``postgres`` dialect and can likely be used for - any range types you create yourself. - - __ http://www.postgresql.org/docs/devel/static/functions-range.html - - No extra support is provided for the Range Functions listed in - Table 9-45 of the postgres documentation. For these, the normal - :func:`~sqlalchemy.sql.expression.func` object should be used. - - .. versionadded:: 0.8.2 Support for Postgresql RANGE operations. - - """ - - class comparator_factory(sqltypes.Concatenable.Comparator): - """Define comparison operations for range types.""" - - def __ne__(self, other): - "Boolean expression. Returns true if two ranges are not equal" - return self.expr.op('<>')(other) - - def contains(self, other, **kw): - """Boolean expression. Returns true if the right hand operand, - which can be an element or a range, is contained within the - column. 
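For example, a minimal sketch (``bookings`` is a hypothetical table with a
``TSRANGE`` column named ``during``; range values may be passed as literal
strings, since these types apply no bind conversion)::

    from sqlalchemy import select

    stmt = select([bookings]).where(
        bookings.c.during.contains('[2015-01-01, 2015-01-02)'))
    # renders roughly: SELECT ... WHERE bookings.during @> %(during_1)s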
- """ - return self.expr.op('@>')(other) - - def contained_by(self, other): - """Boolean expression. Returns true if the column is contained - within the right hand operand. - """ - return self.expr.op('<@')(other) - - def overlaps(self, other): - """Boolean expression. Returns true if the column overlaps - (has points in common with) the right hand operand. - """ - return self.expr.op('&&')(other) - - def strictly_left_of(self, other): - """Boolean expression. Returns true if the column is strictly - left of the right hand operand. - """ - return self.expr.op('<<')(other) - - __lshift__ = strictly_left_of - - def strictly_right_of(self, other): - """Boolean expression. Returns true if the column is strictly - right of the right hand operand. - """ - return self.expr.op('>>')(other) - - __rshift__ = strictly_right_of - - def not_extend_right_of(self, other): - """Boolean expression. Returns true if the range in the column - does not extend right of the range in the operand. - """ - return self.expr.op('&<')(other) - - def not_extend_left_of(self, other): - """Boolean expression. Returns true if the range in the column - does not extend left of the range in the operand. - """ - return self.expr.op('&>')(other) - - def adjacent_to(self, other): - """Boolean expression. Returns true if the range in the column - is adjacent to the range in the operand. - """ - return self.expr.op('-|-')(other) - - def __add__(self, other): - """Range expression. Returns the union of the two ranges. - Will raise an exception if the resulting range is not - contigous. - """ - return self.expr.op('+')(other) - - -class INT4RANGE(RangeOperators, sqltypes.TypeEngine): - """Represent the Postgresql INT4RANGE type. - - .. versionadded:: 0.8.2 - - """ - - __visit_name__ = 'INT4RANGE' - -ischema_names['int4range'] = INT4RANGE - - -class INT8RANGE(RangeOperators, sqltypes.TypeEngine): - """Represent the Postgresql INT8RANGE type. - - .. versionadded:: 0.8.2 - - """ - - __visit_name__ = 'INT8RANGE' - -ischema_names['int8range'] = INT8RANGE - - -class NUMRANGE(RangeOperators, sqltypes.TypeEngine): - """Represent the Postgresql NUMRANGE type. - - .. versionadded:: 0.8.2 - - """ - - __visit_name__ = 'NUMRANGE' - -ischema_names['numrange'] = NUMRANGE - - -class DATERANGE(RangeOperators, sqltypes.TypeEngine): - """Represent the Postgresql DATERANGE type. - - .. versionadded:: 0.8.2 - - """ - - __visit_name__ = 'DATERANGE' - -ischema_names['daterange'] = DATERANGE - - -class TSRANGE(RangeOperators, sqltypes.TypeEngine): - """Represent the Postgresql TSRANGE type. - - .. versionadded:: 0.8.2 - - """ - - __visit_name__ = 'TSRANGE' - -ischema_names['tsrange'] = TSRANGE - - -class TSTZRANGE(RangeOperators, sqltypes.TypeEngine): - """Represent the Postgresql TSTZRANGE type. - - .. versionadded:: 0.8.2 - - """ - - __visit_name__ = 'TSTZRANGE' - -ischema_names['tstzrange'] = TSTZRANGE diff --git a/python/sqlalchemy/dialects/postgresql/zxjdbc.py b/python/sqlalchemy/dialects/postgresql/zxjdbc.py deleted file mode 100644 index 1b542152..00000000 --- a/python/sqlalchemy/dialects/postgresql/zxjdbc.py +++ /dev/null @@ -1,46 +0,0 @@ -# postgresql/zxjdbc.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. 
dialect:: postgresql+zxjdbc - :name: zxJDBC for Jython - :dbapi: zxjdbc - :connectstring: postgresql+zxjdbc://scott:tiger@localhost/db - :driverurl: http://jdbc.postgresql.org/ - - -""" -from ...connectors.zxJDBC import ZxJDBCConnector -from .base import PGDialect, PGExecutionContext - - -class PGExecutionContext_zxjdbc(PGExecutionContext): - - def create_cursor(self): - cursor = self._dbapi_connection.cursor() - cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) - return cursor - - -class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect): - jdbc_db_name = 'postgresql' - jdbc_driver_name = 'org.postgresql.Driver' - - execution_ctx_cls = PGExecutionContext_zxjdbc - - supports_native_decimal = True - - def __init__(self, *args, **kwargs): - super(PGDialect_zxjdbc, self).__init__(*args, **kwargs) - from com.ziclix.python.sql.handler import PostgresqlDataHandler - self.DataHandler = PostgresqlDataHandler - - def _get_server_version_info(self, connection): - parts = connection.connection.dbversion.split('.') - return tuple(int(x) for x in parts) - -dialect = PGDialect_zxjdbc diff --git a/python/sqlalchemy/dialects/sqlite/__init__.py b/python/sqlalchemy/dialects/sqlite/__init__.py deleted file mode 100644 index 608630a2..00000000 --- a/python/sqlalchemy/dialects/sqlite/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# sqlite/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.sqlite import base, pysqlite, pysqlcipher - -# default dialect -base.dialect = pysqlite.dialect - -from sqlalchemy.dialects.sqlite.base import ( - BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, INTEGER, REAL, - NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR, dialect, -) - -__all__ = ('BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', - 'FLOAT', 'INTEGER', 'NUMERIC', 'SMALLINT', 'TEXT', 'TIME', - 'TIMESTAMP', 'VARCHAR', 'REAL', 'dialect') diff --git a/python/sqlalchemy/dialects/sqlite/base.py b/python/sqlalchemy/dialects/sqlite/base.py deleted file mode 100644 index e19047b7..00000000 --- a/python/sqlalchemy/dialects/sqlite/base.py +++ /dev/null @@ -1,1476 +0,0 @@ -# sqlite/base.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: sqlite - :name: SQLite - -.. _sqlite_datetime: - -Date and Time Types -------------------- - -SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does -not provide out of the box functionality for translating values between Python -`datetime` objects and a SQLite-supported format. SQLAlchemy's own -:class:`~sqlalchemy.types.DateTime` and related types provide date formatting -and parsing functionality when SQlite is used. The implementation classes are -:class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`. -These types represent dates and times as ISO formatted strings, which also -nicely support ordering. There's no reliance on typical "libc" internals for -these functions so historical dates are fully supported. - -Ensuring Text affinity -^^^^^^^^^^^^^^^^^^^^^^ - -The DDL rendered for these types is the standard ``DATE``, ``TIME`` -and ``DATETIME`` indicators. However, custom storage formats can also be -applied to these types. 
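For example, a format containing only numeric characters can be supplied
via the ``storage_format`` and ``regexp`` parameters accepted by these
types (a sketch; the format keys follow the components of a Python
``datetime``)::

    from sqlalchemy.dialects.sqlite import DATETIME

    dt = DATETIME(
        storage_format="%(year)04d/%(month)02d/%(day)02d "
                       "%(hour)02d:%(minute)02d:%(second)02d",
        regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)",
    )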
When the -storage format is detected as containing no alpha characters, the DDL for -these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, -so that the column continues to have textual affinity. - -.. seealso:: - - `Type Affinity `_ - in the SQLite documentation - -.. _sqlite_autoincrement: - -SQLite Auto Incrementing Behavior ----------------------------------- - -Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html - -Key concepts: - -* SQLite has an implicit "auto increment" feature that takes place for any - non-composite primary-key column that is specifically created using - "INTEGER PRIMARY KEY" for the type + primary key. - -* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not** - equivalent to the implicit autoincrement feature; this keyword is not - recommended for general use. SQLAlchemy does not render this keyword - unless a special SQLite-specific directive is used (see below). However, - it still requires that the column's type is named "INTEGER". - -Using the AUTOINCREMENT Keyword -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To specifically render the AUTOINCREMENT keyword on the primary key column -when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table -construct:: - - Table('sometable', metadata, - Column('id', Integer, primary_key=True), - sqlite_autoincrement=True) - -Allowing autoincrement behavior SQLAlchemy types other than Integer/INTEGER -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -SQLite's typing model is based on naming conventions. Among -other things, this means that any type name which contains the -substring ``"INT"`` will be determined to be of "integer affinity". A -type named ``"BIGINT"``, ``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by -SQLite to be of "integer" affinity. However, **the SQLite -autoincrement feature, whether implicitly or explicitly enabled, -requires that the name of the column's type -is exactly the string "INTEGER"**. Therefore, if an -application uses a type like :class:`.BigInteger` for a primary key, on -SQLite this type will need to be rendered as the name ``"INTEGER"`` when -emitting the initial ``CREATE TABLE`` statement in order for the autoincrement -behavior to be available. - -One approach to achieve this is to use :class:`.Integer` on SQLite -only using :meth:`.TypeEngine.with_variant`:: - - table = Table( - "my_table", metadata, - Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True) - ) - -Another is to use a subclass of :class:`.BigInteger` that overrides its DDL name -to be ``INTEGER`` when compiled against SQLite:: - - from sqlalchemy import BigInteger - from sqlalchemy.ext.compiler import compiles - - class SLBigInteger(BigInteger): - pass - - @compiles(SLBigInteger, 'sqlite') - def bi_c(element, compiler, **kw): - return "INTEGER" - - @compiles(SLBigInteger) - def bi_c(element, compiler, **kw): - return compiler.visit_BIGINT(element, **kw) - - - table = Table( - "my_table", metadata, - Column("id", SLBigInteger(), primary_key=True) - ) - -.. seealso:: - - :meth:`.TypeEngine.with_variant` - - :ref:`sqlalchemy.ext.compiler_toplevel` - - `Datatypes In SQLite Version 3 `_ - -.. _sqlite_concurrency: - -Database Locking Behavior / Concurrency ---------------------------------------- - -SQLite is not designed for a high level of write concurrency. 
The database
-itself, being a file, is locked completely during write operations within
-transactions, meaning exactly one "connection" (in reality a file handle)
-has exclusive access to the database during this period - all other
-"connections" will be blocked during this time.
-
-The Python DBAPI specification also calls for a connection model that is
-always in a transaction; there is no ``connection.begin()`` method,
-only ``connection.commit()`` and ``connection.rollback()``, upon which a
-new transaction is to be begun immediately. This may seem to imply
-that the SQLite driver would in theory allow only a single filehandle on a
-particular database file at any time; however, there are several
-factors both within SQLite itself as well as within the pysqlite driver
-which loosen this restriction significantly.
-
-However, no matter what locking modes are used, SQLite will still always
-lock the database file once a transaction is started and DML (e.g. INSERT,
-UPDATE, DELETE) has at least been emitted, and this will block
-other transactions at least at the point that they also attempt to emit DML.
-By default, the length of time on this block is very short before it times out
-with an error.
-
-This behavior becomes more critical when used in conjunction with the
-SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
-within a transaction, and with its autoflush model, may emit DML preceding
-any SELECT statement. This may lead to a SQLite database that locks
-more quickly than is expected. The locking mode of SQLite and the pysqlite
-driver can be manipulated to some degree, however it should be noted that
-achieving a high degree of write-concurrency with SQLite is a losing battle.
-
-For more information on SQLite's lack of write concurrency by design, please
-see
-`Situations Where Another RDBMS May Work Better - High Concurrency
-<http://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
-
-The following subsections introduce areas that are impacted by SQLite's
-file-based architecture and additionally will usually require workarounds to
-work when using the pysqlite driver.
-
-.. _sqlite_isolation_level:
-
-Transaction Isolation Level
-----------------------------
-
-SQLite supports "transaction isolation" in a non-standard way, along two
-axes. One is that of the `PRAGMA read_uncommitted
-<http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
-instruction. This setting can essentially switch SQLite between its
-default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
-mode normally referred to as ``READ UNCOMMITTED``.
-
-SQLAlchemy ties into this PRAGMA statement using the
-:paramref:`.create_engine.isolation_level` parameter of :func:`.create_engine`.
-Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
-and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
-SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
-the pysqlite driver's default behavior.
-
-The other axis along which SQLite's transactional locking is impacted is
-via the nature of the ``BEGIN`` statement used. The three varieties
-are "deferred", "immediate", and "exclusive", as described at
-`BEGIN TRANSACTION <http://sqlite.org/lang_transaction.html>`_. A straight
-``BEGIN`` statement uses the "deferred" mode, where the database file is
-not locked until the first read or write operation, and read access remains
-open to other transactions until the first write operation. But again,
-it is critical to note that the pysqlite driver interferes with this behavior
-by *not even emitting BEGIN* until the first write operation. 
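-
-Putting the first of these two axes into practice, a minimal sketch of
-selecting the "dirty read" mode via the parameter described above, where
-the file name is hypothetical::
-
-    from sqlalchemy import create_engine
-
-    # maps to "PRAGMA read_uncommitted = 1" on each new connection
-    engine = create_engine(
-        "sqlite:///example.db",
-        isolation_level="READ UNCOMMITTED")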
-
-.. warning::
-
-    SQLite's transactional scope is impacted by unresolved
-    issues in the pysqlite driver, which defers BEGIN statements to a greater
-    degree than is often feasible. See the section :ref:`pysqlite_serializable`
-    for techniques to work around this behavior.
-
-SAVEPOINT Support
-----------------------------
-
-SQLite supports SAVEPOINTs, which only function once a transaction is
-begun. SQLAlchemy's SAVEPOINT support is available using the
-:meth:`.Connection.begin_nested` method at the Core level, and
-:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
-won't work at all with pysqlite unless workarounds are taken.
-
-.. warning::
-
-    SQLite's SAVEPOINT feature is impacted by unresolved
-    issues in the pysqlite driver, which defers BEGIN statements to a greater
-    degree than is often feasible. See the section :ref:`pysqlite_serializable`
-    for techniques to work around this behavior.
-
-Transactional DDL
-----------------------------
-
-The SQLite database supports transactional :term:`DDL` as well.
-In this case, the pysqlite driver is not only failing to start transactions,
-it also is ending any existing transaction when DDL is detected, so again,
-workarounds are required.
-
-.. warning::
-
-    SQLite's transactional DDL is impacted by unresolved issues
-    in the pysqlite driver, which fails to emit BEGIN and additionally
-    forces a COMMIT to cancel any transaction when DDL is encountered.
-    See the section :ref:`pysqlite_serializable`
-    for techniques to work around this behavior.
-
-.. _sqlite_foreign_keys:
-
-Foreign Key Support
--------------------
-
-SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
-however by default these constraints have no effect on the operation of the
-table.
-
-Constraint checking on SQLite has three prerequisites:
-
-* At least version 3.6.19 of SQLite must be in use
-* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
-  or SQLITE_OMIT_TRIGGER symbols enabled.
-* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
-  connections before use.
-
-SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
-new connections through the usage of events::
-
-    from sqlalchemy.engine import Engine
-    from sqlalchemy import event
-
-    @event.listens_for(Engine, "connect")
-    def set_sqlite_pragma(dbapi_connection, connection_record):
-        cursor = dbapi_connection.cursor()
-        cursor.execute("PRAGMA foreign_keys=ON")
-        cursor.close()
-
-.. warning::
-
-    When SQLite foreign keys are enabled, it is **not possible**
-    to emit CREATE or DROP statements for tables that contain
-    mutually-dependent foreign key constraints;
-    to emit the DDL for these tables requires that ALTER TABLE be used to
-    create or drop these constraints separately, for which SQLite has
-    no support.
-
-.. seealso::
-
-    `SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
-    - on the SQLite web site.
-
-    :ref:`event_toplevel` - SQLAlchemy event API.
-
-    :ref:`use_alter` - more information on SQLAlchemy's facilities for handling
-    mutually-dependent foreign key constraints.
-
-.. _sqlite_type_reflection:
-
-Type Reflection
----------------
-
-SQLite types are unlike those of most other database backends, in that
-the string name of the type usually does not correspond to a "type" in a
-one-to-one fashion. Instead, SQLite links per-column typing behavior
-to one of five so-called "type affinities" based on a string matching
-pattern for the type. 
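-
-As a quick illustration of what this reflection produces, column types can be
-examined through the runtime inspection API; a minimal sketch, where the file
-and table names are hypothetical::
-
-    from sqlalchemy import create_engine, inspect
-
-    engine = create_engine("sqlite:///example.db")
-    inspector = inspect(engine)
-
-    # each entry is a dict; "type" holds the resolved SQLAlchemy type
-    for col in inspector.get_columns("some_table"):
-        print(col["name"], col["type"])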
- -SQLAlchemy's reflection process, when inspecting types, uses a simple -lookup table to link the keywords returned to provided SQLAlchemy types. -This lookup table is present within the SQLite dialect as it is for all -other dialects. However, the SQLite dialect has a different "fallback" -routine for when a particular type name is not located in the lookup map; -it instead implements the SQLite "type affinity" scheme located at -http://www.sqlite.org/datatype3.html section 2.1. - -The provided typemap will make direct associations from an exact string -name match for the following types: - -:class:`~.types.BIGINT`, :class:`~.types.BLOB`, -:class:`~.types.BOOLEAN`, :class:`~.types.BOOLEAN`, -:class:`~.types.CHAR`, :class:`~.types.DATE`, -:class:`~.types.DATETIME`, :class:`~.types.FLOAT`, -:class:`~.types.DECIMAL`, :class:`~.types.FLOAT`, -:class:`~.types.INTEGER`, :class:`~.types.INTEGER`, -:class:`~.types.NUMERIC`, :class:`~.types.REAL`, -:class:`~.types.SMALLINT`, :class:`~.types.TEXT`, -:class:`~.types.TIME`, :class:`~.types.TIMESTAMP`, -:class:`~.types.VARCHAR`, :class:`~.types.NVARCHAR`, -:class:`~.types.NCHAR` - -When a type name does not match one of the above types, the "type affinity" -lookup is used instead: - -* :class:`~.types.INTEGER` is returned if the type name includes the - string ``INT`` -* :class:`~.types.TEXT` is returned if the type name includes the - string ``CHAR``, ``CLOB`` or ``TEXT`` -* :class:`~.types.NullType` is returned if the type name includes the - string ``BLOB`` -* :class:`~.types.REAL` is returned if the type name includes the string - ``REAL``, ``FLOA`` or ``DOUB``. -* Otherwise, the :class:`~.types.NUMERIC` type is used. - -.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting - columns. - - -.. _sqlite_partial_index: - -Partial Indexes ---------------- - -A partial index, e.g. one which uses a WHERE clause, can be specified -with the DDL system using the argument ``sqlite_where``:: - - tbl = Table('testtbl', m, Column('data', Integer)) - idx = Index('test_idx1', tbl.c.data, - sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10)) - -The index will be rendered at create time as:: - - CREATE INDEX test_idx1 ON testtbl (data) - WHERE data > 5 AND data < 10 - -.. versionadded:: 0.9.9 - -Dotted Column Names -------------------- - -Using table or column names that explicitly have periods in them is -**not recommended**. While this is generally a bad idea for relational -databases in general, as the dot is a syntactically significant character, -the SQLite driver has a bug which requires that SQLAlchemy filter out these -dots in result sets. 
- -The bug, entirely outside of SQLAlchemy, can be illustrated thusly:: - - import sqlite3 - - conn = sqlite3.connect(":memory:") - cursor = conn.cursor() - - cursor.execute("create table x (a integer, b integer)") - cursor.execute("insert into x (a, b) values (1, 1)") - cursor.execute("insert into x (a, b) values (2, 2)") - - cursor.execute("select x.a, x.b from x") - assert [c[0] for c in cursor.description] == ['a', 'b'] - - cursor.execute(''' - select x.a, x.b from x where a=1 - union - select x.a, x.b from x where a=2 - ''') - assert [c[0] for c in cursor.description] == ['a', 'b'], \\ - [c[0] for c in cursor.description] - -The second assertion fails:: - - Traceback (most recent call last): - File "test.py", line 19, in - [c[0] for c in cursor.description] - AssertionError: ['x.a', 'x.b'] - -Where above, the driver incorrectly reports the names of the columns -including the name of the table, which is entirely inconsistent vs. -when the UNION is not present. - -SQLAlchemy relies upon column names being predictable in how they match -to the original statement, so the SQLAlchemy dialect has no choice but -to filter these out:: - - - from sqlalchemy import create_engine - - eng = create_engine("sqlite://") - conn = eng.connect() - - conn.execute("create table x (a integer, b integer)") - conn.execute("insert into x (a, b) values (1, 1)") - conn.execute("insert into x (a, b) values (2, 2)") - - result = conn.execute("select x.a, x.b from x") - assert result.keys() == ["a", "b"] - - result = conn.execute(''' - select x.a, x.b from x where a=1 - union - select x.a, x.b from x where a=2 - ''') - assert result.keys() == ["a", "b"] - -Note that above, even though SQLAlchemy filters out the dots, *both -names are still addressable*:: - - >>> row = result.first() - >>> row["a"] - 1 - >>> row["x.a"] - 1 - >>> row["b"] - 1 - >>> row["x.b"] - 1 - -Therefore, the workaround applied by SQLAlchemy only impacts -:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API. -In the very specific case where -an application is forced to use column names that contain dots, and the -functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` -is required to return these dotted names unmodified, the ``sqlite_raw_colnames`` -execution option may be provided, either on a per-:class:`.Connection` basis:: - - result = conn.execution_options(sqlite_raw_colnames=True).execute(''' - select x.a, x.b from x where a=1 - union - select x.a, x.b from x where a=2 - ''') - assert result.keys() == ["x.a", "x.b"] - -or on a per-:class:`.Engine` basis:: - - engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True}) - -When using the per-:class:`.Engine` execution option, note that -**Core and ORM queries that use UNION may not function properly**. - -""" - -import datetime -import re - -from ... import processors -from ... import sql, exc -from ... import types as sqltypes, schema as sa_schema -from ... 
import util -from ...engine import default, reflection -from ...sql import compiler - -from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT, - INTEGER, REAL, NUMERIC, SMALLINT, TEXT, - TIMESTAMP, VARCHAR) - - -class _DateTimeMixin(object): - _reg = None - _storage_format = None - - def __init__(self, storage_format=None, regexp=None, **kw): - super(_DateTimeMixin, self).__init__(**kw) - if regexp is not None: - self._reg = re.compile(regexp) - if storage_format is not None: - self._storage_format = storage_format - - @property - def format_is_text_affinity(self): - """return True if the storage format will automatically imply - a TEXT affinity. - - If the storage format contains no non-numeric characters, - it will imply a NUMERIC storage format on SQLite; in this case, - the type will generate its DDL as DATE_CHAR, DATETIME_CHAR, - TIME_CHAR. - - .. versionadded:: 1.0.0 - - """ - spec = self._storage_format % { - "year": 0, "month": 0, "day": 0, "hour": 0, - "minute": 0, "second": 0, "microsecond": 0 - } - return bool(re.search(r'[^0-9]', spec)) - - def adapt(self, cls, **kw): - if issubclass(cls, _DateTimeMixin): - if self._storage_format: - kw["storage_format"] = self._storage_format - if self._reg: - kw["regexp"] = self._reg - return super(_DateTimeMixin, self).adapt(cls, **kw) - - def literal_processor(self, dialect): - bp = self.bind_processor(dialect) - - def process(value): - return "'%s'" % bp(value) - return process - - -class DATETIME(_DateTimeMixin, sqltypes.DateTime): - """Represent a Python datetime object in SQLite using a string. - - The default string storage format is:: - - "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:\ -%(second)02d.%(microsecond)06d" - - e.g.:: - - 2011-03-15 12:05:57.10558 - - The storage format can be customized to some degree using the - ``storage_format`` and ``regexp`` parameters, such as:: - - import re - from sqlalchemy.dialects.sqlite import DATETIME - - dt = DATETIME( - storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:\ -%(min)02d:%(second)02d", - regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)" - ) - - :param storage_format: format string which will be applied to the dict - with keys year, month, day, hour, minute, second, and microsecond. - - :param regexp: regular expression which will be applied to incoming result - rows. If the regexp contains named groups, the resulting match dict is - applied to the Python datetime() constructor as keyword arguments. - Otherwise, if positional groups are used, the datetime() constructor - is called with positional arguments via - ``*map(int, match_obj.groups(0))``. - """ - - _storage_format = ( - "%(year)04d-%(month)02d-%(day)02d " - "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" - ) - - def __init__(self, *args, **kwargs): - truncate_microseconds = kwargs.pop('truncate_microseconds', False) - super(DATETIME, self).__init__(*args, **kwargs) - if truncate_microseconds: - assert 'storage_format' not in kwargs, "You can specify only "\ - "one of truncate_microseconds or storage_format." - assert 'regexp' not in kwargs, "You can specify only one of "\ - "truncate_microseconds or regexp." 
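-            # truncate_microseconds swaps in a storage format with the
-            # %(microsecond)06d field removed; the asserts above reject
-            # combining it with a custom storage_format or regexp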
- self._storage_format = ( - "%(year)04d-%(month)02d-%(day)02d " - "%(hour)02d:%(minute)02d:%(second)02d" - ) - - def bind_processor(self, dialect): - datetime_datetime = datetime.datetime - datetime_date = datetime.date - format = self._storage_format - - def process(value): - if value is None: - return None - elif isinstance(value, datetime_datetime): - return format % { - 'year': value.year, - 'month': value.month, - 'day': value.day, - 'hour': value.hour, - 'minute': value.minute, - 'second': value.second, - 'microsecond': value.microsecond, - } - elif isinstance(value, datetime_date): - return format % { - 'year': value.year, - 'month': value.month, - 'day': value.day, - 'hour': 0, - 'minute': 0, - 'second': 0, - 'microsecond': 0, - } - else: - raise TypeError("SQLite DateTime type only accepts Python " - "datetime and date objects as input.") - return process - - def result_processor(self, dialect, coltype): - if self._reg: - return processors.str_to_datetime_processor_factory( - self._reg, datetime.datetime) - else: - return processors.str_to_datetime - - -class DATE(_DateTimeMixin, sqltypes.Date): - """Represent a Python date object in SQLite using a string. - - The default string storage format is:: - - "%(year)04d-%(month)02d-%(day)02d" - - e.g.:: - - 2011-03-15 - - The storage format can be customized to some degree using the - ``storage_format`` and ``regexp`` parameters, such as:: - - import re - from sqlalchemy.dialects.sqlite import DATE - - d = DATE( - storage_format="%(month)02d/%(day)02d/%(year)04d", - regexp=re.compile("(?P\d+)/(?P\d+)/(?P\d+)") - ) - - :param storage_format: format string which will be applied to the - dict with keys year, month, and day. - - :param regexp: regular expression which will be applied to - incoming result rows. If the regexp contains named groups, the - resulting match dict is applied to the Python date() constructor - as keyword arguments. Otherwise, if positional groups are used, the - date() constructor is called with positional arguments via - ``*map(int, match_obj.groups(0))``. - """ - - _storage_format = "%(year)04d-%(month)02d-%(day)02d" - - def bind_processor(self, dialect): - datetime_date = datetime.date - format = self._storage_format - - def process(value): - if value is None: - return None - elif isinstance(value, datetime_date): - return format % { - 'year': value.year, - 'month': value.month, - 'day': value.day, - } - else: - raise TypeError("SQLite Date type only accepts Python " - "date objects as input.") - return process - - def result_processor(self, dialect, coltype): - if self._reg: - return processors.str_to_datetime_processor_factory( - self._reg, datetime.date) - else: - return processors.str_to_date - - -class TIME(_DateTimeMixin, sqltypes.Time): - """Represent a Python time object in SQLite using a string. - - The default string storage format is:: - - "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" - - e.g.:: - - 12:05:57.10558 - - The storage format can be customized to some degree using the - ``storage_format`` and ``regexp`` parameters, such as:: - - import re - from sqlalchemy.dialects.sqlite import TIME - - t = TIME( - storage_format="%(hour)02d-%(minute)02d-%(second)02d-\ -%(microsecond)06d", - regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?") - ) - - :param storage_format: format string which will be applied to the dict - with keys hour, minute, second, and microsecond. - - :param regexp: regular expression which will be applied to incoming result - rows. 
If the regexp contains named groups, the resulting match dict is - applied to the Python time() constructor as keyword arguments. Otherwise, - if positional groups are used, the time() constructor is called with - positional arguments via ``*map(int, match_obj.groups(0))``. - """ - - _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" - - def __init__(self, *args, **kwargs): - truncate_microseconds = kwargs.pop('truncate_microseconds', False) - super(TIME, self).__init__(*args, **kwargs) - if truncate_microseconds: - assert 'storage_format' not in kwargs, "You can specify only "\ - "one of truncate_microseconds or storage_format." - assert 'regexp' not in kwargs, "You can specify only one of "\ - "truncate_microseconds or regexp." - self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d" - - def bind_processor(self, dialect): - datetime_time = datetime.time - format = self._storage_format - - def process(value): - if value is None: - return None - elif isinstance(value, datetime_time): - return format % { - 'hour': value.hour, - 'minute': value.minute, - 'second': value.second, - 'microsecond': value.microsecond, - } - else: - raise TypeError("SQLite Time type only accepts Python " - "time objects as input.") - return process - - def result_processor(self, dialect, coltype): - if self._reg: - return processors.str_to_datetime_processor_factory( - self._reg, datetime.time) - else: - return processors.str_to_time - -colspecs = { - sqltypes.Date: DATE, - sqltypes.DateTime: DATETIME, - sqltypes.Time: TIME, -} - -ischema_names = { - 'BIGINT': sqltypes.BIGINT, - 'BLOB': sqltypes.BLOB, - 'BOOL': sqltypes.BOOLEAN, - 'BOOLEAN': sqltypes.BOOLEAN, - 'CHAR': sqltypes.CHAR, - 'DATE': sqltypes.DATE, - 'DATE_CHAR': sqltypes.DATE, - 'DATETIME': sqltypes.DATETIME, - 'DATETIME_CHAR': sqltypes.DATETIME, - 'DOUBLE': sqltypes.FLOAT, - 'DECIMAL': sqltypes.DECIMAL, - 'FLOAT': sqltypes.FLOAT, - 'INT': sqltypes.INTEGER, - 'INTEGER': sqltypes.INTEGER, - 'NUMERIC': sqltypes.NUMERIC, - 'REAL': sqltypes.REAL, - 'SMALLINT': sqltypes.SMALLINT, - 'TEXT': sqltypes.TEXT, - 'TIME': sqltypes.TIME, - 'TIME_CHAR': sqltypes.TIME, - 'TIMESTAMP': sqltypes.TIMESTAMP, - 'VARCHAR': sqltypes.VARCHAR, - 'NVARCHAR': sqltypes.NVARCHAR, - 'NCHAR': sqltypes.NCHAR, -} - - -class SQLiteCompiler(compiler.SQLCompiler): - extract_map = util.update_copy( - compiler.SQLCompiler.extract_map, - { - 'month': '%m', - 'day': '%d', - 'year': '%Y', - 'second': '%S', - 'hour': '%H', - 'doy': '%j', - 'minute': '%M', - 'epoch': '%s', - 'dow': '%w', - 'week': '%W', - }) - - def visit_now_func(self, fn, **kw): - return "CURRENT_TIMESTAMP" - - def visit_localtimestamp_func(self, func, **kw): - return 'DATETIME(CURRENT_TIMESTAMP, "localtime")' - - def visit_true(self, expr, **kw): - return '1' - - def visit_false(self, expr, **kw): - return '0' - - def visit_char_length_func(self, fn, **kw): - return "length%s" % self.function_argspec(fn) - - def visit_cast(self, cast, **kwargs): - if self.dialect.supports_cast: - return super(SQLiteCompiler, self).visit_cast(cast, **kwargs) - else: - return self.process(cast.clause, **kwargs) - - def visit_extract(self, extract, **kw): - try: - return "CAST(STRFTIME('%s', %s) AS INTEGER)" % ( - self.extract_map[extract.field], - self.process(extract.expr, **kw) - ) - except KeyError: - raise exc.CompileError( - "%s is not a valid extract argument." 
% extract.field) - - def limit_clause(self, select, **kw): - text = "" - if select._limit_clause is not None: - text += "\n LIMIT " + self.process(select._limit_clause, **kw) - if select._offset_clause is not None: - if select._limit_clause is None: - text += "\n LIMIT " + self.process(sql.literal(-1)) - text += " OFFSET " + self.process(select._offset_clause, **kw) - else: - text += " OFFSET " + self.process(sql.literal(0), **kw) - return text - - def for_update_clause(self, select, **kw): - # sqlite has no "FOR UPDATE" AFAICT - return '' - - -class SQLiteDDLCompiler(compiler.DDLCompiler): - - def get_column_specification(self, column, **kwargs): - coltype = self.dialect.type_compiler.process( - column.type, type_expression=column) - colspec = self.preparer.format_column(column) + " " + coltype - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - if not column.nullable: - colspec += " NOT NULL" - - if (column.primary_key and - column.table.dialect_options['sqlite']['autoincrement'] and - len(column.table.primary_key.columns) == 1 and - issubclass(column.type._type_affinity, sqltypes.Integer) and - not column.foreign_keys): - colspec += " PRIMARY KEY AUTOINCREMENT" - - return colspec - - def visit_primary_key_constraint(self, constraint): - # for columns with sqlite_autoincrement=True, - # the PRIMARY KEY constraint can only be inline - # with the column itself. - if len(constraint.columns) == 1: - c = list(constraint)[0] - if (c.primary_key and - c.table.dialect_options['sqlite']['autoincrement'] and - issubclass(c.type._type_affinity, sqltypes.Integer) and - not c.foreign_keys): - return None - - return super(SQLiteDDLCompiler, self).visit_primary_key_constraint( - constraint) - - def visit_foreign_key_constraint(self, constraint): - - local_table = constraint.elements[0].parent.table - remote_table = constraint.elements[0].column.table - - if local_table.schema != remote_table.schema: - return None - else: - return super( - SQLiteDDLCompiler, - self).visit_foreign_key_constraint(constraint) - - def define_constraint_remote_table(self, constraint, table, preparer): - """Format the remote table clause of a CREATE CONSTRAINT clause.""" - - return preparer.format_table(table, use_schema=False) - - def visit_create_index(self, create): - index = create.element - - text = super(SQLiteDDLCompiler, self).visit_create_index( - create, include_table_schema=False) - - whereclause = index.dialect_options["sqlite"]["where"] - if whereclause is not None: - where_compiled = self.sql_compiler.process( - whereclause, include_table=False, - literal_binds=True) - text += " WHERE " + where_compiled - - return text - - -class SQLiteTypeCompiler(compiler.GenericTypeCompiler): - def visit_large_binary(self, type_, **kw): - return self.visit_BLOB(type_) - - def visit_DATETIME(self, type_, **kw): - if not isinstance(type_, _DateTimeMixin) or \ - type_.format_is_text_affinity: - return super(SQLiteTypeCompiler, self).visit_DATETIME(type_) - else: - return "DATETIME_CHAR" - - def visit_DATE(self, type_, **kw): - if not isinstance(type_, _DateTimeMixin) or \ - type_.format_is_text_affinity: - return super(SQLiteTypeCompiler, self).visit_DATE(type_) - else: - return "DATE_CHAR" - - def visit_TIME(self, type_, **kw): - if not isinstance(type_, _DateTimeMixin) or \ - type_.format_is_text_affinity: - return super(SQLiteTypeCompiler, self).visit_TIME(type_) - else: - return "TIME_CHAR" - - -class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): - 
reserved_words = set([ - 'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc', - 'attach', 'autoincrement', 'before', 'begin', 'between', 'by', - 'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit', - 'conflict', 'constraint', 'create', 'cross', 'current_date', - 'current_time', 'current_timestamp', 'database', 'default', - 'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct', - 'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive', - 'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob', - 'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index', - 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', - 'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit', - 'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on', - 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query', - 'raise', 'references', 'reindex', 'rename', 'replace', 'restrict', - 'right', 'rollback', 'row', 'select', 'set', 'table', 'temp', - 'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union', - 'unique', 'update', 'using', 'vacuum', 'values', 'view', 'virtual', - 'when', 'where', - ]) - - def format_index(self, index, use_schema=True, name=None): - """Prepare a quoted index and schema name.""" - - if name is None: - name = index.name - result = self.quote(name, index.quote) - if (not self.omit_schema and - use_schema and - getattr(index.table, "schema", None)): - result = self.quote_schema( - index.table.schema, index.table.quote_schema) + "." + result - return result - - -class SQLiteExecutionContext(default.DefaultExecutionContext): - @util.memoized_property - def _preserve_raw_colnames(self): - return self.execution_options.get("sqlite_raw_colnames", False) - - def _translate_colname(self, colname): - # adjust for dotted column names. SQLite - # in the case of UNION may store col names as - # "tablename.colname", or if using an attached database, - # "database.tablename.colname", in cursor.description - if not self._preserve_raw_colnames and "." in colname: - return colname.split(".")[-1], colname - else: - return colname, None - - -class SQLiteDialect(default.DefaultDialect): - name = 'sqlite' - supports_alter = False - supports_unicode_statements = True - supports_unicode_binds = True - supports_default_values = True - supports_empty_insert = False - supports_cast = True - supports_multivalues_insert = True - supports_right_nested_joins = False - - default_paramstyle = 'qmark' - execution_ctx_cls = SQLiteExecutionContext - statement_compiler = SQLiteCompiler - ddl_compiler = SQLiteDDLCompiler - type_compiler = SQLiteTypeCompiler - preparer = SQLiteIdentifierPreparer - ischema_names = ischema_names - colspecs = colspecs - isolation_level = None - - supports_cast = True - supports_default_values = True - - construct_arguments = [ - (sa_schema.Table, { - "autoincrement": False - }), - (sa_schema.Index, { - "where": None, - }), - ] - - _broken_fk_pragma_quotes = False - - def __init__(self, isolation_level=None, native_datetime=False, **kwargs): - default.DefaultDialect.__init__(self, **kwargs) - self.isolation_level = isolation_level - - # this flag used by pysqlite dialect, and perhaps others in the - # future, to indicate the driver is handling date/timestamp - # conversions (and perhaps datetime/time as well on some hypothetical - # driver ?) 
- self.native_datetime = native_datetime - - if self.dbapi is not None: - self.supports_default_values = ( - self.dbapi.sqlite_version_info >= (3, 3, 8)) - self.supports_cast = ( - self.dbapi.sqlite_version_info >= (3, 2, 3)) - self.supports_multivalues_insert = ( - # http://www.sqlite.org/releaselog/3_7_11.html - self.dbapi.sqlite_version_info >= (3, 7, 11)) - # see http://www.sqlalchemy.org/trac/ticket/2568 - # as well as http://www.sqlite.org/src/info/600482d161 - self._broken_fk_pragma_quotes = ( - self.dbapi.sqlite_version_info < (3, 6, 14)) - - _isolation_lookup = { - 'READ UNCOMMITTED': 1, - 'SERIALIZABLE': 0, - } - - def set_isolation_level(self, connection, level): - try: - isolation_level = self._isolation_lookup[level.replace('_', ' ')] - except KeyError: - raise exc.ArgumentError( - "Invalid value '%s' for isolation_level. " - "Valid isolation levels for %s are %s" % - (level, self.name, ", ".join(self._isolation_lookup)) - ) - cursor = connection.cursor() - cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level) - cursor.close() - - def get_isolation_level(self, connection): - cursor = connection.cursor() - cursor.execute('PRAGMA read_uncommitted') - res = cursor.fetchone() - if res: - value = res[0] - else: - # http://www.sqlite.org/changes.html#version_3_3_3 - # "Optional READ UNCOMMITTED isolation (instead of the - # default isolation level of SERIALIZABLE) and - # table level locking when database connections - # share a common cache."" - # pre-SQLite 3.3.0 default to 0 - value = 0 - cursor.close() - if value == 0: - return "SERIALIZABLE" - elif value == 1: - return "READ UNCOMMITTED" - else: - assert False, "Unknown isolation level %s" % value - - def on_connect(self): - if self.isolation_level is not None: - def connect(conn): - self.set_isolation_level(conn, self.isolation_level) - return connect - else: - return None - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - if schema is not None: - qschema = self.identifier_preparer.quote_identifier(schema) - master = '%s.sqlite_master' % qschema - else: - master = "sqlite_master" - s = ("SELECT name FROM %s " - "WHERE type='table' ORDER BY name") % (master,) - rs = connection.execute(s) - return [row[0] for row in rs] - - @reflection.cache - def get_temp_table_names(self, connection, **kw): - s = "SELECT name FROM sqlite_temp_master "\ - "WHERE type='table' ORDER BY name " - rs = connection.execute(s) - - return [row[0] for row in rs] - - @reflection.cache - def get_temp_view_names(self, connection, **kw): - s = "SELECT name FROM sqlite_temp_master "\ - "WHERE type='view' ORDER BY name " - rs = connection.execute(s) - - return [row[0] for row in rs] - - def has_table(self, connection, table_name, schema=None): - info = self._get_table_pragma( - connection, "table_info", table_name, schema=schema) - return bool(info) - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - if schema is not None: - qschema = self.identifier_preparer.quote_identifier(schema) - master = '%s.sqlite_master' % qschema - else: - master = "sqlite_master" - s = ("SELECT name FROM %s " - "WHERE type='view' ORDER BY name") % (master,) - rs = connection.execute(s) - - return [row[0] for row in rs] - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, **kw): - if schema is not None: - qschema = self.identifier_preparer.quote_identifier(schema) - master = '%s.sqlite_master' % qschema - s = ("SELECT sql FROM %s WHERE name = '%s'" - "AND type='view'") % 
(master, view_name)
-            rs = connection.execute(s)
-        else:
-            try:
-                s = ("SELECT sql FROM "
-                     " (SELECT * FROM sqlite_master UNION ALL "
-                     "  SELECT * FROM sqlite_temp_master) "
-                     "WHERE name = '%s' "
-                     "AND type='view'") % view_name
-                rs = connection.execute(s)
-            except exc.DBAPIError:
-                s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
-                     "AND type='view'") % view_name
-                rs = connection.execute(s)
-
-        result = rs.fetchall()
-        if result:
-            return result[0].sql
-
-    @reflection.cache
-    def get_columns(self, connection, table_name, schema=None, **kw):
-        info = self._get_table_pragma(
-            connection, "table_info", table_name, schema=schema)
-
-        columns = []
-        for row in info:
-            (name, type_, nullable, default, primary_key) = (
-                row[1], row[2].upper(), not row[3], row[4], row[5])
-
-            columns.append(self._get_column_info(name, type_, nullable,
-                                                 default, primary_key))
-        return columns
-
-    def _get_column_info(self, name, type_, nullable, default, primary_key):
-        coltype = self._resolve_type_affinity(type_)
-
-        if default is not None:
-            default = util.text_type(default)
-
-        return {
-            'name': name,
-            'type': coltype,
-            'nullable': nullable,
-            'default': default,
-            'autoincrement': default is None,
-            'primary_key': primary_key,
-        }
-
-    def _resolve_type_affinity(self, type_):
-        """Return a data type from a reflected column, using affinity rules.
-
-        SQLite's goal for universal compatibility introduces some complexity
-        during reflection, as a column's defined type might not actually be a
-        type that SQLite understands - or indeed, may not be defined *at all*.
-        Internally, SQLite handles this with a 'data type affinity' for each
-        column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
-        'REAL', or 'NONE' (raw bits). The algorithm that determines this is
-        listed in http://www.sqlite.org/datatype3.html section 2.1.
-
-        This method allows SQLAlchemy to support that algorithm, while still
-        providing access to smarter reflection utilities by recognizing
-        column definitions that SQLite only supports through affinity (like
-        DATE and DOUBLE).
-
-        """
-        match = re.match(r'([\w ]+)(\(.*?\))?', type_)
-        if match:
-            coltype = match.group(1)
-            args = match.group(2)
-        else:
-            coltype = ''
-            args = ''
-
-        if coltype in self.ischema_names:
-            coltype = self.ischema_names[coltype]
-        elif 'INT' in coltype:
-            coltype = sqltypes.INTEGER
-        elif 'CHAR' in coltype or 'CLOB' in coltype or 'TEXT' in coltype:
-            coltype = sqltypes.TEXT
-        elif 'BLOB' in coltype or not coltype:
-            coltype = sqltypes.NullType
-        elif 'REAL' in coltype or 'FLOA' in coltype or 'DOUB' in coltype:
-            coltype = sqltypes.REAL
-        else:
-            coltype = sqltypes.NUMERIC
-
-        if args is not None:
-            args = re.findall(r'(\d+)', args)
-            try:
-                coltype = coltype(*[int(a) for a in args])
-            except TypeError:
-                util.warn(
-                    "Could not instantiate type %s with "
-                    "reflected arguments %s; using no arguments." %
-                    (coltype, args))
-                coltype = coltype()
-        else:
-            coltype = coltype()
-
-        return coltype
-
-    @reflection.cache
-    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
-        cols = self.get_columns(connection, table_name, schema, **kw)
-        pkeys = []
-        for col in cols:
-            if col['primary_key']:
-                pkeys.append(col['name'])
-        return {'constrained_columns': pkeys, 'name': None}
-
-    @reflection.cache
-    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
-        # sqlite makes this *extremely difficult*.
-        # First, use the pragma to get the actual FKs. 
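-        # PRAGMA foreign_key_list reports the column pairings but not the
-        # constraint names; names are recovered further below by
-        # regexp-parsing the CREATE TABLE text stored in sqlite_master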
- pragma_fks = self._get_table_pragma( - connection, "foreign_key_list", - table_name, schema=schema - ) - - fks = {} - - for row in pragma_fks: - (numerical_id, rtbl, lcol, rcol) = ( - row[0], row[2], row[3], row[4]) - - if rcol is None: - rcol = lcol - - if self._broken_fk_pragma_quotes: - rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) - - if numerical_id in fks: - fk = fks[numerical_id] - else: - fk = fks[numerical_id] = { - 'name': None, - 'constrained_columns': [], - 'referred_schema': None, - 'referred_table': rtbl, - 'referred_columns': [], - } - fks[numerical_id] = fk - - fk['constrained_columns'].append(lcol) - fk['referred_columns'].append(rcol) - - def fk_sig(constrained_columns, referred_table, referred_columns): - return tuple(constrained_columns) + (referred_table,) + \ - tuple(referred_columns) - - # then, parse the actual SQL and attempt to find DDL that matches - # the names as well. SQLite saves the DDL in whatever format - # it was typed in as, so need to be liberal here. - - keys_by_signature = dict( - ( - fk_sig( - fk['constrained_columns'], - fk['referred_table'], fk['referred_columns']), - fk - ) for fk in fks.values() - ) - - table_data = self._get_table_sql(connection, table_name, schema=schema) - if table_data is None: - # system tables, etc. - return [] - - def parse_fks(): - FK_PATTERN = ( - '(?:CONSTRAINT (\w+) +)?' - 'FOREIGN KEY *\( *(.+?) *\) +' - 'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\)' - ) - - for match in re.finditer(FK_PATTERN, table_data, re.I): - ( - constraint_name, constrained_columns, - referred_quoted_name, referred_name, - referred_columns) = match.group(1, 2, 3, 4, 5) - constrained_columns = list( - self._find_cols_in_sig(constrained_columns)) - if not referred_columns: - referred_columns = constrained_columns - else: - referred_columns = list( - self._find_cols_in_sig(referred_columns)) - referred_name = referred_quoted_name or referred_name - yield ( - constraint_name, constrained_columns, - referred_name, referred_columns) - fkeys = [] - - for ( - constraint_name, constrained_columns, - referred_name, referred_columns) in parse_fks(): - sig = fk_sig( - constrained_columns, referred_name, referred_columns) - if sig not in keys_by_signature: - util.warn( - "WARNING: SQL-parsed foreign key constraint " - "'%s' could not be located in PRAGMA " - "foreign_keys for table %s" % ( - sig, - table_name - )) - continue - key = keys_by_signature.pop(sig) - key['name'] = constraint_name - fkeys.append(key) - # assume the remainders are the unnamed, inline constraints, just - # use them as is as it's extremely difficult to parse inline - # constraints - fkeys.extend(keys_by_signature.values()) - return fkeys - - def _find_cols_in_sig(self, sig): - for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I): - yield match.group(1) or match.group(2) - - @reflection.cache - def get_unique_constraints(self, connection, table_name, - schema=None, **kw): - - auto_index_by_sig = {} - for idx in self.get_indexes( - connection, table_name, schema=schema, - include_auto_indexes=True, **kw): - if not idx['name'].startswith("sqlite_autoindex"): - continue - sig = tuple(idx['column_names']) - auto_index_by_sig[sig] = idx - - table_data = self._get_table_sql( - connection, table_name, schema=schema, **kw) - if not table_data: - return [] - - unique_constraints = [] - - def parse_uqs(): - UNIQUE_PATTERN = '(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)' - INLINE_UNIQUE_PATTERN = ( - '(?:(".+?")|([a-z0-9]+)) ' - '+[a-z0-9_ ]+? 
+UNIQUE') - - for match in re.finditer(UNIQUE_PATTERN, table_data, re.I): - name, cols = match.group(1, 2) - yield name, list(self._find_cols_in_sig(cols)) - - # we need to match inlines as well, as we seek to differentiate - # a UNIQUE constraint from a UNIQUE INDEX, even though these - # are kind of the same thing :) - for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I): - cols = list( - self._find_cols_in_sig(match.group(1) or match.group(2))) - yield None, cols - - for name, cols in parse_uqs(): - sig = tuple(cols) - if sig in auto_index_by_sig: - auto_index_by_sig.pop(sig) - parsed_constraint = { - 'name': name, - 'column_names': cols - } - unique_constraints.append(parsed_constraint) - # NOTE: auto_index_by_sig might not be empty here, - # the PRIMARY KEY may have an entry. - return unique_constraints - - @reflection.cache - def get_indexes(self, connection, table_name, schema=None, **kw): - pragma_indexes = self._get_table_pragma( - connection, "index_list", table_name, schema=schema) - indexes = [] - - include_auto_indexes = kw.pop('include_auto_indexes', False) - for row in pragma_indexes: - # ignore implicit primary key index. - # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html - if (not include_auto_indexes and - row[1].startswith('sqlite_autoindex')): - continue - - indexes.append(dict(name=row[1], column_names=[], unique=row[2])) - - # loop thru unique indexes to get the column names. - for idx in indexes: - pragma_index = self._get_table_pragma( - connection, "index_info", idx['name']) - - for row in pragma_index: - idx['column_names'].append(row[2]) - return indexes - - @reflection.cache - def _get_table_sql(self, connection, table_name, schema=None, **kw): - try: - s = ("SELECT sql FROM " - " (SELECT * FROM sqlite_master UNION ALL " - " SELECT * FROM sqlite_temp_master) " - "WHERE name = '%s' " - "AND type = 'table'") % table_name - rs = connection.execute(s) - except exc.DBAPIError: - s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " - "AND type = 'table'") % table_name - rs = connection.execute(s) - return rs.scalar() - - def _get_table_pragma(self, connection, pragma, table_name, schema=None): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - statement = "PRAGMA %s." % quote(schema) - else: - statement = "PRAGMA " - qtable = quote(table_name) - statement = "%s%s(%s)" % (statement, pragma, qtable) - cursor = connection.execute(statement) - if not cursor._soft_closed: - # work around SQLite issue whereby cursor.description - # is blank when PRAGMA returns no rows: - # http://www.sqlite.org/cvstrac/tktview?tn=1884 - result = cursor.fetchall() - else: - result = [] - return result diff --git a/python/sqlalchemy/dialects/sqlite/pysqlcipher.py b/python/sqlalchemy/dialects/sqlite/pysqlcipher.py deleted file mode 100644 index 9166e36b..00000000 --- a/python/sqlalchemy/dialects/sqlite/pysqlcipher.py +++ /dev/null @@ -1,116 +0,0 @@ -# sqlite/pysqlcipher.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: sqlite+pysqlcipher - :name: pysqlcipher - :dbapi: pysqlcipher - :connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=] - :url: https://pypi.python.org/pypi/pysqlcipher - - ``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make - use of the `SQLCipher `_ backend. - - .. 
versionadded:: 0.9.9 - -Driver ------- - -The driver here is the `pysqlcipher `_ -driver, which makes use of the SQLCipher engine. This system essentially -introduces new PRAGMA commands to SQLite which allows the setting of a -passphrase and other encryption parameters, allowing the database -file to be encrypted. - -Connect Strings ---------------- - -The format of the connect string is in every way the same as that -of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the -"password" field is now accepted, which should contain a passphrase:: - - e = create_engine('sqlite+pysqlcipher://:testing@/foo.db') - -For an absolute file path, two leading slashes should be used for the -database name:: - - e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db') - -A selection of additional encryption-related pragmas supported by SQLCipher -as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed -in the query string, and will result in that PRAGMA being called for each -new connection. Currently, ``cipher``, ``kdf_iter`` -``cipher_page_size`` and ``cipher_use_hmac`` are supported:: - - e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000') - - -Pooling Behavior ----------------- - -The driver makes a change to the default pool behavior of pysqlite -as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver -has been observed to be significantly slower on connection than the -pysqlite driver, most likely due to the encryption overhead, so the -dialect here defaults to using the :class:`.SingletonThreadPool` -implementation, -instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool -implementation is entirely configurable using the -:paramref:`.create_engine.poolclass` parameter; the :class:`.StaticPool` may -be more feasible for single-threaded use, or :class:`.NullPool` may be used -to prevent unencrypted connections from being held open for long periods of -time, at the expense of slower startup time for new connections. - - -""" -from __future__ import absolute_import -from .pysqlite import SQLiteDialect_pysqlite -from ...engine import url as _url -from ... 
import pool - - -class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite): - driver = 'pysqlcipher' - - pragmas = ('kdf_iter', 'cipher', 'cipher_page_size', 'cipher_use_hmac') - - @classmethod - def dbapi(cls): - from pysqlcipher import dbapi2 as sqlcipher - return sqlcipher - - @classmethod - def get_pool_class(cls, url): - return pool.SingletonThreadPool - - def connect(self, *cargs, **cparams): - passphrase = cparams.pop('passphrase', '') - - pragmas = dict( - (key, cparams.pop(key, None)) for key in - self.pragmas - ) - - conn = super(SQLiteDialect_pysqlcipher, self).\ - connect(*cargs, **cparams) - conn.execute('pragma key="%s"' % passphrase) - for prag, value in pragmas.items(): - if value is not None: - conn.execute('pragma %s=%s' % (prag, value)) - - return conn - - def create_connect_args(self, url): - super_url = _url.URL( - url.drivername, username=url.username, - host=url.host, database=url.database, query=url.query) - c_args, opts = super(SQLiteDialect_pysqlcipher, self).\ - create_connect_args(super_url) - opts['passphrase'] = url.password - return c_args, opts - -dialect = SQLiteDialect_pysqlcipher diff --git a/python/sqlalchemy/dialects/sqlite/pysqlite.py b/python/sqlalchemy/dialects/sqlite/pysqlite.py deleted file mode 100644 index e1c44347..00000000 --- a/python/sqlalchemy/dialects/sqlite/pysqlite.py +++ /dev/null @@ -1,377 +0,0 @@ -# sqlite/pysqlite.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: sqlite+pysqlite - :name: pysqlite - :dbapi: sqlite3 - :connectstring: sqlite+pysqlite:///file_path - :url: http://docs.python.org/library/sqlite3.html - - Note that ``pysqlite`` is the same driver as the ``sqlite3`` - module included with the Python distribution. - -Driver ------- - -When using Python 2.5 and above, the built in ``sqlite3`` driver is -already installed and no additional installation is needed. Otherwise, -the ``pysqlite2`` driver needs to be present. This is the same driver as -``sqlite3``, just with a different name. - -The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3`` -is loaded. This allows an explicitly installed pysqlite driver to take -precedence over the built in one. As with all dialects, a specific -DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control -this explicitly:: - - from sqlite3 import dbapi2 as sqlite - e = create_engine('sqlite+pysqlite:///file.db', module=sqlite) - - -Connect Strings ---------------- - -The file specification for the SQLite database is taken as the "database" -portion of the URL. Note that the format of a SQLAlchemy url is:: - - driver://user:pass@host/database - -This means that the actual filename to be used starts with the characters to -the **right** of the third slash. So connecting to a relative filepath -looks like:: - - # relative path - e = create_engine('sqlite:///path/to/database.db') - -An absolute path, which is denoted by starting with a slash, means you -need **four** slashes:: - - # absolute path - e = create_engine('sqlite:////path/to/database.db') - -To use a Windows path, regular drive specifications and backslashes can be -used. Double backslashes are probably needed:: - - # absolute path on Windows - e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db') - -The sqlite ``:memory:`` identifier is the default if no filepath is -present. 
Specify ``sqlite://`` and nothing else::
-
-    # in-memory database
-    e = create_engine('sqlite://')
-
-Compatibility with sqlite3 "native" date and datetime types
------------------------------------------------------------
-
-The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
-sqlite3.PARSE_COLNAMES options, which have the effect of any column
-or expression explicitly cast as "date" or "timestamp" will be converted
-to a Python date or datetime object. The date and datetime types provided
-with the pysqlite dialect are not currently compatible with these options,
-since they render the ISO date/datetime including microseconds, which
-pysqlite's driver does not. Additionally, SQLAlchemy does not at
-this time automatically render the "cast" syntax required for the
-freestanding functions "current_timestamp" and "current_date" to return
-datetime/date types natively. Unfortunately, pysqlite
-does not provide the standard DBAPI types in ``cursor.description``,
-leaving SQLAlchemy with no way to detect these types on the fly
-without expensive per-row type checks.
-
-Keeping in mind that pysqlite's parsing option is not recommended,
-nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
-can be forced if one configures "native_datetime=True" on create_engine()::
-
-    engine = create_engine('sqlite://',
-        connect_args={'detect_types':
-            sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
-        native_datetime=True
-    )
-
-With this flag enabled, the DATE and TIMESTAMP types (but note - not the
-DATETIME or TIME types...confused yet ?) will not perform any bind parameter
-or result processing. Execution of "func.current_date()" will return a string.
-"func.current_timestamp()" is registered as returning a DATETIME type in
-SQLAlchemy, so this function still receives SQLAlchemy-level result
-processing.
-
-.. _pysqlite_threading_pooling:
-
-Threading/Pooling Behavior
----------------------------
-
-Pysqlite's default behavior is to prohibit the usage of a single connection
-in more than one thread. This is originally intended to work with older
-versions of SQLite that did not support multithreaded operation under
-various circumstances. In particular, older SQLite versions
-did not allow a ``:memory:`` database to be used in multiple threads
-under any circumstances.
-
-Pysqlite does include a now-undocumented flag known as
-``check_same_thread`` which will disable this check, however note that
-pysqlite connections are still not safe to use concurrently in multiple
-threads. In particular, any statement execution calls would need to be
-externally mutexed, as Pysqlite does not provide for thread-safe propagation
-of error messages among other things. So while even ``:memory:`` databases
-can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
-thread-safety to make this usage worth it.
-
-SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
-
-* When a ``:memory:`` SQLite database is specified, the dialect by default
-  will use :class:`.SingletonThreadPool`. This pool maintains a single
-  connection per thread, so that all access to the engine within the current
-  thread use the same ``:memory:`` database - other threads would access a
-  different ``:memory:`` database.
-* When a file-based database is specified, the dialect will use
-  :class:`.NullPool` as the source of connections. This pool closes and
-  discards connections which are returned to the pool immediately. 
SQLite
-  file-based connections have extremely low overhead, so pooling is not
-  necessary. The scheme also prevents a connection from being used again in
-  a different thread and works best with SQLite's coarse-grained file locking.
-
-  .. versionchanged:: 0.7
-      Default selection of :class:`.NullPool` for SQLite file-based databases.
-      Previous versions select :class:`.SingletonThreadPool` by
-      default for all SQLite databases.
-
-
-Using a Memory Database in Multiple Threads
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To use a ``:memory:`` database in a multithreaded scenario, the same
-connection object must be shared among threads, since the database exists
-only within the scope of that connection. The
-:class:`.StaticPool` implementation will maintain a single connection
-globally, and the ``check_same_thread`` flag can be passed to Pysqlite
-as ``False``::
-
-    from sqlalchemy.pool import StaticPool
-    engine = create_engine('sqlite://',
-                           connect_args={'check_same_thread': False},
-                           poolclass=StaticPool)
-
-Note that using a ``:memory:`` database in multiple threads requires a recent
-version of SQLite.
-
-Using Temporary Tables with SQLite
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Due to the way SQLite deals with temporary tables, if you wish to use a
-temporary table in a file-based SQLite database across multiple checkouts
-from the connection pool, such as when using an ORM :class:`.Session` where
-the temporary table should continue to remain after :meth:`.Session.commit` or
-:meth:`.Session.rollback` is called, a pool which maintains a single
-connection must be used. Use :class:`.SingletonThreadPool` if the scope is
-only needed within the current thread, or :class:`.StaticPool` if scope is
-needed within multiple threads for this case::
-
-    # maintain the same connection per thread
-    from sqlalchemy.pool import SingletonThreadPool
-    engine = create_engine('sqlite:///mydb.db',
-                           poolclass=SingletonThreadPool)
-
-
-    # maintain the same connection across all threads
-    from sqlalchemy.pool import StaticPool
-    engine = create_engine('sqlite:///mydb.db',
-                           poolclass=StaticPool)
-
-Note that :class:`.SingletonThreadPool` should be configured for the number
-of threads that are to be used; beyond that number, connections will be
-closed out in a non-deterministic way.
-
-Unicode
--------
-
-The pysqlite driver only returns Python ``unicode`` objects in result sets,
-never plain strings, and accommodates ``unicode`` objects within bound
-parameter values in all cases. Regardless of the SQLAlchemy string type in
-use, string-based result values will be Python ``unicode`` in Python 2.
-The :class:`.Unicode` type should still be used to indicate those columns that
-require unicode, however, so that non-``unicode`` values passed inadvertently
-will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
-is passed containing non-ASCII characters.
-
-.. _pysqlite_serializable:
-
-Serializable isolation / Savepoints / Transactional DDL
--------------------------------------------------------
-
-In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
-driver's assortment of issues that prevent several features of SQLite
-from working correctly. The pysqlite DBAPI driver has several
-long-standing bugs which impact the correctness of its transactional
-behavior. 
.. _pysqlite_serializable: - -Serializable isolation / Savepoints / Transactional DDL -------------------------------------------------------- - -In the section :ref:`sqlite_concurrency`, we refer to the pysqlite -driver's assortment of issues that prevent several features of SQLite -from working correctly. The pysqlite DBAPI driver has several -long-standing bugs which impact the correctness of its transactional -behavior. In its default mode of operation, SQLite features such as -SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are -non-functional, and in order to use these features, workarounds must -be employed. - -The issue is essentially that the driver attempts to second-guess the user's -intent, failing to start transactions and sometimes ending them prematurely, in -an effort to minimize the SQLite database's file locking behavior, even -though SQLite itself uses "shared" locks for read-only activities. - -SQLAlchemy chooses not to alter this behavior by default, as it is the -long-expected behavior of the pysqlite driver; if and when the pysqlite -driver repairs these issues, that will be a driver towards changing -SQLAlchemy's defaults. - -The good news is that with a few events, we can implement transactional -support fully, by disabling pysqlite's feature entirely and emitting BEGIN -ourselves. This is achieved using two event listeners:: - - from sqlalchemy import create_engine, event - - engine = create_engine("sqlite:///myfile.db") - - @event.listens_for(engine, "connect") - def do_connect(dbapi_connection, connection_record): - # disable pysqlite's emitting of the BEGIN statement entirely. - # also stops it from emitting COMMIT before any DDL. - dbapi_connection.isolation_level = None - - @event.listens_for(engine, "begin") - def do_begin(conn): - # emit our own BEGIN - conn.execute("BEGIN") - -Above, we intercept a new pysqlite connection and disable any transactional -integration. Then, at the point at which SQLAlchemy knows that transaction -scope is to begin, we emit ``"BEGIN"`` ourselves. - -When we take control of ``"BEGIN"``, we can also directly control SQLite's -locking modes, introduced at `BEGIN TRANSACTION `_, -by adding the desired locking mode to our ``"BEGIN"``:: - - @event.listens_for(engine, "begin") - def do_begin(conn): - conn.execute("BEGIN EXCLUSIVE") - -.. 
seealso:: - - `BEGIN TRANSACTION `_ - on the SQLite site - - `sqlite3 SELECT does not BEGIN a transaction `_ - on the Python bug tracker - - `sqlite3 module breaks transactions and potentially corrupts data `_ - on the Python bug tracker - - -""" - -from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE -from sqlalchemy import exc, pool -from sqlalchemy import types as sqltypes -from sqlalchemy import util - -import os - - -class _SQLite_pysqliteTimeStamp(DATETIME): - def bind_processor(self, dialect): - if dialect.native_datetime: - return None - else: - return DATETIME.bind_processor(self, dialect) - - def result_processor(self, dialect, coltype): - if dialect.native_datetime: - return None - else: - return DATETIME.result_processor(self, dialect, coltype) - - -class _SQLite_pysqliteDate(DATE): - def bind_processor(self, dialect): - if dialect.native_datetime: - return None - else: - return DATE.bind_processor(self, dialect) - - def result_processor(self, dialect, coltype): - if dialect.native_datetime: - return None - else: - return DATE.result_processor(self, dialect, coltype) - - -class SQLiteDialect_pysqlite(SQLiteDialect): - default_paramstyle = 'qmark' - - colspecs = util.update_copy( - SQLiteDialect.colspecs, - { - sqltypes.Date: _SQLite_pysqliteDate, - sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp, - } - ) - - if not util.py2k: - description_encoding = None - - driver = 'pysqlite' - - def __init__(self, **kwargs): - SQLiteDialect.__init__(self, **kwargs) - - if self.dbapi is not None: - sqlite_ver = self.dbapi.version_info - if sqlite_ver < (2, 1, 3): - util.warn( - ("The installed version of pysqlite2 (%s) is out-dated " - "and will cause errors in some cases. Version 2.1.3 " - "or greater is recommended.") % - '.'.join([str(subver) for subver in sqlite_ver])) - - @classmethod - def dbapi(cls): - try: - from pysqlite2 import dbapi2 as sqlite - except ImportError as e: - try: - from sqlite3 import dbapi2 as sqlite # try 2.5+ stdlib name. - except ImportError: - raise e - return sqlite - - @classmethod - def get_pool_class(cls, url): - if url.database and url.database != ':memory:': - return pool.NullPool - else: - return pool.SingletonThreadPool - - def _get_server_version_info(self, connection): - return self.dbapi.sqlite_version_info - - def create_connect_args(self, url): - if url.username or url.password or url.host or url.port: - raise exc.ArgumentError( - "Invalid SQLite URL: %s\n" - "Valid SQLite URL forms are:\n" - " sqlite:///:memory: (or, sqlite://)\n" - " sqlite:///relative/path/to/file.db\n" - " sqlite:////absolute/path/to/file.db" % (url,)) - filename = url.database or ':memory:' - if filename != ':memory:': - filename = os.path.abspath(filename) - - opts = url.query.copy() - util.coerce_kw_type(opts, 'timeout', float) - util.coerce_kw_type(opts, 'isolation_level', str) - util.coerce_kw_type(opts, 'detect_types', int) - util.coerce_kw_type(opts, 'check_same_thread', bool) - util.coerce_kw_type(opts, 'cached_statements', int) - - return ([filename], opts) - - def is_disconnect(self, e, connection, cursor): - return isinstance(e, self.dbapi.ProgrammingError) and \ - "Cannot operate on a closed database." 
in str(e) - -dialect = SQLiteDialect_pysqlite diff --git a/python/sqlalchemy/dialects/sybase/__init__.py b/python/sqlalchemy/dialects/sybase/__init__.py deleted file mode 100644 index 0c55de1d..00000000 --- a/python/sqlalchemy/dialects/sybase/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# sybase/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.sybase import base, pysybase, pyodbc - -# default dialect -base.dialect = pyodbc.dialect - -from .base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ - TEXT, DATE, DATETIME, FLOAT, NUMERIC,\ - BIGINT, INT, INTEGER, SMALLINT, BINARY,\ - VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\ - IMAGE, BIT, MONEY, SMALLMONEY, TINYINT,\ - dialect - - -__all__ = ( - 'CHAR', 'VARCHAR', 'TIME', 'NCHAR', 'NVARCHAR', - 'TEXT', 'DATE', 'DATETIME', 'FLOAT', 'NUMERIC', - 'BIGINT', 'INT', 'INTEGER', 'SMALLINT', 'BINARY', - 'VARBINARY', 'UNITEXT', 'UNICHAR', 'UNIVARCHAR', - 'IMAGE', 'BIT', 'MONEY', 'SMALLMONEY', 'TINYINT', - 'dialect' -) diff --git a/python/sqlalchemy/dialects/sybase/base.py b/python/sqlalchemy/dialects/sybase/base.py deleted file mode 100644 index b3f8e307..00000000 --- a/python/sqlalchemy/dialects/sybase/base.py +++ /dev/null @@ -1,825 +0,0 @@ -# sybase/base.py -# Copyright (C) 2010-2015 the SQLAlchemy authors and contributors -# -# get_select_precolumns(), limit_clause() implementation -# copyright (C) 2007 Fisch Asset Management -# AG http://www.fam.ch, with coding by Alexander Houben -# alexander.houben@thor-solutions.ch -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -.. dialect:: sybase - :name: Sybase - -.. note:: - - The Sybase dialect functions on current SQLAlchemy versions - but is not regularly tested, and may have many issues and - caveats not currently handled. 
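A minimal connection sketch (the DSN, user name and password are
hypothetical; see the driver-specific sections below for the supported
connect string forms)::

    from sqlalchemy import create_engine

    engine = create_engine('sybase+pyodbc://user:password@some_dsn')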
- -""" -import operator -import re - -from sqlalchemy.sql import compiler, expression, text, bindparam -from sqlalchemy.engine import default, base, reflection -from sqlalchemy import types as sqltypes -from sqlalchemy.sql import operators as sql_operators -from sqlalchemy import schema as sa_schema -from sqlalchemy import util, sql, exc - -from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ - TEXT, DATE, DATETIME, FLOAT, NUMERIC,\ - BIGINT, INT, INTEGER, SMALLINT, BINARY,\ - VARBINARY, DECIMAL, TIMESTAMP, Unicode,\ - UnicodeText, REAL - -RESERVED_WORDS = set([ - "add", "all", "alter", "and", - "any", "as", "asc", "backup", - "begin", "between", "bigint", "binary", - "bit", "bottom", "break", "by", - "call", "capability", "cascade", "case", - "cast", "char", "char_convert", "character", - "check", "checkpoint", "close", "comment", - "commit", "connect", "constraint", "contains", - "continue", "convert", "create", "cross", - "cube", "current", "current_timestamp", "current_user", - "cursor", "date", "dbspace", "deallocate", - "dec", "decimal", "declare", "default", - "delete", "deleting", "desc", "distinct", - "do", "double", "drop", "dynamic", - "else", "elseif", "encrypted", "end", - "endif", "escape", "except", "exception", - "exec", "execute", "existing", "exists", - "externlogin", "fetch", "first", "float", - "for", "force", "foreign", "forward", - "from", "full", "goto", "grant", - "group", "having", "holdlock", "identified", - "if", "in", "index", "index_lparen", - "inner", "inout", "insensitive", "insert", - "inserting", "install", "instead", "int", - "integer", "integrated", "intersect", "into", - "iq", "is", "isolation", "join", - "key", "lateral", "left", "like", - "lock", "login", "long", "match", - "membership", "message", "mode", "modify", - "natural", "new", "no", "noholdlock", - "not", "notify", "null", "numeric", - "of", "off", "on", "open", - "option", "options", "or", "order", - "others", "out", "outer", "over", - "passthrough", "precision", "prepare", "primary", - "print", "privileges", "proc", "procedure", - "publication", "raiserror", "readtext", "real", - "reference", "references", "release", "remote", - "remove", "rename", "reorganize", "resource", - "restore", "restrict", "return", "revoke", - "right", "rollback", "rollup", "save", - "savepoint", "scroll", "select", "sensitive", - "session", "set", "setuser", "share", - "smallint", "some", "sqlcode", "sqlstate", - "start", "stop", "subtrans", "subtransaction", - "synchronize", "syntax_error", "table", "temporary", - "then", "time", "timestamp", "tinyint", - "to", "top", "tran", "trigger", - "truncate", "tsequal", "unbounded", "union", - "unique", "unknown", "unsigned", "update", - "updating", "user", "using", "validate", - "values", "varbinary", "varchar", "variable", - "varying", "view", "wait", "waitfor", - "when", "where", "while", "window", - "with", "with_cube", "with_lparen", "with_rollup", - "within", "work", "writetext", -]) - - -class _SybaseUnitypeMixin(object): - """these types appear to return a buffer object.""" - - def result_processor(self, dialect, coltype): - def process(value): - if value is not None: - return str(value) # decode("ucs-2") - else: - return None - return process - - -class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode): - __visit_name__ = 'UNICHAR' - - -class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode): - __visit_name__ = 'UNIVARCHAR' - - -class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText): - __visit_name__ = 'UNITEXT' - - -class TINYINT(sqltypes.Integer): 
- __visit_name__ = 'TINYINT' - - -class BIT(sqltypes.TypeEngine): - __visit_name__ = 'BIT' - - -class MONEY(sqltypes.TypeEngine): - __visit_name__ = "MONEY" - - -class SMALLMONEY(sqltypes.TypeEngine): - __visit_name__ = "SMALLMONEY" - - -class UNIQUEIDENTIFIER(sqltypes.TypeEngine): - __visit_name__ = "UNIQUEIDENTIFIER" - - -class IMAGE(sqltypes.LargeBinary): - __visit_name__ = 'IMAGE' - - -class SybaseTypeCompiler(compiler.GenericTypeCompiler): - def visit_large_binary(self, type_, **kw): - return self.visit_IMAGE(type_) - - def visit_boolean(self, type_, **kw): - return self.visit_BIT(type_) - - def visit_unicode(self, type_, **kw): - return self.visit_NVARCHAR(type_) - - def visit_UNICHAR(self, type_, **kw): - return "UNICHAR(%d)" % type_.length - - def visit_UNIVARCHAR(self, type_, **kw): - return "UNIVARCHAR(%d)" % type_.length - - def visit_UNITEXT(self, type_, **kw): - return "UNITEXT" - - def visit_TINYINT(self, type_, **kw): - return "TINYINT" - - def visit_IMAGE(self, type_, **kw): - return "IMAGE" - - def visit_BIT(self, type_, **kw): - return "BIT" - - def visit_MONEY(self, type_, **kw): - return "MONEY" - - def visit_SMALLMONEY(self, type_, **kw): - return "SMALLMONEY" - - def visit_UNIQUEIDENTIFIER(self, type_, **kw): - return "UNIQUEIDENTIFIER" - -ischema_names = { - 'bigint': BIGINT, - 'int': INTEGER, - 'integer': INTEGER, - 'smallint': SMALLINT, - 'tinyint': TINYINT, - 'unsigned bigint': BIGINT, # TODO: unsigned flags - 'unsigned int': INTEGER, # TODO: unsigned flags - 'unsigned smallint': SMALLINT, # TODO: unsigned flags - 'numeric': NUMERIC, - 'decimal': DECIMAL, - 'dec': DECIMAL, - 'float': FLOAT, - 'double': NUMERIC, # TODO - 'double precision': NUMERIC, # TODO - 'real': REAL, - 'smallmoney': SMALLMONEY, - 'money': MONEY, - 'smalldatetime': DATETIME, - 'datetime': DATETIME, - 'date': DATE, - 'time': TIME, - 'char': CHAR, - 'character': CHAR, - 'varchar': VARCHAR, - 'character varying': VARCHAR, - 'char varying': VARCHAR, - 'unichar': UNICHAR, - 'unicode character': UNIVARCHAR, - 'nchar': NCHAR, - 'national char': NCHAR, - 'national character': NCHAR, - 'nvarchar': NVARCHAR, - 'nchar varying': NVARCHAR, - 'national char varying': NVARCHAR, - 'national character varying': NVARCHAR, - 'text': TEXT, - 'unitext': UNITEXT, - 'binary': BINARY, - 'varbinary': VARBINARY, - 'image': IMAGE, - 'bit': BIT, - - # not in documentation for ASE 15.7 - 'long varchar': TEXT, # TODO - 'timestamp': TIMESTAMP, - 'uniqueidentifier': UNIQUEIDENTIFIER, - -} - - -class SybaseInspector(reflection.Inspector): - - def __init__(self, conn): - reflection.Inspector.__init__(self, conn) - - def get_table_id(self, table_name, schema=None): - """Return the table id from `table_name` and `schema`.""" - - return self.dialect.get_table_id(self.bind, table_name, schema, - info_cache=self.info_cache) - - -class SybaseExecutionContext(default.DefaultExecutionContext): - _enable_identity_insert = False - - def set_ddl_autocommit(self, connection, value): - """Must be implemented by subclasses to accommodate DDL executions. - - "connection" is the raw unwrapped DBAPI connection. "value" - is True or False. when True, the connection should be configured - such that a DDL can take place subsequently. when False, - a DDL has taken place and the connection should be resumed - into non-autocommit mode. 
- - """ - raise NotImplementedError() - - def pre_exec(self): - if self.isinsert: - tbl = self.compiled.statement.table - seq_column = tbl._autoincrement_column - insert_has_sequence = seq_column is not None - - if insert_has_sequence: - self._enable_identity_insert = \ - seq_column.key in self.compiled_parameters[0] - else: - self._enable_identity_insert = False - - if self._enable_identity_insert: - self.cursor.execute( - "SET IDENTITY_INSERT %s ON" % - self.dialect.identifier_preparer.format_table(tbl)) - - if self.isddl: - # TODO: to enhance this, we can detect "ddl in tran" on the - # database settings. this error message should be improved to - # include a note about that. - if not self.should_autocommit: - raise exc.InvalidRequestError( - "The Sybase dialect only supports " - "DDL in 'autocommit' mode at this time.") - - self.root_connection.engine.logger.info( - "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')") - - self.set_ddl_autocommit( - self.root_connection.connection.connection, - True) - - def post_exec(self): - if self.isddl: - self.set_ddl_autocommit(self.root_connection, False) - - if self._enable_identity_insert: - self.cursor.execute( - "SET IDENTITY_INSERT %s OFF" % - self.dialect.identifier_preparer. - format_table(self.compiled.statement.table) - ) - - def get_lastrowid(self): - cursor = self.create_cursor() - cursor.execute("SELECT @@identity AS lastrowid") - lastrowid = cursor.fetchone()[0] - cursor.close() - return lastrowid - - -class SybaseSQLCompiler(compiler.SQLCompiler): - ansi_bind_rules = True - - extract_map = util.update_copy( - compiler.SQLCompiler.extract_map, - { - 'doy': 'dayofyear', - 'dow': 'weekday', - 'milliseconds': 'millisecond' - }) - - def get_select_precolumns(self, select, **kw): - s = select._distinct and "DISTINCT " or "" - # TODO: don't think Sybase supports - # bind params for FIRST / TOP - limit = select._limit - if limit: - # if select._limit == 1: - # s += "FIRST " - # else: - # s += "TOP %s " % (select._limit,) - s += "TOP %s " % (limit,) - offset = select._offset - if offset: - if not limit: - # FIXME: sybase doesn't allow an offset without a limit - # so use a huge value for TOP here - s += "TOP 1000000 " - s += "START AT %s " % (offset + 1,) - return s - - def get_from_hint_text(self, table, text): - return text - - def limit_clause(self, select, **kw): - # Limit in sybase is after the select keyword - return "" - - def visit_extract(self, extract, **kw): - field = self.extract_map.get(extract.field, extract.field) - return 'DATEPART("%s", %s)' % ( - field, self.process(extract.expr, **kw)) - - def visit_now_func(self, fn, **kw): - return "GETDATE()" - - def for_update_clause(self, select): - # "FOR UPDATE" is only allowed on "DECLARE CURSOR" - # which SQLAlchemy doesn't use - return '' - - def order_by_clause(self, select, **kw): - kw['literal_binds'] = True - order_by = self.process(select._order_by_clause, **kw) - - # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT - if order_by and (not self.is_subquery() or select._limit): - return " ORDER BY " + order_by - else: - return "" - - -class SybaseDDLCompiler(compiler.DDLCompiler): - def get_column_specification(self, column, **kwargs): - colspec = self.preparer.format_column(column) + " " + \ - self.dialect.type_compiler.process( - column.type, type_expression=column) - - if column.table is None: - raise exc.CompileError( - "The Sybase dialect requires Table-bound " - "columns in order to generate DDL") - seq_col = column.table._autoincrement_column - - # install a 
IDENTITY Sequence if we have an implicit IDENTITY column - if seq_col is column: - sequence = isinstance(column.default, sa_schema.Sequence) \ - and column.default - if sequence: - start, increment = sequence.start or 1, \ - sequence.increment or 1 - else: - start, increment = 1, 1 - if (start, increment) == (1, 1): - colspec += " IDENTITY" - else: - # TODO: need correct syntax for this - colspec += " IDENTITY(%s,%s)" % (start, increment) - else: - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - if column.nullable is not None: - if not column.nullable or column.primary_key: - colspec += " NOT NULL" - else: - colspec += " NULL" - - return colspec - - def visit_drop_index(self, drop): - index = drop.element - return "\nDROP INDEX %s.%s" % ( - self.preparer.quote_identifier(index.table.name), - self._prepared_index_name(drop.element, - include_schema=False) - ) - - -class SybaseIdentifierPreparer(compiler.IdentifierPreparer): - reserved_words = RESERVED_WORDS - - -class SybaseDialect(default.DefaultDialect): - name = 'sybase' - supports_unicode_statements = False - supports_sane_rowcount = False - supports_sane_multi_rowcount = False - - supports_native_boolean = False - supports_unicode_binds = False - postfetch_lastrowid = True - - colspecs = {} - ischema_names = ischema_names - - type_compiler = SybaseTypeCompiler - statement_compiler = SybaseSQLCompiler - ddl_compiler = SybaseDDLCompiler - preparer = SybaseIdentifierPreparer - inspector = SybaseInspector - - construct_arguments = [] - - def _get_default_schema_name(self, connection): - return connection.scalar( - text("SELECT user_name() as user_name", - typemap={'user_name': Unicode}) - ) - - def initialize(self, connection): - super(SybaseDialect, self).initialize(connection) - if self.server_version_info is not None and\ - self.server_version_info < (15, ): - self.max_identifier_length = 30 - else: - self.max_identifier_length = 255 - - def get_table_id(self, connection, table_name, schema=None, **kw): - """Fetch the id for schema.table_name. - - Several reflection methods require the table id. The idea for using - this method is that it can be fetched one time and cached for - subsequent calls. 
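For example, through the dialect-specific inspector (an illustrative
sketch; the connect string is hypothetical)::

    from sqlalchemy import create_engine, inspect

    engine = create_engine('sybase+pysybase://user:passwd@host/mydb')
    insp = inspect(engine)      # a SybaseInspector
    table_id = insp.get_table_id('my_table', schema='dbo')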
- - """ - - table_id = None - if schema is None: - schema = self.default_schema_name - - TABLEID_SQL = text(""" - SELECT o.id AS id - FROM sysobjects o JOIN sysusers u ON o.uid=u.uid - WHERE u.name = :schema_name - AND o.name = :table_name - AND o.type in ('U', 'V') - """) - - if util.py2k: - if isinstance(schema, unicode): - schema = schema.encode("ascii") - if isinstance(table_name, unicode): - table_name = table_name.encode("ascii") - result = connection.execute(TABLEID_SQL, - schema_name=schema, - table_name=table_name) - table_id = result.scalar() - if table_id is None: - raise exc.NoSuchTableError(table_name) - return table_id - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - table_id = self.get_table_id(connection, table_name, schema, - info_cache=kw.get("info_cache")) - - COLUMN_SQL = text(""" - SELECT col.name AS name, - t.name AS type, - (col.status & 8) AS nullable, - (col.status & 128) AS autoincrement, - com.text AS 'default', - col.prec AS precision, - col.scale AS scale, - col.length AS length - FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON - col.cdefault = com.id - WHERE col.usertype = t.usertype - AND col.id = :table_id - ORDER BY col.colid - """) - - results = connection.execute(COLUMN_SQL, table_id=table_id) - - columns = [] - for (name, type_, nullable, autoincrement, default, precision, scale, - length) in results: - col_info = self._get_column_info(name, type_, bool(nullable), - bool(autoincrement), - default, precision, scale, - length) - columns.append(col_info) - - return columns - - def _get_column_info(self, name, type_, nullable, autoincrement, default, - precision, scale, length): - - coltype = self.ischema_names.get(type_, None) - - kwargs = {} - - if coltype in (NUMERIC, DECIMAL): - args = (precision, scale) - elif coltype == FLOAT: - args = (precision,) - elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR): - args = (length,) - else: - args = () - - if coltype: - coltype = coltype(*args, **kwargs) - # is this necessary - # if is_array: - # coltype = ARRAY(coltype) - else: - util.warn("Did not recognize type '%s' of column '%s'" % - (type_, name)) - coltype = sqltypes.NULLTYPE - - if default: - default = re.sub("DEFAULT", "", default).strip() - default = re.sub("^'(.*)'$", lambda m: m.group(1), default) - else: - default = None - - column_info = dict(name=name, type=coltype, nullable=nullable, - default=default, autoincrement=autoincrement) - return column_info - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - - table_id = self.get_table_id(connection, table_name, schema, - info_cache=kw.get("info_cache")) - - table_cache = {} - column_cache = {} - foreign_keys = [] - - table_cache[table_id] = {"name": table_name, "schema": schema} - - COLUMN_SQL = text(""" - SELECT c.colid AS id, c.name AS name - FROM syscolumns c - WHERE c.id = :table_id - """) - - results = connection.execute(COLUMN_SQL, table_id=table_id) - columns = {} - for col in results: - columns[col["id"]] = col["name"] - column_cache[table_id] = columns - - REFCONSTRAINT_SQL = text(""" - SELECT o.name AS name, r.reftabid AS reftable_id, - r.keycnt AS 'count', - r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3, - r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6, - r.fokey7 AS fokey7, r.fokey1 AS fokey8, r.fokey9 AS fokey9, - r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12, - r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS 
fokey15, - r.fokey16 AS fokey16, - r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3, - r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6, - r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9, - r.refkey10 AS refkey10, r.refkey11 AS refkey11, - r.refkey12 AS refkey12, r.refkey13 AS refkey13, - r.refkey14 AS refkey14, r.refkey15 AS refkey15, - r.refkey16 AS refkey16 - FROM sysreferences r JOIN sysobjects o on r.tableid = o.id - WHERE r.tableid = :table_id - """) - referential_constraints = connection.execute( - REFCONSTRAINT_SQL, table_id=table_id).fetchall() - - REFTABLE_SQL = text(""" - SELECT o.name AS name, u.name AS 'schema' - FROM sysobjects o JOIN sysusers u ON o.uid = u.uid - WHERE o.id = :table_id - """) - - for r in referential_constraints: - reftable_id = r["reftable_id"] - - if reftable_id not in table_cache: - c = connection.execute(REFTABLE_SQL, table_id=reftable_id) - reftable = c.fetchone() - c.close() - table_info = {"name": reftable["name"], "schema": None} - if (schema is not None or - reftable["schema"] != self.default_schema_name): - table_info["schema"] = reftable["schema"] - - table_cache[reftable_id] = table_info - results = connection.execute(COLUMN_SQL, table_id=reftable_id) - reftable_columns = {} - for col in results: - reftable_columns[col["id"]] = col["name"] - column_cache[reftable_id] = reftable_columns - - reftable = table_cache[reftable_id] - reftable_columns = column_cache[reftable_id] - - constrained_columns = [] - referred_columns = [] - for i in range(1, r["count"] + 1): - constrained_columns.append(columns[r["fokey%i" % i]]) - referred_columns.append(reftable_columns[r["refkey%i" % i]]) - - fk_info = { - "constrained_columns": constrained_columns, - "referred_schema": reftable["schema"], - "referred_table": reftable["name"], - "referred_columns": referred_columns, - "name": r["name"] - } - - foreign_keys.append(fk_info) - - return foreign_keys - - @reflection.cache - def get_indexes(self, connection, table_name, schema=None, **kw): - table_id = self.get_table_id(connection, table_name, schema, - info_cache=kw.get("info_cache")) - - INDEX_SQL = text(""" - SELECT object_name(i.id) AS table_name, - i.keycnt AS 'count', - i.name AS name, - (i.status & 0x2) AS 'unique', - index_col(object_name(i.id), i.indid, 1) AS col_1, - index_col(object_name(i.id), i.indid, 2) AS col_2, - index_col(object_name(i.id), i.indid, 3) AS col_3, - index_col(object_name(i.id), i.indid, 4) AS col_4, - index_col(object_name(i.id), i.indid, 5) AS col_5, - index_col(object_name(i.id), i.indid, 6) AS col_6, - index_col(object_name(i.id), i.indid, 7) AS col_7, - index_col(object_name(i.id), i.indid, 8) AS col_8, - index_col(object_name(i.id), i.indid, 9) AS col_9, - index_col(object_name(i.id), i.indid, 10) AS col_10, - index_col(object_name(i.id), i.indid, 11) AS col_11, - index_col(object_name(i.id), i.indid, 12) AS col_12, - index_col(object_name(i.id), i.indid, 13) AS col_13, - index_col(object_name(i.id), i.indid, 14) AS col_14, - index_col(object_name(i.id), i.indid, 15) AS col_15, - index_col(object_name(i.id), i.indid, 16) AS col_16 - FROM sysindexes i, sysobjects o - WHERE o.id = i.id - AND o.id = :table_id - AND (i.status & 2048) = 0 - AND i.indid BETWEEN 1 AND 254 - """) - - results = connection.execute(INDEX_SQL, table_id=table_id) - indexes = [] - for r in results: - column_names = [] - for i in range(1, r["count"]): - column_names.append(r["col_%i" % (i,)]) - index_info = {"name": r["name"], - "unique": bool(r["unique"]), - 
"column_names": column_names} - indexes.append(index_info) - - return indexes - - @reflection.cache - def get_pk_constraint(self, connection, table_name, schema=None, **kw): - table_id = self.get_table_id(connection, table_name, schema, - info_cache=kw.get("info_cache")) - - PK_SQL = text(""" - SELECT object_name(i.id) AS table_name, - i.keycnt AS 'count', - i.name AS name, - index_col(object_name(i.id), i.indid, 1) AS pk_1, - index_col(object_name(i.id), i.indid, 2) AS pk_2, - index_col(object_name(i.id), i.indid, 3) AS pk_3, - index_col(object_name(i.id), i.indid, 4) AS pk_4, - index_col(object_name(i.id), i.indid, 5) AS pk_5, - index_col(object_name(i.id), i.indid, 6) AS pk_6, - index_col(object_name(i.id), i.indid, 7) AS pk_7, - index_col(object_name(i.id), i.indid, 8) AS pk_8, - index_col(object_name(i.id), i.indid, 9) AS pk_9, - index_col(object_name(i.id), i.indid, 10) AS pk_10, - index_col(object_name(i.id), i.indid, 11) AS pk_11, - index_col(object_name(i.id), i.indid, 12) AS pk_12, - index_col(object_name(i.id), i.indid, 13) AS pk_13, - index_col(object_name(i.id), i.indid, 14) AS pk_14, - index_col(object_name(i.id), i.indid, 15) AS pk_15, - index_col(object_name(i.id), i.indid, 16) AS pk_16 - FROM sysindexes i, sysobjects o - WHERE o.id = i.id - AND o.id = :table_id - AND (i.status & 2048) = 2048 - AND i.indid BETWEEN 1 AND 254 - """) - - results = connection.execute(PK_SQL, table_id=table_id) - pks = results.fetchone() - results.close() - - constrained_columns = [] - if pks: - for i in range(1, pks["count"] + 1): - constrained_columns.append(pks["pk_%i" % (i,)]) - return {"constrained_columns": constrained_columns, - "name": pks["name"]} - else: - return {"constrained_columns": [], "name": None} - - @reflection.cache - def get_schema_names(self, connection, **kw): - - SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u") - - schemas = connection.execute(SCHEMA_SQL) - - return [s["name"] for s in schemas] - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - if schema is None: - schema = self.default_schema_name - - TABLE_SQL = text(""" - SELECT o.name AS name - FROM sysobjects o JOIN sysusers u ON o.uid = u.uid - WHERE u.name = :schema_name - AND o.type = 'U' - """) - - if util.py2k: - if isinstance(schema, unicode): - schema = schema.encode("ascii") - - tables = connection.execute(TABLE_SQL, schema_name=schema) - - return [t["name"] for t in tables] - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, **kw): - if schema is None: - schema = self.default_schema_name - - VIEW_DEF_SQL = text(""" - SELECT c.text - FROM syscomments c JOIN sysobjects o ON c.id = o.id - WHERE o.name = :view_name - AND o.type = 'V' - """) - - if util.py2k: - if isinstance(view_name, unicode): - view_name = view_name.encode("ascii") - - view = connection.execute(VIEW_DEF_SQL, view_name=view_name) - - return view.scalar() - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - if schema is None: - schema = self.default_schema_name - - VIEW_SQL = text(""" - SELECT o.name AS name - FROM sysobjects o JOIN sysusers u ON o.uid = u.uid - WHERE u.name = :schema_name - AND o.type = 'V' - """) - - if util.py2k: - if isinstance(schema, unicode): - schema = schema.encode("ascii") - views = connection.execute(VIEW_SQL, schema_name=schema) - - return [v["name"] for v in views] - - def has_table(self, connection, table_name, schema=None): - try: - self.get_table_id(connection, table_name, schema) - except 
exc.NoSuchTableError: - return False - else: - return True diff --git a/python/sqlalchemy/dialects/sybase/mxodbc.py b/python/sqlalchemy/dialects/sybase/mxodbc.py deleted file mode 100644 index 240b634d..00000000 --- a/python/sqlalchemy/dialects/sybase/mxodbc.py +++ /dev/null @@ -1,33 +0,0 @@ -# sybase/mxodbc.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -""" - -.. dialect:: sybase+mxodbc - :name: mxODBC - :dbapi: mxodbc - :connectstring: sybase+mxodbc://:@ - :url: http://www.egenix.com/ - -.. note:: - - This dialect is a stub only and is likely non functional at this time. - - -""" -from sqlalchemy.dialects.sybase.base import SybaseDialect -from sqlalchemy.dialects.sybase.base import SybaseExecutionContext -from sqlalchemy.connectors.mxodbc import MxODBCConnector - - -class SybaseExecutionContext_mxodbc(SybaseExecutionContext): - pass - - -class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect): - execution_ctx_cls = SybaseExecutionContext_mxodbc - -dialect = SybaseDialect_mxodbc diff --git a/python/sqlalchemy/dialects/sybase/pyodbc.py b/python/sqlalchemy/dialects/sybase/pyodbc.py deleted file mode 100644 index 16899707..00000000 --- a/python/sqlalchemy/dialects/sybase/pyodbc.py +++ /dev/null @@ -1,86 +0,0 @@ -# sybase/pyodbc.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: sybase+pyodbc - :name: PyODBC - :dbapi: pyodbc - :connectstring: sybase+pyodbc://:@\ -[/] - :url: http://pypi.python.org/pypi/pyodbc/ - - -Unicode Support ---------------- - -The pyodbc driver currently supports usage of these Sybase types with -Unicode or multibyte strings:: - - CHAR - NCHAR - NVARCHAR - TEXT - VARCHAR - -Currently *not* supported are:: - - UNICHAR - UNITEXT - UNIVARCHAR - -""" - -from sqlalchemy.dialects.sybase.base import SybaseDialect,\ - SybaseExecutionContext -from sqlalchemy.connectors.pyodbc import PyODBCConnector -from sqlalchemy import types as sqltypes, processors -import decimal - - -class _SybNumeric_pyodbc(sqltypes.Numeric): - """Turns Decimals with adjusted() < -6 into floats. - - It's not yet known how to get decimals with many - significant digits or very large adjusted() into Sybase - via pyodbc. 
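For reference, ``adjusted()`` is the exponent of a Decimal's most
significant digit - an illustrative check::

    import decimal

    decimal.Decimal('0.00000001').adjusted()   # -8: would be sent as a float
    decimal.Decimal('123.45').adjusted()       # 2: passed through as a Decimal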
- - """ - - def bind_processor(self, dialect): - super_process = super(_SybNumeric_pyodbc, self).\ - bind_processor(dialect) - - def process(value): - if self.asdecimal and \ - isinstance(value, decimal.Decimal): - - if value.adjusted() < -6: - return processors.to_float(value) - - if super_process: - return super_process(value) - else: - return value - return process - - -class SybaseExecutionContext_pyodbc(SybaseExecutionContext): - def set_ddl_autocommit(self, connection, value): - if value: - connection.autocommit = True - else: - connection.autocommit = False - - -class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect): - execution_ctx_cls = SybaseExecutionContext_pyodbc - - colspecs = { - sqltypes.Numeric: _SybNumeric_pyodbc, - } - -dialect = SybaseDialect_pyodbc diff --git a/python/sqlalchemy/dialects/sybase/pysybase.py b/python/sqlalchemy/dialects/sybase/pysybase.py deleted file mode 100644 index a3073944..00000000 --- a/python/sqlalchemy/dialects/sybase/pysybase.py +++ /dev/null @@ -1,102 +0,0 @@ -# sybase/pysybase.py -# Copyright (C) 2010-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -.. dialect:: sybase+pysybase - :name: Python-Sybase - :dbapi: Sybase - :connectstring: sybase+pysybase://:@/\ -[database name] - :url: http://python-sybase.sourceforge.net/ - -Unicode Support ---------------- - -The python-sybase driver does not appear to support non-ASCII strings of any -kind at this time. - -""" - -from sqlalchemy import types as sqltypes, processors -from sqlalchemy.dialects.sybase.base import SybaseDialect, \ - SybaseExecutionContext, SybaseSQLCompiler - - -class _SybNumeric(sqltypes.Numeric): - def result_processor(self, dialect, type_): - if not self.asdecimal: - return processors.to_float - else: - return sqltypes.Numeric.result_processor(self, dialect, type_) - - -class SybaseExecutionContext_pysybase(SybaseExecutionContext): - - def set_ddl_autocommit(self, dbapi_connection, value): - if value: - # call commit() on the Sybase connection directly, - # to avoid any side effects of calling a Connection - # transactional method inside of pre_exec() - dbapi_connection.commit() - - def pre_exec(self): - SybaseExecutionContext.pre_exec(self) - - for param in self.parameters: - for key in list(param): - param["@" + key] = param[key] - del param[key] - - -class SybaseSQLCompiler_pysybase(SybaseSQLCompiler): - def bindparam_string(self, name, **kw): - return "@" + name - - -class SybaseDialect_pysybase(SybaseDialect): - driver = 'pysybase' - execution_ctx_cls = SybaseExecutionContext_pysybase - statement_compiler = SybaseSQLCompiler_pysybase - - colspecs = { - sqltypes.Numeric: _SybNumeric, - sqltypes.Float: sqltypes.Float - } - - @classmethod - def dbapi(cls): - import Sybase - return Sybase - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user', password='passwd') - - return ([opts.pop('host')], opts) - - def do_executemany(self, cursor, statement, parameters, context=None): - # calling python-sybase executemany yields: - # TypeError: string too long for buffer - for param in parameters: - cursor.execute(statement, param) - - def _get_server_version_info(self, connection): - vers = connection.scalar("select @@version_number") - # i.e. 
15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0), - # (12, 5, 0, 0) - return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10) - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, (self.dbapi.OperationalError, - self.dbapi.ProgrammingError)): - msg = str(e) - return ('Unable to complete network request to host' in msg or - 'Invalid connection state' in msg or - 'Invalid cursor state' in msg) - else: - return False - -dialect = SybaseDialect_pysybase diff --git a/python/sqlalchemy/dialects/type_migration_guidelines.txt b/python/sqlalchemy/dialects/type_migration_guidelines.txt deleted file mode 100644 index 2d06cf69..00000000 --- a/python/sqlalchemy/dialects/type_migration_guidelines.txt +++ /dev/null @@ -1,145 +0,0 @@ -Rules for Migrating TypeEngine classes to 0.6 ---------------------------------------------- - -1. the TypeEngine classes are used for: - - a. Specifying behavior which needs to occur for bind parameters - or result row columns. - - b. Specifying types that are entirely specific to the database - in use and have no analogue in the sqlalchemy.types package. - - c. Specifying types where there is an analogue in sqlalchemy.types, - but the database in use takes vendor-specific flags for those - types. - - d. If a TypeEngine class doesn't provide any of this, it should be - *removed* from the dialect. - -2. the TypeEngine classes are *no longer* used for generating DDL. Dialects -now have a TypeCompiler subclass which uses the same visit_XXX model as -other compilers. - -3. the "ischema_names" and "colspecs" dictionaries are now required members on -the Dialect class. - -4. The names of types within dialects are now important. If a dialect-specific type -is a subclass of an existing generic type and is only provided for bind/result behavior, -the current mixed case naming can remain, i.e. _PGNumeric for Numeric - in this case, -end users would never need to use _PGNumeric directly. However, if a dialect-specific -type is specifying a type *or* arguments that are not present generically, it should -match the real name of the type on that backend, in uppercase. E.g. postgresql.INET, -mysql.ENUM, postgresql.ARRAY. - -Or follow this handy flowchart, here as a decision list:

  1. Is the type meant to provide bind/result behavior to a generic
     (MixedCase) type in types.py?

     a. Yes: name the type using _MixedCase, i.e. _OracleBoolean. It stays
        private to the dialect and is invoked *only* via the colspecs dict,
        and it subclasses the closest MixedCase type in types.py, i.e.
        class _DateTime(types.DateTime), class DATETIME2(types.DateTime),
        class BIT(types.TypeEngine).

     b. No: continue to 2.

  2. Does the type have the same name as an UPPERCASE type in types.py,
     and need no special behavior or arguments?

     a. Yes: don't make a type; make sure the dialect's base.py imports the
        types.py UPPERCASE name into its namespace. Users can import it.

     b. No (either the name differs, or special behavior or arguments are
        needed): name the type identically as that within the DB, using
        UPPERCASE (i.e. BIT, NCHAR, INTERVAL). If that name is identical
        to an UPPERCASE name in types.py, the type should subclass the
        UPPERCASE type in types.py (i.e. class BLOB(types.BLOB));
        otherwise subclass the closest MixedCase type in types.py.

Example 1. pysqlite needs bind/result processing for the DateTime type in types.py, -which applies to all DateTimes and subclasses. It's named _SLDateTime and -subclasses types.DateTime.
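A compact sketch tying these rules together - hypothetical dialect code,
with illustrative names:

    from sqlalchemy import types

    # bind/result behavior only, attached to a generic type:
    # private _MixedCase name, wired up via the dialect's colspecs dict
    class _MyDateTime(types.DateTime):
        def bind_processor(self, dialect):
            def process(value):
                return value.isoformat() if value is not None else None
            return process

    # vendor-specific type with its own DDL name: UPPERCASE, importable;
    # a visit_SET method on the dialect's type compiler would render its DDL
    class SET(types.String):
        __visit_name__ = 'SET'

    # on the Dialect class:
    colspecs = {types.DateTime: _MyDateTime}
    ischema_names = {'datetime': types.DATETIME, 'set': SET}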
Example 2. MS-SQL has a TIME type which takes a non-standard "precision" argument -that is rendered within DDL. So it's named TIME in the MS-SQL dialect's base.py, -and subclasses types.TIME. Users can then say mssql.TIME(precision=10). - -Example 3. MS-SQL dialects also need special bind/result processing for dates. -But its DATE type doesn't render DDL differently than that of a plain -DATE, i.e. it takes no special arguments. Therefore we are just adding behavior -to types.Date, so it's named _MSDate in the MS-SQL dialect's base.py, and subclasses -types.Date. - -Example 4. MySQL has a SET type, there's no analogue for this in types.py. So -MySQL names it SET in the dialect's base.py, and it subclasses types.String, since -it ultimately deals with strings. - -Example 5. Postgresql has a DATETIME type. The DBAPIs handle dates correctly, -and no special arguments are used in PG's DDL beyond what types.py provides. -Postgresql dialect therefore imports types.DATETIME into its base.py. - -Ideally one should be able to specify a schema using names imported completely from a -dialect, all matching the real name on that backend: - - from sqlalchemy.dialects.postgresql import base as pg - - t = Table('mytable', metadata, - Column('id', pg.INTEGER, primary_key=True), - Column('name', pg.VARCHAR(300)), - Column('inetaddr', pg.INET) - ) - -where above, the INTEGER and VARCHAR types are ultimately from sqlalchemy.types, -but the PG dialect makes them available in its own namespace. - -5. "colspecs" now is a dictionary of generic or uppercased types from sqlalchemy.types -linked to types specified in the dialect. Again, if a type in the dialect does not -specify any special behavior for bind_processor() or result_processor() and does not -indicate a special type only available in this database, it must be *removed* from the -module and from this dictionary. - -6. "ischema_names" indicates string descriptions of types as returned from the database -linked to TypeEngine classes. - - a. The string name should be matched to the most specific type possible within - sqlalchemy.types, unless there is no matching type within sqlalchemy.types in which - case it points to a dialect type. *It doesn't matter* if the dialect has its - own subclass of that type with special bind/result behavior - reflect to the types.py - UPPERCASE type as much as possible. With very few exceptions, all types - should reflect to an UPPERCASE type. - - b. If the dialect contains a matching dialect-specific type that takes extra arguments - which the generic one does not, then point to the dialect-specific type. E.g. - mssql.VARCHAR takes a "collation" parameter which should be preserved. - -7. DDL, or what was formerly issued by "get_col_spec()", is now handled exclusively by -a subclass of compiler.GenericTypeCompiler. - - a. your TypeCompiler class will receive generic and uppercase types from - sqlalchemy.types. Do not assume the presence of dialect-specific attributes on - these types. - - b. the visit_UPPERCASE methods on GenericTypeCompiler should *not* be overridden with - methods that produce a different DDL name. Uppercase types don't do any kind of - "guessing" - if visit_TIMESTAMP is called, the DDL should render as TIMESTAMP in - all cases, regardless of whether or not that type is legal on the backend database. - - c. the visit_UPPERCASE methods *should* be overridden with methods that add additional - arguments and flags to those types. - - d. 
the visit_lowercase methods are overridden to provide an interpretation of a generic - type. E.g. visit_large_binary() might be overridden to say "return self.visit_BIT(type_)". - - e. visit_lowercase methods should *never* render strings directly - it should always - be via calling a visit_UPPERCASE() method. diff --git a/python/sqlalchemy/engine/__init__.py b/python/sqlalchemy/engine/__init__.py deleted file mode 100644 index 0b0d5032..00000000 --- a/python/sqlalchemy/engine/__init__.py +++ /dev/null @@ -1,433 +0,0 @@ -# engine/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""SQL connections, SQL execution and high-level DB-API interface. - -The engine package defines the basic components used to interface -DB-API modules with higher-level statement construction, -connection-management, execution and result contexts. The primary -"entry point" class into this package is the Engine and its public -constructor ``create_engine()``. - -This package includes: - -base.py - Defines interface classes and some implementation classes which - comprise the basic components used to interface between a DB-API, - constructed and plain-text statements, connections, transactions, - and results. - -default.py - Contains default implementations of some of the components defined - in base.py. All current database dialects use the classes in - default.py as base classes for their own database-specific - implementations. - -strategies.py - The mechanics of constructing ``Engine`` objects are represented - here. Defines the ``EngineStrategy`` class which represents how - to go from arguments specified to the ``create_engine()`` - function, to a fully constructed ``Engine``, including - initialization of connection pooling, dialects, and specific - subclasses of ``Engine``. - -threadlocal.py - The ``TLEngine`` class is defined here, which is a subclass of - the generic ``Engine`` and tracks ``Connection`` and - ``Transaction`` objects against the identity of the current - thread. This allows certain programming patterns based around - the concept of a "thread-local connection" to be possible. - The ``TLEngine`` is created by using the "threadlocal" engine - strategy in conjunction with the ``create_engine()`` function. - -url.py - Defines the ``URL`` class which represents the individual - components of a string URL passed to ``create_engine()``. Also - defines a basic module-loading strategy for the dialect specifier - within a URL. -""" - -from .interfaces import ( - Connectable, - Dialect, - ExecutionContext, - ExceptionContext, - - # backwards compat - Compiled, - TypeCompiler -) - -from .base import ( - Connection, - Engine, - NestedTransaction, - RootTransaction, - Transaction, - TwoPhaseTransaction, -) - -from .result import ( - BaseRowProxy, - BufferedColumnResultProxy, - BufferedColumnRow, - BufferedRowResultProxy, - FullyBufferedResultProxy, - ResultProxy, - RowProxy, -) - -from .util import ( - connection_memoize -) - - -from . import util, strategies - -# backwards compat -from ..sql import ddl - -default_strategy = 'plain' - - -def create_engine(*args, **kwargs): - """Create a new :class:`.Engine` instance. 
- - The standard calling form is to send the URL as the - first positional argument, usually a string - that indicates database dialect and connection arguments:: - - - engine = create_engine("postgresql://scott:tiger@localhost/test") - - Additional keyword arguments may then follow it which - establish various options on the resulting :class:`.Engine` - and its underlying :class:`.Dialect` and :class:`.Pool` - constructs:: - - engine = create_engine("mysql://scott:tiger@hostname/dbname", - encoding='latin1', echo=True) - - The string form of the URL is - ``dialect[+driver]://user:password@host/dbname[?key=value..]``, where - ``dialect`` is a database name such as ``mysql``, ``oracle``, - ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as - ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively, - the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`. - - ``**kwargs`` takes a wide variety of options which are routed - towards their appropriate components. Arguments may be specific to - the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the - :class:`.Pool`. Specific dialects also accept keyword arguments that - are unique to that dialect. Here, we describe the parameters - that are common to most :func:`.create_engine()` usage. - - Once established, the newly resulting :class:`.Engine` will - request a connection from the underlying :class:`.Pool` once - :meth:`.Engine.connect` is called, or a method which depends on it - such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn - will establish the first actual DBAPI connection when this request - is received. The :func:`.create_engine` call itself does **not** - establish any actual DBAPI connections directly. - - .. seealso:: - - :doc:`/core/engines` - - :doc:`/dialects/index` - - :ref:`connections_toplevel` - - :param case_sensitive=True: if False, result column names - will match in a case-insensitive fashion, that is, - ``row['SomeColumn']``. - - .. versionchanged:: 0.8 - By default, result row names match case-sensitively. - In version 0.7 and prior, all matches were case-insensitive. - - :param connect_args: a dictionary of options which will be - passed directly to the DBAPI's ``connect()`` method as - additional keyword arguments. See the example - at :ref:`custom_dbapi_args`. - - :param convert_unicode=False: if set to True, sets - the default behavior of ``convert_unicode`` on the - :class:`.String` type to ``True``, regardless - of a setting of ``False`` on an individual - :class:`.String` type, thus causing all :class:`.String` - -based columns - to accommodate Python ``unicode`` objects. This flag - is useful as an engine-wide setting when using a - DBAPI that does not natively support Python - ``unicode`` objects and raises an error when - one is received (such as pyodbc with FreeTDS). - - See :class:`.String` for further details on - what this flag indicates. - - :param creator: a callable which returns a DBAPI connection. - This creation function will be passed to the underlying - connection pool and will be used to create all new database - connections. Usage of this function causes connection - parameters specified in the URL argument to be bypassed. - - :param echo=False: if True, the Engine will log all statements - as well as a repr() of their parameter lists to the engines - logger, which defaults to sys.stdout. The ``echo`` attribute of - ``Engine`` can be modified at any time to turn logging on and - off. 
If set to the string ``"debug"``, result rows will be - printed to the standard output as well. This flag ultimately - controls a Python logger; see :ref:`dbengine_logging` for - information on how to configure logging directly. - - :param echo_pool=False: if True, the connection pool will log - all checkouts/checkins to the logging stream, which defaults to - sys.stdout. This flag ultimately controls a Python logger; see - :ref:`dbengine_logging` for information on how to configure logging - directly. - - :param encoding: Defaults to ``utf-8``. This is the string - encoding used by SQLAlchemy for string encode/decode - operations which occur within SQLAlchemy, **outside of - the DBAPI.** Most modern DBAPIs feature some degree of - direct support for Python ``unicode`` objects, - what you see in Python 2 as a string of the form - ``u'some string'``. For those scenarios where the - DBAPI is detected as not supporting a Python ``unicode`` - object, this encoding is used to determine the - source/destination encoding. It is **not used** - for those cases where the DBAPI handles unicode - directly. - - To properly configure a system to accommodate Python - ``unicode`` objects, the DBAPI should be - configured to handle unicode to the greatest - degree as is appropriate - see - the notes on unicode pertaining to the specific - target database in use at :ref:`dialect_toplevel`. - - Areas where string encoding may need to be accommodated - outside of the DBAPI include zero or more of: - - * the values passed to bound parameters, corresponding to - the :class:`.Unicode` type or the :class:`.String` type - when ``convert_unicode`` is ``True``; - * the values returned in result set columns corresponding - to the :class:`.Unicode` type or the :class:`.String` - type when ``convert_unicode`` is ``True``; - * the string SQL statement passed to the DBAPI's - ``cursor.execute()`` method; - * the string names of the keys in the bound parameter - dictionary passed to the DBAPI's ``cursor.execute()`` - as well as ``cursor.setinputsizes()`` methods; - * the string column names retrieved from the DBAPI's - ``cursor.description`` attribute. - - When using Python 3, the DBAPI is required to support - *all* of the above values as Python ``unicode`` objects, - which in Python 3 are just known as ``str``. In Python 2, - the DBAPI does not specify unicode behavior at all, - so SQLAlchemy must make decisions for each of the above - values on a per-DBAPI basis - implementations are - completely inconsistent in their behavior. - - :param execution_options: Dictionary execution options which will - be applied to all connections. See - :meth:`~sqlalchemy.engine.Connection.execution_options` - - :param implicit_returning=True: When ``True``, a RETURNING- - compatible construct, if available, will be used to - fetch newly generated primary key values when a single row - INSERT statement is emitted with no existing returning() - clause. This applies to those backends which support RETURNING - or a compatible construct, including Postgresql, Firebird, Oracle, - Microsoft SQL Server. Set this to ``False`` to disable - the automatic usage of RETURNING. - - :param isolation_level: this string parameter is interpreted by various - dialects in order to affect the transaction isolation level of the - database connection. The parameter essentially accepts some subset of - these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``, - ``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``. 
- Behavior here varies per backend, and - individual dialects should be consulted directly. - - Note that the isolation level can also be set on a per-:class:`.Connection` - basis as well, using the - :paramref:`.Connection.execution_options.isolation_level` - feature. - - .. seealso:: - - :attr:`.Connection.default_isolation_level` - view default level - - :paramref:`.Connection.execution_options.isolation_level` - - set per :class:`.Connection` isolation level - - :ref:`SQLite Transaction Isolation ` - - :ref:`Postgresql Transaction Isolation ` - - :ref:`MySQL Transaction Isolation ` - - :ref:`session_transaction_isolation` - for the ORM - - :param label_length=None: optional integer value which limits - the size of dynamically generated column labels to that many - characters. If less than 6, labels are generated as - "_(counter)". If ``None``, the value of - ``dialect.max_identifier_length`` is used instead. - - :param listeners: A list of one or more - :class:`~sqlalchemy.interfaces.PoolListener` objects which will - receive connection pool events. - - :param logging_name: String identifier which will be used within - the "name" field of logging records generated within the - "sqlalchemy.engine" logger. Defaults to a hexstring of the - object's id. - - :param max_overflow=10: the number of connections to allow in - connection pool "overflow", that is connections that can be - opened above and beyond the pool_size setting, which defaults - to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`. - - :param module=None: reference to a Python module object (the module - itself, not its string name). Specifies an alternate DBAPI module to - be used by the engine's dialect. Each sub-dialect references a - specific DBAPI which will be imported before first connect. This - parameter causes the import to be bypassed, and the given module to - be used instead. Can be used for testing of DBAPIs as well as to - inject "mock" DBAPI implementations into the :class:`.Engine`. - - :param paramstyle=None: The `paramstyle `_ - to use when rendering bound parameters. This style defaults to the - one recommended by the DBAPI itself, which is retrieved from the - ``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept - more than one paramstyle, and in particular it may be desirable - to change a "named" paramstyle into a "positional" one, or vice versa. - When this attribute is passed, it should be one of the values - ``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or - ``"pyformat"``, and should correspond to a parameter style known - to be supported by the DBAPI in use. - - :param pool=None: an already-constructed instance of - :class:`~sqlalchemy.pool.Pool`, such as a - :class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this - pool will be used directly as the underlying connection pool - for the engine, bypassing whatever connection parameters are - present in the URL argument. For information on constructing - connection pools manually, see :ref:`pooling_toplevel`. - - :param poolclass=None: a :class:`~sqlalchemy.pool.Pool` - subclass, which will be used to create a connection pool - instance using the connection parameters given in the URL. Note - this differs from ``pool`` in that you don't actually - instantiate the pool in this case, you just indicate what type - of pool to be used. - - :param pool_logging_name: String identifier which will be used within - the "name" field of logging records generated within the - "sqlalchemy.pool" logger. 
Defaults to a hexstring of the object's - id. - - :param pool_size=5: the number of connections to keep open - inside the connection pool. This used with - :class:`~sqlalchemy.pool.QueuePool` as - well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With - :class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting - of 0 indicates no limit; to disable pooling, set ``poolclass`` to - :class:`~sqlalchemy.pool.NullPool` instead. - - :param pool_recycle=-1: this setting causes the pool to recycle - connections after the given number of seconds has passed. It - defaults to -1, or no timeout. For example, setting to 3600 - means connections will be recycled after one hour. Note that - MySQL in particular will disconnect automatically if no - activity is detected on a connection for eight hours (although - this is configurable with the MySQLDB connection itself and the - server configuration as well). - - :param pool_reset_on_return='rollback': set the "reset on return" - behavior of the pool, which is whether ``rollback()``, - ``commit()``, or nothing is called upon connections - being returned to the pool. See the docstring for - ``reset_on_return`` at :class:`.Pool`. - - .. versionadded:: 0.7.6 - - :param pool_timeout=30: number of seconds to wait before giving - up on getting a connection from the pool. This is only used - with :class:`~sqlalchemy.pool.QueuePool`. - - :param strategy='plain': selects alternate engine implementations. - Currently available are: - - * the ``threadlocal`` strategy, which is described in - :ref:`threadlocal_strategy`; - * the ``mock`` strategy, which dispatches all statement - execution to a function passed as the argument ``executor``. - See `example in the FAQ - `_. - - :param executor=None: a function taking arguments - ``(sql, *multiparams, **params)``, to which the ``mock`` strategy will - dispatch all statement execution. Used only by ``strategy='mock'``. - - """ - - strategy = kwargs.pop('strategy', default_strategy) - strategy = strategies.strategies[strategy] - return strategy.create(*args, **kwargs) - - -def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs): - """Create a new Engine instance using a configuration dictionary. - - The dictionary is typically produced from a config file. - - The keys of interest to ``engine_from_config()`` should be prefixed, e.g. - ``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument - indicates the prefix to be searched for. Each matching key (after the - prefix is stripped) is treated as though it were the corresponding keyword - argument to a :func:`.create_engine` call. - - The only required key is (assuming the default prefix) ``sqlalchemy.url``, - which provides the :ref:`database URL `. - - A select set of keyword arguments will be "coerced" to their - expected type based on string values. The set of arguments - is extensible per-dialect using the ``engine_config_types`` accessor. - - :param configuration: A dictionary (typically produced from a config file, - but this is not a requirement). Items whose keys start with the value - of 'prefix' will have that prefix stripped, and will then be passed to - :ref:`create_engine`. - - :param prefix: Prefix to match and then strip from keys - in 'configuration'. - - :param kwargs: Each keyword argument to ``engine_from_config()`` itself - overrides the corresponding item taken from the 'configuration' - dictionary. Keyword arguments should *not* be prefixed. 
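By way of example, the keyword arguments documented above compose as plain
keywords on a single :func:`.create_engine` call. This is a minimal sketch
only; the URL is a placeholder and the pool numbers are illustrative, with
``poolclass`` given explicitly so that the :class:`~sqlalchemy.pool.QueuePool`
sizing arguments apply::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import QueuePool

    engine = create_engine(
        "sqlite:///example.db",          # placeholder URL
        echo=True,                       # log SQL via the "sqlalchemy.engine" logger
        isolation_level="SERIALIZABLE",  # accepted values are dialect-dependent
        poolclass=QueuePool,
        pool_size=5,
        max_overflow=10,
        pool_recycle=3600,               # recycle connections after one hour
        pool_timeout=30,
    )

Similarly, :func:`.engine_from_config` consumes the same names as prefixed
string keys; a sketch, with the dictionary standing in for a parsed .ini
file::

    from sqlalchemy import engine_from_config

    configuration = {
        "sqlalchemy.url": "sqlite:///example.db",   # the only required key
        "sqlalchemy.echo": "true",                  # string values are coerced
        "sqlalchemy.pool_recycle": "3600",          # to their expected types
    }
    engine = engine_from_config(configuration, prefix="sqlalchemy.")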
- - """ - - options = dict((key[len(prefix):], configuration[key]) - for key in configuration - if key.startswith(prefix)) - options['_coerce_config'] = True - options.update(kwargs) - url = options.pop('url') - return create_engine(url, **options) - - -__all__ = ( - 'create_engine', - 'engine_from_config', -) diff --git a/python/sqlalchemy/engine/base.py b/python/sqlalchemy/engine/base.py deleted file mode 100644 index eaa435d4..00000000 --- a/python/sqlalchemy/engine/base.py +++ /dev/null @@ -1,2134 +0,0 @@ -# engine/base.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -from __future__ import with_statement - -"""Defines :class:`.Connection` and :class:`.Engine`. - -""" - - -import sys -from .. import exc, util, log, interfaces -from ..sql import util as sql_util -from .interfaces import Connectable, ExceptionContext -from .util import _distill_params -import contextlib - - -class Connection(Connectable): - """Provides high-level functionality for a wrapped DB-API connection. - - Provides execution support for string-based SQL statements as well as - :class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator` - objects. Provides a :meth:`begin` method to return :class:`.Transaction` - objects. - - The Connection object is **not** thread-safe. While a Connection can be - shared among threads using properly synchronized access, it is still - possible that the underlying DBAPI connection may not support shared - access between threads. Check the DBAPI documentation for details. - - The Connection object represents a single dbapi connection checked out - from the connection pool. In this state, the connection pool has no affect - upon the connection, including its expiration or timeout state. For the - connection pool to properly manage connections, connections should be - returned to the connection pool (i.e. ``connection.close()``) whenever the - connection is not in use. - - .. index:: - single: thread safety; Connection - - """ - - def __init__(self, engine, connection=None, close_with_result=False, - _branch_from=None, _execution_options=None, - _dispatch=None, - _has_events=None): - """Construct a new Connection. - - The constructor here is not public and is only called only by an - :class:`.Engine`. See :meth:`.Engine.connect` and - :meth:`.Engine.contextual_connect` methods. - - """ - self.engine = engine - self.dialect = engine.dialect - self.__branch_from = _branch_from - self.__branch = _branch_from is not None - - if _branch_from: - self.__connection = connection - self._execution_options = _execution_options - self._echo = _branch_from._echo - self.should_close_with_result = False - self.dispatch = _dispatch - self._has_events = _branch_from._has_events - else: - self.__connection = connection \ - if connection is not None else engine.raw_connection() - self.__transaction = None - self.__savepoint_seq = 0 - self.should_close_with_result = close_with_result - self.__invalid = False - self.__can_reconnect = True - self._echo = self.engine._should_log_info() - - if _has_events is None: - # if _has_events is sent explicitly as False, - # then don't join the dispatch of the engine; we don't - # want to handle any of the engine's events in that case. 
- self.dispatch = self.dispatch._join(engine.dispatch) - self._has_events = _has_events or ( - _has_events is None and engine._has_events) - - assert not _execution_options - self._execution_options = engine._execution_options - - if self._has_events or self.engine._has_events: - self.dispatch.engine_connect(self, self.__branch) - - def _branch(self): - """Return a new Connection which references this Connection's - engine and connection; but does not have close_with_result enabled, - and also whose close() method does nothing. - - The Core uses this very sparingly, only in the case of - custom SQL default functions that are to be INSERTed as the - primary key of a row where we need to get the value back, so we have - to invoke it distinctly - this is a very uncommon case. - - Userland code accesses _branch() when the connect() or - contextual_connect() methods are called. The branched connection - acts as much as possible like the parent, except that it stays - connected when a close() event occurs. - - """ - if self.__branch_from: - return self.__branch_from._branch() - else: - return self.engine._connection_cls( - self.engine, - self.__connection, - _branch_from=self, - _execution_options=self._execution_options, - _has_events=self._has_events, - _dispatch=self.dispatch) - - @property - def _root(self): - """return the 'root' connection. - - Returns 'self' if this connection is not a branch, else - returns the root connection from which we ultimately branched. - - """ - - if self.__branch_from: - return self.__branch_from - else: - return self - - def _clone(self): - """Create a shallow copy of this Connection. - - """ - c = self.__class__.__new__(self.__class__) - c.__dict__ = self.__dict__.copy() - return c - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def execution_options(self, **opt): - """ Set non-SQL options for the connection which take effect - during execution. - - The method returns a copy of this :class:`.Connection` which references - the same underlying DBAPI connection, but also defines the given - execution options which will take effect for a call to - :meth:`execute`. As the new :class:`.Connection` references the same - underlying resource, it's usually a good idea to ensure that the copies - will be discarded immediately, which is implicit if used as in:: - - result = connection.execution_options(stream_results=True).\\ - execute(stmt) - - Note that any key/value can be passed to - :meth:`.Connection.execution_options`, and it will be stored in the - ``_execution_options`` dictionary of the :class:`.Connection`. It - is suitable for usage by end-user schemes to communicate with - event listeners, for example. - - The keywords that are currently recognized by SQLAlchemy itself - include all those listed under :meth:`.Executable.execution_options`, - as well as others that are specific to :class:`.Connection`. - - :param autocommit: Available on: Connection, statement. - When True, a COMMIT will be invoked after execution - when executed in 'autocommit' mode, i.e. when an explicit - transaction is not begun on the connection. Note that DBAPI - connections by default are always in a transaction - SQLAlchemy uses - rules applied to different kinds of statements to determine if - COMMIT will be invoked in order to provide its "autocommit" feature. - Typically, all INSERT/UPDATE/DELETE statements as well as - CREATE/DROP statements have autocommit behavior enabled; SELECT - constructs do not. 
Use this option when invoking a SELECT or other - specific SQL construct where COMMIT is desired (typically when - calling stored procedures and such), and an explicit - transaction is not in progress. - - :param compiled_cache: Available on: Connection. - A dictionary where :class:`.Compiled` objects - will be cached when the :class:`.Connection` compiles a clause - expression into a :class:`.Compiled` object. - It is the user's responsibility to - manage the size of this dictionary, which will have keys - corresponding to the dialect, clause element, the column - names within the VALUES or SET clause of an INSERT or UPDATE, - as well as the "batch" mode for an INSERT or UPDATE statement. - The format of this dictionary is not guaranteed to stay the - same in future releases. - - Note that the ORM makes use of its own "compiled" caches for - some operations, including flush operations. The caching - used by the ORM internally supersedes a cache dictionary - specified here. - - :param isolation_level: Available on: :class:`.Connection`. - Set the transaction isolation level for - the lifespan of this :class:`.Connection` object (*not* the - underyling DBAPI connection, for which the level is reset - to its original setting upon termination of this - :class:`.Connection` object). - - Valid values include - those string values accepted by the - :paramref:`.create_engine.isolation_level` - parameter passed to :func:`.create_engine`. These levels are - semi-database specific; see individual dialect documentation for - valid levels. - - Note that this option necessarily affects the underlying - DBAPI connection for the lifespan of the originating - :class:`.Connection`, and is not per-execution. This - setting is not removed until the underlying DBAPI connection - is returned to the connection pool, i.e. - the :meth:`.Connection.close` method is called. - - .. warning:: The ``isolation_level`` execution option should - **not** be used when a transaction is already established, that - is, the :meth:`.Connection.begin` method or similar has been - called. A database cannot change the isolation level on a - transaction in progress, and different DBAPIs and/or - SQLAlchemy dialects may implicitly roll back or commit - the transaction, or not affect the connection at all. - - .. versionchanged:: 0.9.9 A warning is emitted when the - ``isolation_level`` execution option is used after a - transaction has been started with :meth:`.Connection.begin` - or similar. - - .. note:: The ``isolation_level`` execution option is implicitly - reset if the :class:`.Connection` is invalidated, e.g. via - the :meth:`.Connection.invalidate` method, or if a - disconnection error occurs. The new connection produced after - the invalidation will not have the isolation level re-applied - to it automatically. - - .. seealso:: - - :paramref:`.create_engine.isolation_level` - - set per :class:`.Engine` isolation level - - :meth:`.Connection.get_isolation_level` - view current level - - :ref:`SQLite Transaction Isolation ` - - :ref:`Postgresql Transaction Isolation ` - - :ref:`MySQL Transaction Isolation ` - - :ref:`session_transaction_isolation` - for the ORM - - :param no_parameters: When ``True``, if the final parameter - list or dictionary is totally empty, will invoke the - statement on the cursor as ``cursor.execute(statement)``, - not passing the parameter collection at all. 
- Some DBAPIs such as psycopg2 and mysql-python consider - percent signs as significant only when parameters are - present; this option allows code to generate SQL - containing percent signs (and possibly other characters) - that is neutral regarding whether it's executed by the DBAPI - or piped into a script that's later invoked by - command line tools. - - .. versionadded:: 0.7.6 - - :param stream_results: Available on: Connection, statement. - Indicate to the dialect that results should be - "streamed" and not pre-buffered, if possible. This is a limitation - of many DBAPIs. The flag is currently understood only by the - psycopg2 dialect. - - """ - c = self._clone() - c._execution_options = c._execution_options.union(opt) - if self._has_events or self.engine._has_events: - self.dispatch.set_connection_execution_options(c, opt) - self.dialect.set_connection_execution_options(c, opt) - return c - - @property - def closed(self): - """Return True if this connection is closed.""" - - return '_Connection__connection' not in self.__dict__ \ - and not self.__can_reconnect - - @property - def invalidated(self): - """Return True if this connection was invalidated.""" - - return self._root.__invalid - - @property - def connection(self): - """The underlying DB-API connection managed by this Connection. - - .. seealso:: - - - :ref:`dbapi_connections` - - """ - - try: - return self.__connection - except AttributeError: - try: - return self._revalidate_connection() - except Exception as e: - self._handle_dbapi_exception(e, None, None, None, None) - - def get_isolation_level(self): - """Return the current isolation level assigned to this - :class:`.Connection`. - - This will typically be the default isolation level as determined - by the dialect, unless if the - :paramref:`.Connection.execution_options.isolation_level` - feature has been used to alter the isolation level on a - per-:class:`.Connection` basis. - - This attribute will typically perform a live SQL operation in order - to procure the current isolation level, so the value returned is the - actual level on the underlying DBAPI connection regardless of how - this state was set. Compare to the - :attr:`.Connection.default_isolation_level` accessor - which returns the dialect-level setting without performing a SQL - query. - - .. versionadded:: 0.9.9 - - .. seealso:: - - :attr:`.Connection.default_isolation_level` - view default level - - :paramref:`.create_engine.isolation_level` - - set per :class:`.Engine` isolation level - - :paramref:`.Connection.execution_options.isolation_level` - - set per :class:`.Connection` isolation level - - """ - try: - return self.dialect.get_isolation_level(self.connection) - except Exception as e: - self._handle_dbapi_exception(e, None, None, None, None) - - @property - def default_isolation_level(self): - """The default isolation level assigned to this :class:`.Connection`. - - This is the isolation level setting that the :class:`.Connection` - has when first procured via the :meth:`.Engine.connect` method. - This level stays in place until the - :paramref:`.Connection.execution_options.isolation_level` is used - to change the setting on a per-:class:`.Connection` basis. - - Unlike :meth:`.Connection.get_isolation_level`, this attribute is set - ahead of time from the first connection procured by the dialect, - so SQL query is not invoked when this accessor is called. - - .. versionadded:: 0.9.9 - - .. 
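Putting the method above to use: :meth:`.Connection.execution_options`
returns a copy of the :class:`.Connection` against the same DBAPI
connection. A sketch, using SQLite as a stand-in backend (``stream_results``
is simply carried along by dialects that don't understand it)::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///:memory:")
    conn = engine.connect()

    # the copy carries the option; `conn` itself is unchanged
    result = conn.execution_options(stream_results=True).execute("SELECT 1")
    print(result.scalar())

    # per-Connection isolation level, in effect until the underlying
    # DBAPI connection goes back to the pool; valid strings vary by dialect
    conn = conn.execution_options(isolation_level="SERIALIZABLE")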
seealso:: - - :meth:`.Connection.get_isolation_level` - view current level - - :paramref:`.create_engine.isolation_level` - - set per :class:`.Engine` isolation level - - :paramref:`.Connection.execution_options.isolation_level` - - set per :class:`.Connection` isolation level - - """ - return self.dialect.default_isolation_level - - def _revalidate_connection(self): - if self.__branch_from: - return self.__branch_from._revalidate_connection() - if self.__can_reconnect and self.__invalid: - if self.__transaction is not None: - raise exc.InvalidRequestError( - "Can't reconnect until invalid " - "transaction is rolled back") - self.__connection = self.engine.raw_connection(_connection=self) - self.__invalid = False - return self.__connection - raise exc.ResourceClosedError("This Connection is closed") - - @property - def _connection_is_valid(self): - # use getattr() for is_valid to support exceptions raised in - # dialect initializer, where the connection is not wrapped in - # _ConnectionFairy - - return getattr(self.__connection, 'is_valid', False) - - @property - def _still_open_and_connection_is_valid(self): - return \ - not self.closed and \ - not self.invalidated and \ - getattr(self.__connection, 'is_valid', False) - - @property - def info(self): - """Info dictionary associated with the underlying DBAPI connection - referred to by this :class:`.Connection`, allowing user-defined - data to be associated with the connection. - - The data here will follow along with the DBAPI connection including - after it is returned to the connection pool and used again - in subsequent instances of :class:`.Connection`. - - """ - - return self.connection.info - - def connect(self): - """Returns a branched version of this :class:`.Connection`. - - The :meth:`.Connection.close` method on the returned - :class:`.Connection` can be called and this - :class:`.Connection` will remain open. - - This method provides usage symmetry with - :meth:`.Engine.connect`, including for usage - with context managers. - - """ - - return self._branch() - - def contextual_connect(self, **kwargs): - """Returns a branched version of this :class:`.Connection`. - - The :meth:`.Connection.close` method on the returned - :class:`.Connection` can be called and this - :class:`.Connection` will remain open. - - This method provides usage symmetry with - :meth:`.Engine.contextual_connect`, including for usage - with context managers. - - """ - - return self._branch() - - def invalidate(self, exception=None): - """Invalidate the underlying DBAPI connection associated with - this :class:`.Connection`. - - The underlying DBAPI connection is literally closed (if - possible), and is discarded. Its source connection pool will - typically lazily create a new connection to replace it. - - Upon the next use (where "use" typically means using the - :meth:`.Connection.execute` method or similar), - this :class:`.Connection` will attempt to - procure a new DBAPI connection using the services of the - :class:`.Pool` as a source of connectivty (e.g. a "reconnection"). - - If a transaction was in progress (e.g. the - :meth:`.Connection.begin` method has been called) when - :meth:`.Connection.invalidate` method is called, at the DBAPI - level all state associated with this transaction is lost, as - the DBAPI connection is closed. 
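:attr:`.Connection.info` and :meth:`.Connection.invalidate` can be exercised
directly; a sketch, again with in-memory SQLite as a placeholder backend::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///:memory:")
    conn = engine.connect()

    # user-defined data riding along with the DBAPI connection
    conn.info["request_id"] = 42

    conn.invalidate()            # DBAPI connection is closed and discarded
    assert conn.invalidated
    conn.execute("SELECT 1")     # a replacement connection is procured here
    assert not conn.invalidated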
The :class:`.Connection` - will not allow a reconnection to proceed until the - :class:`.Transaction` object is ended, by calling the - :meth:`.Transaction.rollback` method; until that point, any attempt at - continuing to use the :class:`.Connection` will raise an - :class:`~sqlalchemy.exc.InvalidRequestError`. - This is to prevent applications from accidentally - continuing an ongoing transactional operations despite the - fact that the transaction has been lost due to an - invalidation. - - The :meth:`.Connection.invalidate` method, just like auto-invalidation, - will at the connection pool level invoke the - :meth:`.PoolEvents.invalidate` event. - - .. seealso:: - - :ref:`pool_connection_invalidation` - - """ - - if self.invalidated: - return - - if self.closed: - raise exc.ResourceClosedError("This Connection is closed") - - if self._root._connection_is_valid: - self._root.__connection.invalidate(exception) - del self._root.__connection - self._root.__invalid = True - - def detach(self): - """Detach the underlying DB-API connection from its connection pool. - - E.g.:: - - with engine.connect() as conn: - conn.detach() - conn.execute("SET search_path TO schema1, schema2") - - # work with connection - - # connection is fully closed (since we used "with:", can - # also call .close()) - - This :class:`.Connection` instance will remain usable. When closed - (or exited from a context manager context as above), - the DB-API connection will be literally closed and not - returned to its originating pool. - - This method can be used to insulate the rest of an application - from a modified state on a connection (such as a transaction - isolation level or similar). - - """ - - self.__connection.detach() - - def begin(self): - """Begin a transaction and return a transaction handle. - - The returned object is an instance of :class:`.Transaction`. - This object represents the "scope" of the transaction, - which completes when either the :meth:`.Transaction.rollback` - or :meth:`.Transaction.commit` method is called. - - Nested calls to :meth:`.begin` on the same :class:`.Connection` - will return new :class:`.Transaction` objects that represent - an emulated transaction within the scope of the enclosing - transaction, that is:: - - trans = conn.begin() # outermost transaction - trans2 = conn.begin() # "nested" - trans2.commit() # does nothing - trans.commit() # actually commits - - Calls to :meth:`.Transaction.commit` only have an effect - when invoked via the outermost :class:`.Transaction` object, though the - :meth:`.Transaction.rollback` method of any of the - :class:`.Transaction` objects will roll back the - transaction. - - See also: - - :meth:`.Connection.begin_nested` - use a SAVEPOINT - - :meth:`.Connection.begin_twophase` - use a two phase /XID transaction - - :meth:`.Engine.begin` - context manager available from - :class:`.Engine`. - - """ - if self.__branch_from: - return self.__branch_from.begin() - - if self.__transaction is None: - self.__transaction = RootTransaction(self) - return self.__transaction - else: - return Transaction(self, self.__transaction) - - def begin_nested(self): - """Begin a nested transaction and return a transaction handle. - - The returned object is an instance of :class:`.NestedTransaction`. - - Nested transactions require SAVEPOINT support in the - underlying database. Any transaction in the hierarchy may - ``commit`` and ``rollback``, however the outermost transaction - still controls the overall ``commit`` or ``rollback`` of the - transaction of a whole. 
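The begin/commit protocol described above, written out end to end; a minimal
sketch::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///:memory:")
    conn = engine.connect()

    trans = conn.begin()          # outermost Transaction
    try:
        conn.execute("CREATE TABLE t (x INTEGER)")
        conn.execute("INSERT INTO t (x) VALUES (1)")
        trans.commit()            # actually commits
    except:
        trans.rollback()          # any Transaction in the nesting may roll back
        raise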
- - See also :meth:`.Connection.begin`, - :meth:`.Connection.begin_twophase`. - """ - if self.__branch_from: - return self.__branch_from.begin_nested() - - if self.__transaction is None: - self.__transaction = RootTransaction(self) - else: - self.__transaction = NestedTransaction(self, self.__transaction) - return self.__transaction - - def begin_twophase(self, xid=None): - """Begin a two-phase or XA transaction and return a transaction - handle. - - The returned object is an instance of :class:`.TwoPhaseTransaction`, - which in addition to the methods provided by - :class:`.Transaction`, also provides a - :meth:`~.TwoPhaseTransaction.prepare` method. - - :param xid: the two phase transaction id. If not supplied, a - random id will be generated. - - See also :meth:`.Connection.begin`, - :meth:`.Connection.begin_twophase`. - - """ - - if self.__branch_from: - return self.__branch_from.begin_twophase(xid=xid) - - if self.__transaction is not None: - raise exc.InvalidRequestError( - "Cannot start a two phase transaction when a transaction " - "is already in progress.") - if xid is None: - xid = self.engine.dialect.create_xid() - self.__transaction = TwoPhaseTransaction(self, xid) - return self.__transaction - - def recover_twophase(self): - return self.engine.dialect.do_recover_twophase(self) - - def rollback_prepared(self, xid, recover=False): - self.engine.dialect.do_rollback_twophase(self, xid, recover=recover) - - def commit_prepared(self, xid, recover=False): - self.engine.dialect.do_commit_twophase(self, xid, recover=recover) - - def in_transaction(self): - """Return True if a transaction is in progress.""" - return self._root.__transaction is not None - - def _begin_impl(self, transaction): - assert not self.__branch_from - - if self._echo: - self.engine.logger.info("BEGIN (implicit)") - - if self._has_events or self.engine._has_events: - self.dispatch.begin(self) - - try: - self.engine.dialect.do_begin(self.connection) - if self.connection._reset_agent is None: - self.connection._reset_agent = transaction - except Exception as e: - self._handle_dbapi_exception(e, None, None, None, None) - - def _rollback_impl(self): - assert not self.__branch_from - - if self._has_events or self.engine._has_events: - self.dispatch.rollback(self) - - if self._still_open_and_connection_is_valid: - if self._echo: - self.engine.logger.info("ROLLBACK") - try: - self.engine.dialect.do_rollback(self.connection) - except Exception as e: - self._handle_dbapi_exception(e, None, None, None, None) - finally: - if not self.__invalid and \ - self.connection._reset_agent is self.__transaction: - self.connection._reset_agent = None - self.__transaction = None - else: - self.__transaction = None - - def _commit_impl(self, autocommit=False): - assert not self.__branch_from - - if self._has_events or self.engine._has_events: - self.dispatch.commit(self) - - if self._echo: - self.engine.logger.info("COMMIT") - try: - self.engine.dialect.do_commit(self.connection) - except Exception as e: - self._handle_dbapi_exception(e, None, None, None, None) - finally: - if not self.__invalid and \ - self.connection._reset_agent is self.__transaction: - self.connection._reset_agent = None - self.__transaction = None - - def _savepoint_impl(self, name=None): - assert not self.__branch_from - - if self._has_events or self.engine._has_events: - self.dispatch.savepoint(self, name) - - if name is None: - self.__savepoint_seq += 1 - name = 'sa_savepoint_%s' % self.__savepoint_seq - if self._still_open_and_connection_is_valid: - 
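For reference, the two-phase API above in use. This is a sketch only, since
it requires a backend with XA support; the Postgresql URL, credentials and
table name are placeholders::

    from sqlalchemy import create_engine

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    conn = engine.connect()

    xa = conn.begin_twophase()    # TwoPhaseTransaction; xid is auto-generated
    conn.execute("INSERT INTO t (x) VALUES (1)")
    xa.prepare()                  # phase one
    xa.commit()                   # phase two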
self.engine.dialect.do_savepoint(self, name) - return name - - def _rollback_to_savepoint_impl(self, name, context): - assert not self.__branch_from - - if self._has_events or self.engine._has_events: - self.dispatch.rollback_savepoint(self, name, context) - - if self._still_open_and_connection_is_valid: - self.engine.dialect.do_rollback_to_savepoint(self, name) - self.__transaction = context - - def _release_savepoint_impl(self, name, context): - assert not self.__branch_from - - if self._has_events or self.engine._has_events: - self.dispatch.release_savepoint(self, name, context) - - if self._still_open_and_connection_is_valid: - self.engine.dialect.do_release_savepoint(self, name) - self.__transaction = context - - def _begin_twophase_impl(self, transaction): - assert not self.__branch_from - - if self._echo: - self.engine.logger.info("BEGIN TWOPHASE (implicit)") - if self._has_events or self.engine._has_events: - self.dispatch.begin_twophase(self, transaction.xid) - - if self._still_open_and_connection_is_valid: - self.engine.dialect.do_begin_twophase(self, transaction.xid) - - if self.connection._reset_agent is None: - self.connection._reset_agent = transaction - - def _prepare_twophase_impl(self, xid): - assert not self.__branch_from - - if self._has_events or self.engine._has_events: - self.dispatch.prepare_twophase(self, xid) - - if self._still_open_and_connection_is_valid: - assert isinstance(self.__transaction, TwoPhaseTransaction) - self.engine.dialect.do_prepare_twophase(self, xid) - - def _rollback_twophase_impl(self, xid, is_prepared): - assert not self.__branch_from - - if self._has_events or self.engine._has_events: - self.dispatch.rollback_twophase(self, xid, is_prepared) - - if self._still_open_and_connection_is_valid: - assert isinstance(self.__transaction, TwoPhaseTransaction) - try: - self.engine.dialect.do_rollback_twophase( - self, xid, is_prepared) - finally: - if self.connection._reset_agent is self.__transaction: - self.connection._reset_agent = None - self.__transaction = None - else: - self.__transaction = None - - def _commit_twophase_impl(self, xid, is_prepared): - assert not self.__branch_from - - if self._has_events or self.engine._has_events: - self.dispatch.commit_twophase(self, xid, is_prepared) - - if self._still_open_and_connection_is_valid: - assert isinstance(self.__transaction, TwoPhaseTransaction) - try: - self.engine.dialect.do_commit_twophase(self, xid, is_prepared) - finally: - if self.connection._reset_agent is self.__transaction: - self.connection._reset_agent = None - self.__transaction = None - else: - self.__transaction = None - - def _autorollback(self): - if not self._root.in_transaction(): - self._root._rollback_impl() - - def close(self): - """Close this :class:`.Connection`. - - This results in a release of the underlying database - resources, that is, the DBAPI connection referenced - internally. The DBAPI connection is typically restored - back to the connection-holding :class:`.Pool` referenced - by the :class:`.Engine` that produced this - :class:`.Connection`. Any transactional state present on - the DBAPI connection is also unconditionally released via - the DBAPI connection's ``rollback()`` method, regardless - of any :class:`.Transaction` object that may be - outstanding with regards to this :class:`.Connection`. - - After :meth:`~.Connection.close` is called, the - :class:`.Connection` is permanently in a closed state, - and will allow no further operations. 
- - """ - if self.__branch_from: - try: - del self.__connection - except AttributeError: - pass - finally: - self.__can_reconnect = False - return - try: - conn = self.__connection - except AttributeError: - pass - else: - - conn.close() - if conn._reset_agent is self.__transaction: - conn._reset_agent = None - - # the close() process can end up invalidating us, - # as the pool will call our transaction as the "reset_agent" - # for rollback(), which can then cause an invalidation - if not self.__invalid: - del self.__connection - self.__can_reconnect = False - self.__transaction = None - - def scalar(self, object, *multiparams, **params): - """Executes and returns the first column of the first row. - - The underlying result/cursor is closed after execution. - """ - - return self.execute(object, *multiparams, **params).scalar() - - def execute(self, object, *multiparams, **params): - """Executes the a SQL statement construct and returns a - :class:`.ResultProxy`. - - :param object: The statement to be executed. May be - one of: - - * a plain string - * any :class:`.ClauseElement` construct that is also - a subclass of :class:`.Executable`, such as a - :func:`~.expression.select` construct - * a :class:`.FunctionElement`, such as that generated - by :data:`.func`, will be automatically wrapped in - a SELECT statement, which is then executed. - * a :class:`.DDLElement` object - * a :class:`.DefaultGenerator` object - * a :class:`.Compiled` object - - :param \*multiparams/\**params: represent bound parameter - values to be used in the execution. Typically, - the format is either a collection of one or more - dictionaries passed to \*multiparams:: - - conn.execute( - table.insert(), - {"id":1, "value":"v1"}, - {"id":2, "value":"v2"} - ) - - ...or individual key/values interpreted by \**params:: - - conn.execute( - table.insert(), id=1, value="v1" - ) - - In the case that a plain SQL string is passed, and the underlying - DBAPI accepts positional bind parameters, a collection of tuples - or individual values in \*multiparams may be passed:: - - conn.execute( - "INSERT INTO table (id, value) VALUES (?, ?)", - (1, "v1"), (2, "v2") - ) - - conn.execute( - "INSERT INTO table (id, value) VALUES (?, ?)", - 1, "v1" - ) - - Note above, the usage of a question mark "?" or other - symbol is contingent upon the "paramstyle" accepted by the DBAPI - in use, which may be any of "qmark", "named", "pyformat", "format", - "numeric". See `pep-249 `_ - for details on paramstyle. - - To execute a textual SQL statement which uses bound parameters in a - DBAPI-agnostic way, use the :func:`~.expression.text` construct. 
- - """ - if isinstance(object, util.string_types[0]): - return self._execute_text(object, multiparams, params) - try: - meth = object._execute_on_connection - except AttributeError: - raise exc.InvalidRequestError( - "Unexecutable object type: %s" % - type(object)) - else: - return meth(self, multiparams, params) - - def _execute_function(self, func, multiparams, params): - """Execute a sql.FunctionElement object.""" - - return self._execute_clauseelement(func.select(), - multiparams, params) - - def _execute_default(self, default, multiparams, params): - """Execute a schema.ColumnDefault object.""" - - if self._has_events or self.engine._has_events: - for fn in self.dispatch.before_execute: - default, multiparams, params = \ - fn(self, default, multiparams, params) - - try: - try: - conn = self.__connection - except AttributeError: - conn = self._revalidate_connection() - - dialect = self.dialect - ctx = dialect.execution_ctx_cls._init_default( - dialect, self, conn) - except Exception as e: - self._handle_dbapi_exception(e, None, None, None, None) - - ret = ctx._exec_default(default, None) - if self.should_close_with_result: - self.close() - - if self._has_events or self.engine._has_events: - self.dispatch.after_execute(self, - default, multiparams, params, ret) - - return ret - - def _execute_ddl(self, ddl, multiparams, params): - """Execute a schema.DDL object.""" - - if self._has_events or self.engine._has_events: - for fn in self.dispatch.before_execute: - ddl, multiparams, params = \ - fn(self, ddl, multiparams, params) - - dialect = self.dialect - - compiled = ddl.compile(dialect=dialect) - ret = self._execute_context( - dialect, - dialect.execution_ctx_cls._init_ddl, - compiled, - None, - compiled - ) - if self._has_events or self.engine._has_events: - self.dispatch.after_execute(self, - ddl, multiparams, params, ret) - return ret - - def _execute_clauseelement(self, elem, multiparams, params): - """Execute a sql.ClauseElement object.""" - - if self._has_events or self.engine._has_events: - for fn in self.dispatch.before_execute: - elem, multiparams, params = \ - fn(self, elem, multiparams, params) - - distilled_params = _distill_params(multiparams, params) - if distilled_params: - # note this is usually dict but we support RowProxy - # as well; but dict.keys() as an iterable is OK - keys = distilled_params[0].keys() - else: - keys = [] - - dialect = self.dialect - if 'compiled_cache' in self._execution_options: - key = dialect, elem, tuple(sorted(keys)), len(distilled_params) > 1 - compiled_sql = self._execution_options['compiled_cache'].get(key) - if compiled_sql is None: - compiled_sql = elem.compile( - dialect=dialect, column_keys=keys, - inline=len(distilled_params) > 1) - self._execution_options['compiled_cache'][key] = compiled_sql - else: - compiled_sql = elem.compile( - dialect=dialect, column_keys=keys, - inline=len(distilled_params) > 1) - - ret = self._execute_context( - dialect, - dialect.execution_ctx_cls._init_compiled, - compiled_sql, - distilled_params, - compiled_sql, distilled_params - ) - if self._has_events or self.engine._has_events: - self.dispatch.after_execute(self, - elem, multiparams, params, ret) - return ret - - def _execute_compiled(self, compiled, multiparams, params): - """Execute a sql.Compiled object.""" - - if self._has_events or self.engine._has_events: - for fn in self.dispatch.before_execute: - compiled, multiparams, params = \ - fn(self, compiled, multiparams, params) - - dialect = self.dialect - parameters = _distill_params(multiparams, 
params) - ret = self._execute_context( - dialect, - dialect.execution_ctx_cls._init_compiled, - compiled, - parameters, - compiled, parameters - ) - if self._has_events or self.engine._has_events: - self.dispatch.after_execute(self, - compiled, multiparams, params, ret) - return ret - - def _execute_text(self, statement, multiparams, params): - """Execute a string SQL statement.""" - - if self._has_events or self.engine._has_events: - for fn in self.dispatch.before_execute: - statement, multiparams, params = \ - fn(self, statement, multiparams, params) - - dialect = self.dialect - parameters = _distill_params(multiparams, params) - ret = self._execute_context( - dialect, - dialect.execution_ctx_cls._init_statement, - statement, - parameters, - statement, parameters - ) - if self._has_events or self.engine._has_events: - self.dispatch.after_execute(self, - statement, multiparams, params, ret) - return ret - - def _execute_context(self, dialect, constructor, - statement, parameters, - *args): - """Create an :class:`.ExecutionContext` and execute, returning - a :class:`.ResultProxy`.""" - - try: - try: - conn = self.__connection - except AttributeError: - conn = self._revalidate_connection() - - context = constructor(dialect, self, conn, *args) - except Exception as e: - self._handle_dbapi_exception( - e, - util.text_type(statement), parameters, - None, None) - - if context.compiled: - context.pre_exec() - - cursor, statement, parameters = context.cursor, \ - context.statement, \ - context.parameters - - if not context.executemany: - parameters = parameters[0] - - if self._has_events or self.engine._has_events: - for fn in self.dispatch.before_cursor_execute: - statement, parameters = \ - fn(self, cursor, statement, parameters, - context, context.executemany) - - if self._echo: - self.engine.logger.info(statement) - self.engine.logger.info( - "%r", - sql_util._repr_params(parameters, batches=10) - ) - - evt_handled = False - try: - if context.executemany: - if self.dialect._has_events: - for fn in self.dialect.dispatch.do_executemany: - if fn(cursor, statement, parameters, context): - evt_handled = True - break - if not evt_handled: - self.dialect.do_executemany( - cursor, - statement, - parameters, - context) - elif not parameters and context.no_parameters: - if self.dialect._has_events: - for fn in self.dialect.dispatch.do_execute_no_params: - if fn(cursor, statement, context): - evt_handled = True - break - if not evt_handled: - self.dialect.do_execute_no_params( - cursor, - statement, - context) - else: - if self.dialect._has_events: - for fn in self.dialect.dispatch.do_execute: - if fn(cursor, statement, parameters, context): - evt_handled = True - break - if not evt_handled: - self.dialect.do_execute( - cursor, - statement, - parameters, - context) - except Exception as e: - self._handle_dbapi_exception( - e, - statement, - parameters, - cursor, - context) - - if self._has_events or self.engine._has_events: - self.dispatch.after_cursor_execute(self, cursor, - statement, - parameters, - context, - context.executemany) - - if context.compiled: - context.post_exec() - - if context.is_crud: - result = context._setup_crud_result_proxy() - else: - result = context.get_result_proxy() - if result._metadata is None: - result._soft_close(_autoclose_connection=False) - - if context.should_autocommit and self._root.__transaction is None: - self._root._commit_impl(autocommit=True) - - if result._soft_closed and self.should_close_with_result: - self.close() - - return result - - def 
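``_execute_context()`` above is also where the cursor-level events fire; a
sketch of hooking them (registering with ``retval=True`` lets the listener
replace the statement/parameters pair)::

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite:///:memory:")

    @event.listens_for(engine, "before_cursor_execute", retval=True)
    def add_comment(conn, cursor, statement, parameters, context, executemany):
        # runs just before cursor.execute() / cursor.executemany()
        return "-- traced\n" + statement, parameters

    engine.execute("SELECT 1")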
_cursor_execute(self, cursor, statement, parameters, context=None): - """Execute a statement + params on the given cursor. - - Adds appropriate logging and exception handling. - - This method is used by DefaultDialect for special-case - executions, such as for sequences and column defaults. - The path of statement execution in the majority of cases - terminates at _execute_context(). - - """ - if self._has_events or self.engine._has_events: - for fn in self.dispatch.before_cursor_execute: - statement, parameters = \ - fn(self, cursor, statement, parameters, - context, - False) - - if self._echo: - self.engine.logger.info(statement) - self.engine.logger.info("%r", parameters) - try: - for fn in () if not self.dialect._has_events \ - else self.dialect.dispatch.do_execute: - if fn(cursor, statement, parameters, context): - break - else: - self.dialect.do_execute( - cursor, - statement, - parameters, - context) - except Exception as e: - self._handle_dbapi_exception( - e, - statement, - parameters, - cursor, - context) - - if self._has_events or self.engine._has_events: - self.dispatch.after_cursor_execute(self, cursor, - statement, - parameters, - context, - False) - - def _safe_close_cursor(self, cursor): - """Close the given cursor, catching exceptions - and turning into log warnings. - - """ - try: - cursor.close() - except Exception: - # log the error through the connection pool's logger. - self.engine.pool.logger.error( - "Error closing cursor", exc_info=True) - - _reentrant_error = False - _is_disconnect = False - - def _handle_dbapi_exception(self, - e, - statement, - parameters, - cursor, - context): - exc_info = sys.exc_info() - - if context and context.exception is None: - context.exception = e - - if not self._is_disconnect: - self._is_disconnect = \ - isinstance(e, self.dialect.dbapi.Error) and \ - not self.closed and \ - self.dialect.is_disconnect( - e, - self.__connection if not self.invalidated else None, - cursor) - if context: - context.is_disconnect = self._is_disconnect - - invalidate_pool_on_disconnect = True - - if self._reentrant_error: - util.raise_from_cause( - exc.DBAPIError.instance(statement, - parameters, - e, - self.dialect.dbapi.Error, - dialect=self.dialect), - exc_info - ) - self._reentrant_error = True - try: - # non-DBAPI error - if we already got a context, - # or there's no string statement, don't wrap it - should_wrap = isinstance(e, self.dialect.dbapi.Error) or \ - (statement is not None and context is None) - - if should_wrap: - sqlalchemy_exception = exc.DBAPIError.instance( - statement, - parameters, - e, - self.dialect.dbapi.Error, - connection_invalidated=self._is_disconnect, - dialect=self.dialect) - else: - sqlalchemy_exception = None - - newraise = None - - if (self._has_events or self.engine._has_events) and \ - not self._execution_options.get( - 'skip_user_error_events', False): - # legacy dbapi_error event - if should_wrap and context: - self.dispatch.dbapi_error(self, - cursor, - statement, - parameters, - context, - e) - - # new handle_error event - ctx = ExceptionContextImpl( - e, sqlalchemy_exception, self.engine, - self, cursor, statement, - parameters, context, self._is_disconnect) - - for fn in self.dispatch.handle_error: - try: - # handler returns an exception; - # call next handler in a chain - per_fn = fn(ctx) - if per_fn is not None: - ctx.chained_exception = newraise = per_fn - except Exception as _raised: - # handler raises an exception - stop processing - newraise = _raised - break - - if sqlalchemy_exception and \ - 
self._is_disconnect != ctx.is_disconnect: - sqlalchemy_exception.connection_invalidated = \ - self._is_disconnect = ctx.is_disconnect - - # set up potentially user-defined value for - # invalidate pool. - invalidate_pool_on_disconnect = \ - ctx.invalidate_pool_on_disconnect - - if should_wrap and context: - context.handle_dbapi_exception(e) - - if not self._is_disconnect: - if cursor: - self._safe_close_cursor(cursor) - self._autorollback() - - if newraise: - util.raise_from_cause(newraise, exc_info) - elif should_wrap: - util.raise_from_cause( - sqlalchemy_exception, - exc_info - ) - else: - util.reraise(*exc_info) - - finally: - del self._reentrant_error - if self._is_disconnect: - del self._is_disconnect - if not self.invalidated: - dbapi_conn_wrapper = self.__connection - if invalidate_pool_on_disconnect: - self.engine.pool._invalidate(dbapi_conn_wrapper, e) - self.invalidate(e) - if self.should_close_with_result: - self.close() - - @classmethod - def _handle_dbapi_exception_noconnection(cls, e, dialect, engine): - - exc_info = sys.exc_info() - - is_disconnect = dialect.is_disconnect(e, None, None) - - should_wrap = isinstance(e, dialect.dbapi.Error) - - if should_wrap: - sqlalchemy_exception = exc.DBAPIError.instance( - None, - None, - e, - dialect.dbapi.Error, - connection_invalidated=is_disconnect) - else: - sqlalchemy_exception = None - - newraise = None - - if engine._has_events: - ctx = ExceptionContextImpl( - e, sqlalchemy_exception, engine, None, None, None, - None, None, is_disconnect) - for fn in engine.dispatch.handle_error: - try: - # handler returns an exception; - # call next handler in a chain - per_fn = fn(ctx) - if per_fn is not None: - ctx.chained_exception = newraise = per_fn - except Exception as _raised: - # handler raises an exception - stop processing - newraise = _raised - break - - if sqlalchemy_exception and \ - is_disconnect != ctx.is_disconnect: - sqlalchemy_exception.connection_invalidated = \ - is_disconnect = ctx.is_disconnect - - if newraise: - util.raise_from_cause(newraise, exc_info) - elif should_wrap: - util.raise_from_cause( - sqlalchemy_exception, - exc_info - ) - else: - util.reraise(*exc_info) - - def default_schema_name(self): - return self.engine.dialect.get_default_schema_name(self) - - def transaction(self, callable_, *args, **kwargs): - """Execute the given function within a transaction boundary. - - The function is passed this :class:`.Connection` - as the first argument, followed by the given \*args and \**kwargs, - e.g.:: - - def do_something(conn, x, y): - conn.execute("some statement", {'x':x, 'y':y}) - - conn.transaction(do_something, 5, 10) - - The operations inside the function are all invoked within the - context of a single :class:`.Transaction`. - Upon success, the transaction is committed. If an - exception is raised, the transaction is rolled back - before propagating the exception. - - .. 
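The ``handle_error`` dispatch loop above consumes listeners such as the
following sketch; returning an exception from the handler chains it in place
of the original (``MyAppError`` here is a hypothetical application
exception)::

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite:///:memory:")

    class MyAppError(Exception):
        pass

    @event.listens_for(engine, "handle_error")
    def translate(context):
        # `context` is the ExceptionContext constructed above
        if context.is_disconnect:
            return MyAppError("database connection was lost")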
note:: - - The :meth:`.transaction` method is superseded by - the usage of the Python ``with:`` statement, which can - be used with :meth:`.Connection.begin`:: - - with conn.begin(): - conn.execute("some statement", {'x':5, 'y':10}) - - As well as with :meth:`.Engine.begin`:: - - with engine.begin() as conn: - conn.execute("some statement", {'x':5, 'y':10}) - - See also: - - :meth:`.Engine.begin` - engine-level transactional - context - - :meth:`.Engine.transaction` - engine-level version of - :meth:`.Connection.transaction` - - """ - - trans = self.begin() - try: - ret = self.run_callable(callable_, *args, **kwargs) - trans.commit() - return ret - except: - with util.safe_reraise(): - trans.rollback() - - def run_callable(self, callable_, *args, **kwargs): - """Given a callable object or function, execute it, passing - a :class:`.Connection` as the first argument. - - The given \*args and \**kwargs are passed subsequent - to the :class:`.Connection` argument. - - This function, along with :meth:`.Engine.run_callable`, - allows a function to be run with a :class:`.Connection` - or :class:`.Engine` object without the need to know - which one is being dealt with. - - """ - return callable_(self, *args, **kwargs) - - def _run_visitor(self, visitorcallable, element, **kwargs): - visitorcallable(self.dialect, self, - **kwargs).traverse_single(element) - - -class ExceptionContextImpl(ExceptionContext): - """Implement the :class:`.ExceptionContext` interface.""" - - def __init__(self, exception, sqlalchemy_exception, - engine, connection, cursor, statement, parameters, - context, is_disconnect): - self.engine = engine - self.connection = connection - self.sqlalchemy_exception = sqlalchemy_exception - self.original_exception = exception - self.execution_context = context - self.statement = statement - self.parameters = parameters - self.is_disconnect = is_disconnect - - -class Transaction(object): - """Represent a database transaction in progress. - - The :class:`.Transaction` object is procured by - calling the :meth:`~.Connection.begin` method of - :class:`.Connection`:: - - from sqlalchemy import create_engine - engine = create_engine("postgresql://scott:tiger@localhost/test") - connection = engine.connect() - trans = connection.begin() - connection.execute("insert into x (a, b) values (1, 2)") - trans.commit() - - The object provides :meth:`.rollback` and :meth:`.commit` - methods in order to control transaction boundaries. It - also implements a context manager interface so that - the Python ``with`` statement can be used with the - :meth:`.Connection.begin` method:: - - with connection.begin(): - connection.execute("insert into x (a, b) values (1, 2)") - - The Transaction object is **not** threadsafe. - - See also: :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`, - :meth:`.Connection.begin_nested`. - - .. index:: - single: thread safety; Transaction - """ - - def __init__(self, connection, parent): - self.connection = connection - self._actual_parent = parent - self.is_active = True - - @property - def _parent(self): - return self._actual_parent or self - - def close(self): - """Close this :class:`.Transaction`. - - If this transaction is the base transaction in a begin/commit - nesting, the transaction will rollback(). Otherwise, the - method returns. - - This is used to cancel a Transaction without affecting the scope of - an enclosing transaction. 
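The :meth:`.Connection.transaction` / :meth:`.run_callable` pattern above,
spelled out; a minimal sketch (the table name is arbitrary)::

    from sqlalchemy import create_engine

    def do_something(conn, x, y):
        conn.execute("CREATE TABLE IF NOT EXISTS kv (x INTEGER, y INTEGER)")
        conn.execute("INSERT INTO kv (x, y) VALUES (?, ?)", x, y)

    engine = create_engine("sqlite:///:memory:")
    conn = engine.connect()
    conn.transaction(do_something, 5, 10)   # committed on success,
                                            # rolled back on exception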
- - """ - if not self._parent.is_active: - return - if self._parent is self: - self.rollback() - - def rollback(self): - """Roll back this :class:`.Transaction`. - - """ - if not self._parent.is_active: - return - self._do_rollback() - self.is_active = False - - def _do_rollback(self): - self._parent.rollback() - - def commit(self): - """Commit this :class:`.Transaction`.""" - - if not self._parent.is_active: - raise exc.InvalidRequestError("This transaction is inactive") - self._do_commit() - self.is_active = False - - def _do_commit(self): - pass - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if type is None and self.is_active: - try: - self.commit() - except: - with util.safe_reraise(): - self.rollback() - else: - self.rollback() - - -class RootTransaction(Transaction): - def __init__(self, connection): - super(RootTransaction, self).__init__(connection, None) - self.connection._begin_impl(self) - - def _do_rollback(self): - if self.is_active: - self.connection._rollback_impl() - - def _do_commit(self): - if self.is_active: - self.connection._commit_impl() - - -class NestedTransaction(Transaction): - """Represent a 'nested', or SAVEPOINT transaction. - - A new :class:`.NestedTransaction` object may be procured - using the :meth:`.Connection.begin_nested` method. - - The interface is the same as that of :class:`.Transaction`. - - """ - - def __init__(self, connection, parent): - super(NestedTransaction, self).__init__(connection, parent) - self._savepoint = self.connection._savepoint_impl() - - def _do_rollback(self): - if self.is_active: - self.connection._rollback_to_savepoint_impl( - self._savepoint, self._parent) - - def _do_commit(self): - if self.is_active: - self.connection._release_savepoint_impl( - self._savepoint, self._parent) - - -class TwoPhaseTransaction(Transaction): - """Represent a two-phase transaction. - - A new :class:`.TwoPhaseTransaction` object may be procured - using the :meth:`.Connection.begin_twophase` method. - - The interface is the same as that of :class:`.Transaction` - with the addition of the :meth:`prepare` method. - - """ - - def __init__(self, connection, xid): - super(TwoPhaseTransaction, self).__init__(connection, None) - self._is_prepared = False - self.xid = xid - self.connection._begin_twophase_impl(self) - - def prepare(self): - """Prepare this :class:`.TwoPhaseTransaction`. - - After a PREPARE, the transaction can be committed. - - """ - if not self._parent.is_active: - raise exc.InvalidRequestError("This transaction is inactive") - self.connection._prepare_twophase_impl(self.xid) - self._is_prepared = True - - def _do_rollback(self): - self.connection._rollback_twophase_impl(self.xid, self._is_prepared) - - def _do_commit(self): - self.connection._commit_twophase_impl(self.xid, self._is_prepared) - - -class Engine(Connectable, log.Identified): - """ - Connects a :class:`~sqlalchemy.pool.Pool` and - :class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a - source of database connectivity and behavior. - - An :class:`.Engine` object is instantiated publicly using the - :func:`~sqlalchemy.create_engine` function. 
- - See also: - - :doc:`/core/engines` - - :ref:`connections_toplevel` - - """ - - _execution_options = util.immutabledict() - _has_events = False - _connection_cls = Connection - - def __init__(self, pool, dialect, url, - logging_name=None, echo=None, proxy=None, - execution_options=None - ): - self.pool = pool - self.url = url - self.dialect = dialect - self.pool._dialect = dialect - if logging_name: - self.logging_name = logging_name - self.echo = echo - self.engine = self - log.instance_logger(self, echoflag=echo) - if proxy: - interfaces.ConnectionProxy._adapt_listener(self, proxy) - if execution_options: - self.update_execution_options(**execution_options) - - def update_execution_options(self, **opt): - """Update the default execution_options dictionary - of this :class:`.Engine`. - - The given keys/values in \**opt are added to the - default execution options that will be used for - all connections. The initial contents of this dictionary - can be sent via the ``execution_options`` parameter - to :func:`.create_engine`. - - .. seealso:: - - :meth:`.Connection.execution_options` - - :meth:`.Engine.execution_options` - - """ - self._execution_options = \ - self._execution_options.union(opt) - self.dispatch.set_engine_execution_options(self, opt) - self.dialect.set_engine_execution_options(self, opt) - - def execution_options(self, **opt): - """Return a new :class:`.Engine` that will provide - :class:`.Connection` objects with the given execution options. - - The returned :class:`.Engine` remains related to the original - :class:`.Engine` in that it shares the same connection pool and - other state: - - * The :class:`.Pool` used by the new :class:`.Engine` is the - same instance. The :meth:`.Engine.dispose` method will replace - the connection pool instance for the parent engine as well - as this one. - * Event listeners are "cascaded" - meaning, the new :class:`.Engine` - inherits the events of the parent, and new events can be associated - with the new :class:`.Engine` individually. - * The logging configuration and logging_name is copied from the parent - :class:`.Engine`. - - The intent of the :meth:`.Engine.execution_options` method is - to implement "sharding" schemes where multiple :class:`.Engine` - objects refer to the same connection pool, but are differentiated - by options that would be consumed by a custom event:: - - primary_engine = create_engine("mysql://") - shard1 = primary_engine.execution_options(shard_id="shard1") - shard2 = primary_engine.execution_options(shard_id="shard2") - - Above, the ``shard1`` engine serves as a factory for - :class:`.Connection` objects that will contain the execution option - ``shard_id=shard1``, and ``shard2`` will produce :class:`.Connection` - objects that contain the execution option ``shard_id=shard2``. - - An event handler can consume the above execution option to perform - a schema switch or other operation, given a connection. 
Below - we emit a MySQL ``use`` statement to switch databases, at the same - time keeping track of which database we've established using the - :attr:`.Connection.info` dictionary, which gives us a persistent - storage space that follows the DBAPI connection:: - - from sqlalchemy import event - from sqlalchemy.engine import Engine - - shards = {"default": "base", shard_1: "db1", "shard_2": "db2"} - - @event.listens_for(Engine, "before_cursor_execute") - def _switch_shard(conn, cursor, stmt, - params, context, executemany): - shard_id = conn._execution_options.get('shard_id', "default") - current_shard = conn.info.get("current_shard", None) - - if current_shard != shard_id: - cursor.execute("use %s" % shards[shard_id]) - conn.info["current_shard"] = shard_id - - .. versionadded:: 0.8 - - .. seealso:: - - :meth:`.Connection.execution_options` - update execution options - on a :class:`.Connection` object. - - :meth:`.Engine.update_execution_options` - update the execution - options for a given :class:`.Engine` in place. - - """ - return OptionEngine(self, opt) - - @property - def name(self): - """String name of the :class:`~sqlalchemy.engine.interfaces.Dialect` - in use by this :class:`Engine`.""" - - return self.dialect.name - - @property - def driver(self): - """Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect` - in use by this :class:`Engine`.""" - - return self.dialect.driver - - echo = log.echo_property() - - def __repr__(self): - return 'Engine(%r)' % self.url - - def dispose(self): - """Dispose of the connection pool used by this :class:`.Engine`. - - This has the effect of fully closing all **currently checked in** - database connections. Connections that are still checked out - will **not** be closed, however they will no longer be associated - with this :class:`.Engine`, so when they are closed individually, - eventually the :class:`.Pool` which they are associated with will - be garbage collected and they will be closed out fully, if - not already closed on checkin. - - A new connection pool is created immediately after the old one has - been disposed. This new pool, like all SQLAlchemy connection pools, - does not make any actual connections to the database until one is - first requested, so as long as the :class:`.Engine` isn't used again, - no new connections will be made. - - .. 
seealso:: - - :ref:`engine_disposal` - - """ - self.pool.dispose() - self.pool = self.pool.recreate() - self.dispatch.engine_disposed(self) - - def _execute_default(self, default): - with self.contextual_connect() as conn: - return conn._execute_default(default, (), {}) - - @contextlib.contextmanager - def _optional_conn_ctx_manager(self, connection=None): - if connection is None: - with self.contextual_connect() as conn: - yield conn - else: - yield connection - - def _run_visitor(self, visitorcallable, element, - connection=None, **kwargs): - with self._optional_conn_ctx_manager(connection) as conn: - conn._run_visitor(visitorcallable, element, **kwargs) - - class _trans_ctx(object): - def __init__(self, conn, transaction, close_with_result): - self.conn = conn - self.transaction = transaction - self.close_with_result = close_with_result - - def __enter__(self): - return self.conn - - def __exit__(self, type, value, traceback): - if type is not None: - self.transaction.rollback() - else: - self.transaction.commit() - if not self.close_with_result: - self.conn.close() - - def begin(self, close_with_result=False): - """Return a context manager delivering a :class:`.Connection` - with a :class:`.Transaction` established. - - E.g.:: - - with engine.begin() as conn: - conn.execute("insert into table (x, y, z) values (1, 2, 3)") - conn.execute("my_special_procedure(5)") - - Upon successful operation, the :class:`.Transaction` - is committed. If an error is raised, the :class:`.Transaction` - is rolled back. - - The ``close_with_result`` flag is normally ``False``, and indicates - that the :class:`.Connection` will be closed when the operation - is complete. When set to ``True``, it indicates the - :class:`.Connection` is in "single use" mode, where the - :class:`.ResultProxy` returned by the first call to - :meth:`.Connection.execute` will close the :class:`.Connection` when - that :class:`.ResultProxy` has exhausted all result rows. - - .. versionadded:: 0.7.6 - - See also: - - :meth:`.Engine.connect` - procure a :class:`.Connection` from - an :class:`.Engine`. - - :meth:`.Connection.begin` - start a :class:`.Transaction` - for a particular :class:`.Connection`. - - """ - conn = self.contextual_connect(close_with_result=close_with_result) - try: - trans = conn.begin() - except: - with util.safe_reraise(): - conn.close() - return Engine._trans_ctx(conn, trans, close_with_result) - - def transaction(self, callable_, *args, **kwargs): - """Execute the given function within a transaction boundary. - - The function is passed a :class:`.Connection` newly procured - from :meth:`.Engine.contextual_connect` as the first argument, - followed by the given \*args and \**kwargs. - - e.g.:: - - def do_something(conn, x, y): - conn.execute("some statement", {'x':x, 'y':y}) - - engine.transaction(do_something, 5, 10) - - The operations inside the function are all invoked within the - context of a single :class:`.Transaction`. - Upon success, the transaction is committed. If an - exception is raised, the transaction is rolled back - before propagating the exception. - - .. 
note:: - - The :meth:`.transaction` method is superseded by - the usage of the Python ``with:`` statement, which can - be used with :meth:`.Engine.begin`:: - - with engine.begin() as conn: - conn.execute("some statement", {'x':5, 'y':10}) - - See also: - - :meth:`.Engine.begin` - engine-level transactional - context - - :meth:`.Connection.transaction` - connection-level version of - :meth:`.Engine.transaction` - - """ - - with self.contextual_connect() as conn: - return conn.transaction(callable_, *args, **kwargs) - - def run_callable(self, callable_, *args, **kwargs): - """Given a callable object or function, execute it, passing - a :class:`.Connection` as the first argument. - - The given \*args and \**kwargs are passed subsequent - to the :class:`.Connection` argument. - - This function, along with :meth:`.Connection.run_callable`, - allows a function to be run with a :class:`.Connection` - or :class:`.Engine` object without the need to know - which one is being dealt with. - - """ - with self.contextual_connect() as conn: - return conn.run_callable(callable_, *args, **kwargs) - - def execute(self, statement, *multiparams, **params): - """Executes the given construct and returns a :class:`.ResultProxy`. - - The arguments are the same as those used by - :meth:`.Connection.execute`. - - Here, a :class:`.Connection` is acquired using the - :meth:`~.Engine.contextual_connect` method, and the statement executed - with that connection. The returned :class:`.ResultProxy` is flagged - such that when the :class:`.ResultProxy` is exhausted and its - underlying cursor is closed, the :class:`.Connection` created here - will also be closed, which allows its associated DBAPI connection - resource to be returned to the connection pool. - - """ - - connection = self.contextual_connect(close_with_result=True) - return connection.execute(statement, *multiparams, **params) - - def scalar(self, statement, *multiparams, **params): - return self.execute(statement, *multiparams, **params).scalar() - - def _execute_clauseelement(self, elem, multiparams=None, params=None): - connection = self.contextual_connect(close_with_result=True) - return connection._execute_clauseelement(elem, multiparams, params) - - def _execute_compiled(self, compiled, multiparams, params): - connection = self.contextual_connect(close_with_result=True) - return connection._execute_compiled(compiled, multiparams, params) - - def connect(self, **kwargs): - """Return a new :class:`.Connection` object. - - The :class:`.Connection` object is a facade that uses a DBAPI - connection internally in order to communicate with the database. This - connection is procured from the connection-holding :class:`.Pool` - referenced by this :class:`.Engine`. When the - :meth:`~.Connection.close` method of the :class:`.Connection` object - is called, the underlying DBAPI connection is then returned to the - connection pool, where it may be used again in a subsequent call to - :meth:`~.Engine.connect`. - - """ - - return self._connection_cls(self, **kwargs) - - def contextual_connect(self, close_with_result=False, **kwargs): - """Return a :class:`.Connection` object which may be part of some - ongoing context. - - By default, this method does the same thing as :meth:`.Engine.connect`. - Subclasses of :class:`.Engine` may override this method - to provide contextual behavior. 
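The "connectionless" path that ``Engine.execute()`` describes above is easier to see in code than in prose; here is a minimal sketch against the 1.0-era API being removed (the in-memory SQLite URL and the table name are illustrative placeholders, not part of the removed module)::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")
    engine.execute("create table t (x integer)")
    engine.execute("insert into t values (1)")

    # Engine.execute() acquires a Connection with close_with_result=True;
    # the Connection is released back to the pool once the ResultProxy
    # has been exhausted.
    result = engine.execute("select x from t")
    for row in result:
        print(row.x)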
- - :param close_with_result: When True, the first :class:`.ResultProxy` - created by the :class:`.Connection` will call the - :meth:`.Connection.close` method of that connection as soon as any - pending result rows are exhausted. This is used to supply the - "connectionless execution" behavior provided by the - :meth:`.Engine.execute` method. - - """ - - return self._connection_cls( - self, - self._wrap_pool_connect(self.pool.connect, None), - close_with_result=close_with_result, - **kwargs) - - def table_names(self, schema=None, connection=None): - """Return a list of all table names available in the database. - - :param schema: Optional, retrieve names from a non-default schema. - - :param connection: Optional, use a specified connection. Default is - the ``contextual_connect`` for this ``Engine``. - """ - - with self._optional_conn_ctx_manager(connection) as conn: - if not schema: - schema = self.dialect.default_schema_name - return self.dialect.get_table_names(conn, schema) - - def has_table(self, table_name, schema=None): - """Return True if the given backend has a table of the given name. - - .. seealso:: - - :ref:`metadata_reflection_inspector` - detailed schema inspection - using the :class:`.Inspector` interface. - - :class:`.quoted_name` - used to pass quoting information along - with a schema identifier. - - """ - return self.run_callable(self.dialect.has_table, table_name, schema) - - def _wrap_pool_connect(self, fn, connection): - dialect = self.dialect - try: - return fn() - except dialect.dbapi.Error as e: - if connection is None: - Connection._handle_dbapi_exception_noconnection( - e, dialect, self) - else: - util.reraise(*sys.exc_info()) - - def raw_connection(self, _connection=None): - """Return a "raw" DBAPI connection from the connection pool. - - The returned object is a proxied version of the DBAPI - connection object used by the underlying driver in use. - The object will have all the same behavior as the real DBAPI - connection, except that its ``close()`` method will result in the - connection being returned to the pool, rather than being closed - for real. - - This method provides direct DBAPI connection access for - special situations when the API provided by :class:`.Connection` - is not needed. When a :class:`.Connection` object is already - present, the DBAPI connection is available using - the :attr:`.Connection.connection` accessor. - - .. 
seealso:: - - :ref:`dbapi_connections` - - """ - return self._wrap_pool_connect( - self.pool.unique_connection, _connection) - - -class OptionEngine(Engine): - def __init__(self, proxied, execution_options): - self._proxied = proxied - self.url = proxied.url - self.dialect = proxied.dialect - self.logging_name = proxied.logging_name - self.echo = proxied.echo - log.instance_logger(self, echoflag=self.echo) - self.dispatch = self.dispatch._join(proxied.dispatch) - self._execution_options = proxied._execution_options - self.update_execution_options(**execution_options) - - def _get_pool(self): - return self._proxied.pool - - def _set_pool(self, pool): - self._proxied.pool = pool - - pool = property(_get_pool, _set_pool) - - def _get_has_events(self): - return self._proxied._has_events or \ - self.__dict__.get('_has_events', False) - - def _set_has_events(self, value): - self.__dict__['_has_events'] = value - - _has_events = property(_get_has_events, _set_has_events) diff --git a/python/sqlalchemy/engine/default.py b/python/sqlalchemy/engine/default.py deleted file mode 100644 index 9a7b80bf..00000000 --- a/python/sqlalchemy/engine/default.py +++ /dev/null @@ -1,1023 +0,0 @@ -# engine/default.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Default implementations of per-dialect sqlalchemy.engine classes. - -These are semi-private implementation classes which are only of importance -to database dialect authors; dialects will usually use the classes here -as the base class for their own corresponding classes. - -""" - -import re -import random -from . import reflection, interfaces, result -from ..sql import compiler, expression -from .. import types as sqltypes -from .. import exc, util, pool, processors -import codecs -import weakref -from .. import event - -AUTOCOMMIT_REGEXP = re.compile( - r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)', - re.I | re.UNICODE) - - -class DefaultDialect(interfaces.Dialect): - """Default implementation of Dialect""" - - statement_compiler = compiler.SQLCompiler - ddl_compiler = compiler.DDLCompiler - type_compiler = compiler.GenericTypeCompiler - preparer = compiler.IdentifierPreparer - supports_alter = True - - # the first value we'd get for an autoincrement - # column. - default_sequence_base = 1 - - # most DBAPIs happy with this for execute(). - # not cx_oracle. - execute_sequence_format = tuple - - supports_views = True - supports_sequences = False - sequences_optional = False - preexecute_autoincrement_sequences = False - postfetch_lastrowid = True - implicit_returning = False - - supports_right_nested_joins = True - - supports_native_enum = False - supports_native_boolean = False - - supports_simple_order_by_label = True - - engine_config_types = util.immutabledict([ - ('convert_unicode', util.bool_or_str('force')), - ('pool_timeout', util.asint), - ('echo', util.bool_or_str('debug')), - ('echo_pool', util.bool_or_str('debug')), - ('pool_recycle', util.asint), - ('pool_size', util.asint), - ('max_overflow', util.asint), - ('pool_threadlocal', util.asbool), - ]) - - # if the NUMERIC type - # returns decimal.Decimal. - # *not* the FLOAT type however. 
- supports_native_decimal = False - - if util.py3k: - supports_unicode_statements = True - supports_unicode_binds = True - returns_unicode_strings = True - description_encoding = None - else: - supports_unicode_statements = False - supports_unicode_binds = False - returns_unicode_strings = False - description_encoding = 'use_encoding' - - name = 'default' - - # length at which to truncate - # any identifier. - max_identifier_length = 9999 - - # length at which to truncate - # the name of an index. - # Usually None to indicate - # 'use max_identifier_length'. - # thanks to MySQL, sigh - max_index_name_length = None - - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - dbapi_type_map = {} - colspecs = {} - default_paramstyle = 'named' - supports_default_values = False - supports_empty_insert = True - supports_multivalues_insert = False - - server_version_info = None - - construct_arguments = None - """Optional set of argument specifiers for various SQLAlchemy - constructs, typically schema items. - - To implement, establish as a series of tuples, as in:: - - construct_arguments = [ - (schema.Index, { - "using": False, - "where": None, - "ops": None - }) - ] - - If the above construct is established on the Postgresql dialect, - the :class:`.Index` construct will now accept the keyword arguments - ``postgresql_using``, ``postgresql_where``, nad ``postgresql_ops``. - Any other argument specified to the constructor of :class:`.Index` - which is prefixed with ``postgresql_`` will raise :class:`.ArgumentError`. - - A dialect which does not include a ``construct_arguments`` member will - not participate in the argument validation system. For such a dialect, - any argument name is accepted by all participating constructs, within - the namespace of arguments prefixed with that dialect name. The rationale - here is so that third-party dialects that haven't yet implemented this - feature continue to function in the old way. - - .. versionadded:: 0.9.2 - - .. seealso:: - - :class:`.DialectKWArgs` - implementing base class which consumes - :attr:`.DefaultDialect.construct_arguments` - - - """ - - # indicates symbol names are - # UPPERCASEd if they are case insensitive - # within the database. - # if this is True, the methods normalize_name() - # and denormalize_name() must be provided. - requires_name_normalize = False - - reflection_options = () - - dbapi_exception_translation_map = util.immutabledict() - """mapping used in the extremely unusual case that a DBAPI's - published exceptions don't actually have the __name__ that they - are linked towards. - - .. 
versionadded:: 1.0.5 - - """ - - def __init__(self, convert_unicode=False, - encoding='utf-8', paramstyle=None, dbapi=None, - implicit_returning=None, - supports_right_nested_joins=None, - case_sensitive=True, - supports_native_boolean=None, - label_length=None, **kwargs): - - if not getattr(self, 'ported_sqla_06', True): - util.warn( - "The %s dialect is not yet ported to the 0.6 format" % - self.name) - - self.convert_unicode = convert_unicode - self.encoding = encoding - self.positional = False - self._ischema = None - self.dbapi = dbapi - if paramstyle is not None: - self.paramstyle = paramstyle - elif self.dbapi is not None: - self.paramstyle = self.dbapi.paramstyle - else: - self.paramstyle = self.default_paramstyle - if implicit_returning is not None: - self.implicit_returning = implicit_returning - self.positional = self.paramstyle in ('qmark', 'format', 'numeric') - self.identifier_preparer = self.preparer(self) - self.type_compiler = self.type_compiler(self) - if supports_right_nested_joins is not None: - self.supports_right_nested_joins = supports_right_nested_joins - if supports_native_boolean is not None: - self.supports_native_boolean = supports_native_boolean - self.case_sensitive = case_sensitive - - if label_length and label_length > self.max_identifier_length: - raise exc.ArgumentError( - "Label length of %d is greater than this dialect's" - " maximum identifier length of %d" % - (label_length, self.max_identifier_length)) - self.label_length = label_length - - if self.description_encoding == 'use_encoding': - self._description_decoder = \ - processors.to_unicode_processor_factory( - encoding - ) - elif self.description_encoding is not None: - self._description_decoder = \ - processors.to_unicode_processor_factory( - self.description_encoding - ) - self._encoder = codecs.getencoder(self.encoding) - self._decoder = processors.to_unicode_processor_factory(self.encoding) - - @util.memoized_property - def _type_memos(self): - return weakref.WeakKeyDictionary() - - @property - def dialect_description(self): - return self.name + "+" + self.driver - - @classmethod - def get_pool_class(cls, url): - return getattr(cls, 'poolclass', pool.QueuePool) - - def initialize(self, connection): - try: - self.server_version_info = \ - self._get_server_version_info(connection) - except NotImplementedError: - self.server_version_info = None - try: - self.default_schema_name = \ - self._get_default_schema_name(connection) - except NotImplementedError: - self.default_schema_name = None - - try: - self.default_isolation_level = \ - self.get_isolation_level(connection.connection) - except NotImplementedError: - self.default_isolation_level = None - - self.returns_unicode_strings = self._check_unicode_returns(connection) - - if self.description_encoding is not None and \ - self._check_unicode_description(connection): - self._description_decoder = self.description_encoding = None - - self.do_rollback(connection.connection) - - def on_connect(self): - """return a callable which sets up a newly created DBAPI connection. - - This is used to set dialect-wide per-connection options such as - isolation modes, unicode modes, etc. - - If a callable is returned, it will be assembled into a pool listener - that receives the direct DBAPI connection, with all wrappers removed. - - If None is returned, no listener will be generated. 
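To make the ``on_connect()`` contract above concrete, a minimal sketch of a dialect subclass (the dialect name and the PRAGMA statement are illustrative placeholders)::

    from sqlalchemy.engine.default import DefaultDialect

    class MyDialect(DefaultDialect):
        name = "mydialect"

        def on_connect(self):
            # The returned callable receives the raw DBAPI connection,
            # with all pool wrappers removed, once per newly created
            # connection.
            def connect(dbapi_conn):
                cursor = dbapi_conn.cursor()
                cursor.execute("PRAGMA foreign_keys=ON")
                cursor.close()
            return connect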
- - """ - return None - - def _check_unicode_returns(self, connection, additional_tests=None): - if util.py2k and not self.supports_unicode_statements: - cast_to = util.binary_type - else: - cast_to = util.text_type - - if self.positional: - parameters = self.execute_sequence_format() - else: - parameters = {} - - def check_unicode(test): - statement = cast_to( - expression.select([test]).compile(dialect=self)) - try: - cursor = connection.connection.cursor() - connection._cursor_execute(cursor, statement, parameters) - row = cursor.fetchone() - cursor.close() - except exc.DBAPIError as de: - # note that _cursor_execute() will have closed the cursor - # if an exception is thrown. - util.warn("Exception attempting to " - "detect unicode returns: %r" % de) - return False - else: - return isinstance(row[0], util.text_type) - - tests = [ - # detect plain VARCHAR - expression.cast( - expression.literal_column("'test plain returns'"), - sqltypes.VARCHAR(60) - ), - # detect if there's an NVARCHAR type with different behavior - # available - expression.cast( - expression.literal_column("'test unicode returns'"), - sqltypes.Unicode(60) - ), - ] - - if additional_tests: - tests += additional_tests - - results = set([check_unicode(test) for test in tests]) - - if results.issuperset([True, False]): - return "conditional" - else: - return results == set([True]) - - def _check_unicode_description(self, connection): - # all DBAPIs on Py2K return cursor.description as encoded, - # until pypy2.1beta2 with sqlite, so let's just check it - - # it's likely others will start doing this too in Py2k. - - if util.py2k and not self.supports_unicode_statements: - cast_to = util.binary_type - else: - cast_to = util.text_type - - cursor = connection.connection.cursor() - try: - cursor.execute( - cast_to( - expression.select([ - expression.literal_column("'x'").label("some_label") - ]).compile(dialect=self) - ) - ) - return isinstance(cursor.description[0][0], util.text_type) - finally: - cursor.close() - - def type_descriptor(self, typeobj): - """Provide a database-specific :class:`.TypeEngine` object, given - the generic object which comes from the types module. - - This method looks for a dictionary called - ``colspecs`` as a class or instance-level variable, - and passes on to :func:`.types.adapt_type`. - - """ - return sqltypes.adapt_type(typeobj, self.colspecs) - - def reflecttable( - self, connection, table, include_columns, exclude_columns): - insp = reflection.Inspector.from_engine(connection) - return insp.reflecttable(table, include_columns, exclude_columns) - - def get_pk_constraint(self, conn, table_name, schema=None, **kw): - """Compatibility method, adapts the result of get_primary_keys() - for those dialects which don't implement get_pk_constraint(). 
- - """ - return { - 'constrained_columns': - self.get_primary_keys(conn, table_name, - schema=schema, **kw) - } - - def validate_identifier(self, ident): - if len(ident) > self.max_identifier_length: - raise exc.IdentifierError( - "Identifier '%s' exceeds maximum length of %d characters" % - (ident, self.max_identifier_length) - ) - - def connect(self, *cargs, **cparams): - return self.dbapi.connect(*cargs, **cparams) - - def create_connect_args(self, url): - opts = url.translate_connect_args() - opts.update(url.query) - return [[], opts] - - def set_engine_execution_options(self, engine, opts): - if 'isolation_level' in opts: - isolation_level = opts['isolation_level'] - - @event.listens_for(engine, "engine_connect") - def set_isolation(connection, branch): - if not branch: - self._set_connection_isolation(connection, isolation_level) - - def set_connection_execution_options(self, connection, opts): - if 'isolation_level' in opts: - self._set_connection_isolation(connection, opts['isolation_level']) - - def _set_connection_isolation(self, connection, level): - if connection.in_transaction(): - util.warn( - "Connection is already established with a Transaction; " - "setting isolation_level may implicitly rollback or commit " - "the existing transaction, or have no effect until " - "next transaction") - self.set_isolation_level(connection.connection, level) - connection.connection._connection_record.\ - finalize_callback.append(self.reset_isolation_level) - - def do_begin(self, dbapi_connection): - pass - - def do_rollback(self, dbapi_connection): - dbapi_connection.rollback() - - def do_commit(self, dbapi_connection): - dbapi_connection.commit() - - def do_close(self, dbapi_connection): - dbapi_connection.close() - - def create_xid(self): - """Create a random two-phase transaction ID. - - This id will be passed to do_begin_twophase(), do_rollback_twophase(), - do_commit_twophase(). Its format is unspecified. - """ - - return "_sa_%032x" % random.randint(0, 2 ** 128) - - def do_savepoint(self, connection, name): - connection.execute(expression.SavepointClause(name)) - - def do_rollback_to_savepoint(self, connection, name): - connection.execute(expression.RollbackToSavepointClause(name)) - - def do_release_savepoint(self, connection, name): - connection.execute(expression.ReleaseSavepointClause(name)) - - def do_executemany(self, cursor, statement, parameters, context=None): - cursor.executemany(statement, parameters) - - def do_execute(self, cursor, statement, parameters, context=None): - cursor.execute(statement, parameters) - - def do_execute_no_params(self, cursor, statement, context=None): - cursor.execute(statement) - - def is_disconnect(self, e, connection, cursor): - return False - - def reset_isolation_level(self, dbapi_conn): - # default_isolation_level is read from the first connection - # after the initial set of 'isolation_level', if any, so is - # the configured default of this dialect. 
- self.set_isolation_level(dbapi_conn, self.default_isolation_level) - - -class DefaultExecutionContext(interfaces.ExecutionContext): - isinsert = False - isupdate = False - isdelete = False - is_crud = False - isddl = False - executemany = False - compiled = None - statement = None - result_column_struct = None - _is_implicit_returning = False - _is_explicit_returning = False - - # a hook for SQLite's translation of - # result column names - _translate_colname = None - - @classmethod - def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl): - """Initialize execution context for a DDLElement construct.""" - - self = cls.__new__(cls) - self.root_connection = connection - self._dbapi_connection = dbapi_connection - self.dialect = connection.dialect - - self.compiled = compiled = compiled_ddl - self.isddl = True - - self.execution_options = compiled.statement._execution_options - if connection._execution_options: - self.execution_options = dict(self.execution_options) - self.execution_options.update(connection._execution_options) - - if not dialect.supports_unicode_statements: - self.unicode_statement = util.text_type(compiled) - self.statement = dialect._encoder(self.unicode_statement)[0] - else: - self.statement = self.unicode_statement = util.text_type(compiled) - - self.cursor = self.create_cursor() - self.compiled_parameters = [] - - if dialect.positional: - self.parameters = [dialect.execute_sequence_format()] - else: - self.parameters = [{}] - - return self - - @classmethod - def _init_compiled(cls, dialect, connection, dbapi_connection, - compiled, parameters): - """Initialize execution context for a Compiled construct.""" - - self = cls.__new__(cls) - self.root_connection = connection - self._dbapi_connection = dbapi_connection - self.dialect = connection.dialect - - self.compiled = compiled - - if not compiled.can_execute: - raise exc.ArgumentError("Not an executable clause") - - self.execution_options = compiled.statement._execution_options.union( - connection._execution_options) - - self.result_column_struct = ( - compiled._result_columns, compiled._ordered_columns) - - self.unicode_statement = util.text_type(compiled) - if not dialect.supports_unicode_statements: - self.statement = self.unicode_statement.encode( - self.dialect.encoding) - else: - self.statement = self.unicode_statement - - self.isinsert = compiled.isinsert - self.isupdate = compiled.isupdate - self.isdelete = compiled.isdelete - - if not parameters: - self.compiled_parameters = [compiled.construct_params()] - else: - self.compiled_parameters = \ - [compiled.construct_params(m, _group_number=grp) for - grp, m in enumerate(parameters)] - - self.executemany = len(parameters) > 1 - - self.cursor = self.create_cursor() - - if self.isinsert or self.isupdate or self.isdelete: - self.is_crud = True - self._is_explicit_returning = bool(compiled.statement._returning) - self._is_implicit_returning = bool( - compiled.returning and not compiled.statement._returning) - - if not self.isdelete: - if self.compiled.prefetch: - if self.executemany: - self._process_executemany_defaults() - else: - self._process_executesingle_defaults() - - processors = compiled._bind_processors - - # Convert the dictionary of bind parameter values - # into a dict or list to be sent to the DBAPI's - # execute() or executemany() method. 
- parameters = [] - if dialect.positional: - for compiled_params in self.compiled_parameters: - param = [] - for key in self.compiled.positiontup: - if key in processors: - param.append(processors[key](compiled_params[key])) - else: - param.append(compiled_params[key]) - parameters.append(dialect.execute_sequence_format(param)) - else: - encode = not dialect.supports_unicode_statements - for compiled_params in self.compiled_parameters: - - if encode: - param = dict( - ( - dialect._encoder(key)[0], - processors[key](compiled_params[key]) - if key in processors - else compiled_params[key] - ) - for key in compiled_params - ) - else: - param = dict( - ( - key, - processors[key](compiled_params[key]) - if key in processors - else compiled_params[key] - ) - for key in compiled_params - ) - - parameters.append(param) - self.parameters = dialect.execute_sequence_format(parameters) - - return self - - @classmethod - def _init_statement(cls, dialect, connection, dbapi_connection, - statement, parameters): - """Initialize execution context for a string SQL statement.""" - - self = cls.__new__(cls) - self.root_connection = connection - self._dbapi_connection = dbapi_connection - self.dialect = connection.dialect - - # plain text statement - self.execution_options = connection._execution_options - - if not parameters: - if self.dialect.positional: - self.parameters = [dialect.execute_sequence_format()] - else: - self.parameters = [{}] - elif isinstance(parameters[0], dialect.execute_sequence_format): - self.parameters = parameters - elif isinstance(parameters[0], dict): - if dialect.supports_unicode_statements: - self.parameters = parameters - else: - self.parameters = [ - dict((dialect._encoder(k)[0], d[k]) for k in d) - for d in parameters - ] or [{}] - else: - self.parameters = [dialect.execute_sequence_format(p) - for p in parameters] - - self.executemany = len(parameters) > 1 - - if not dialect.supports_unicode_statements and \ - isinstance(statement, util.text_type): - self.unicode_statement = statement - self.statement = dialect._encoder(statement)[0] - else: - self.statement = self.unicode_statement = statement - - self.cursor = self.create_cursor() - return self - - @classmethod - def _init_default(cls, dialect, connection, dbapi_connection): - """Initialize execution context for a ColumnDefault construct.""" - - self = cls.__new__(cls) - self.root_connection = connection - self._dbapi_connection = dbapi_connection - self.dialect = connection.dialect - self.execution_options = connection._execution_options - self.cursor = self.create_cursor() - return self - - @util.memoized_property - def engine(self): - return self.root_connection.engine - - @util.memoized_property - def postfetch_cols(self): - return self.compiled.postfetch - - @util.memoized_property - def prefetch_cols(self): - return self.compiled.prefetch - - @util.memoized_property - def returning_cols(self): - self.compiled.returning - - @util.memoized_property - def no_parameters(self): - return self.execution_options.get("no_parameters", False) - - @util.memoized_property - def should_autocommit(self): - autocommit = self.execution_options.get('autocommit', - not self.compiled and - self.statement and - expression.PARSE_AUTOCOMMIT - or False) - - if autocommit is expression.PARSE_AUTOCOMMIT: - return self.should_autocommit_text(self.unicode_statement) - else: - return autocommit - - def _execute_scalar(self, stmt, type_): - """Execute a string statement on the current cursor, returning a - scalar result. 
- - Used to fire off sequences, default phrases, and "select lastrowid" - types of statements individually or in the context of a parent INSERT - or UPDATE statement. - - """ - - conn = self.root_connection - if isinstance(stmt, util.text_type) and \ - not self.dialect.supports_unicode_statements: - stmt = self.dialect._encoder(stmt)[0] - - if self.dialect.positional: - default_params = self.dialect.execute_sequence_format() - else: - default_params = {} - - conn._cursor_execute(self.cursor, stmt, default_params, context=self) - r = self.cursor.fetchone()[0] - if type_ is not None: - # apply type post processors to the result - proc = type_._cached_result_processor( - self.dialect, - self.cursor.description[0][1] - ) - if proc: - return proc(r) - return r - - @property - def connection(self): - return self.root_connection._branch() - - def should_autocommit_text(self, statement): - return AUTOCOMMIT_REGEXP.match(statement) - - def create_cursor(self): - return self._dbapi_connection.cursor() - - def pre_exec(self): - pass - - def post_exec(self): - pass - - def get_result_processor(self, type_, colname, coltype): - """Return a 'result processor' for a given type as present in - cursor.description. - - This has a default implementation that dialects can override - for context-sensitive result type handling. - - """ - return type_._cached_result_processor(self.dialect, coltype) - - def get_lastrowid(self): - """return self.cursor.lastrowid, or equivalent, after an INSERT. - - This may involve calling special cursor functions, - issuing a new SELECT on the cursor (or a new one), - or returning a stored value that was - calculated within post_exec(). - - This function will only be called for dialects - which support "implicit" primary key generation, - keep preexecute_autoincrement_sequences set to False, - and when no explicit id value was bound to the - statement. - - The function is called once, directly after - post_exec() and before the transaction is committed - or ResultProxy is generated. If the post_exec() - method assigns a value to `self._lastrowid`, the - value is used in place of calling get_lastrowid(). - - Note that this method is *not* equivalent to the - ``lastrowid`` method on ``ResultProxy``, which is a - direct proxy to the DBAPI ``lastrowid`` accessor - in all cases. 
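Since ``get_lastrowid()`` is only a contract, a hedged sketch of a dialect-specific override; the SELECT statement is a placeholder for whatever the backend actually requires::

    from sqlalchemy.engine.default import DefaultExecutionContext

    class MyExecutionContext(DefaultExecutionContext):
        def get_lastrowid(self):
            # Some DBAPIs expose no usable cursor.lastrowid, so the
            # value must be fetched explicitly; _execute_scalar() is
            # the helper documented above.
            return self._execute_scalar("SELECT LAST_INSERT_ID()", None)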
- - """ - return self.cursor.lastrowid - - def handle_dbapi_exception(self, e): - pass - - def get_result_proxy(self): - return result.ResultProxy(self) - - @property - def rowcount(self): - return self.cursor.rowcount - - def supports_sane_rowcount(self): - return self.dialect.supports_sane_rowcount - - def supports_sane_multi_rowcount(self): - return self.dialect.supports_sane_multi_rowcount - - def _setup_crud_result_proxy(self): - if self.isinsert and \ - not self.executemany: - if not self._is_implicit_returning and \ - not self.compiled.inline and \ - self.dialect.postfetch_lastrowid: - - self._setup_ins_pk_from_lastrowid() - - elif not self._is_implicit_returning: - self._setup_ins_pk_from_empty() - - result = self.get_result_proxy() - - if self.isinsert: - if self._is_implicit_returning: - row = result.fetchone() - self.returned_defaults = row - self._setup_ins_pk_from_implicit_returning(row) - result._soft_close(_autoclose_connection=False) - result._metadata = None - elif not self._is_explicit_returning: - result._soft_close(_autoclose_connection=False) - result._metadata = None - elif self.isupdate and self._is_implicit_returning: - row = result.fetchone() - self.returned_defaults = row - result._soft_close(_autoclose_connection=False) - result._metadata = None - - elif result._metadata is None: - # no results, get rowcount - # (which requires open cursor on some drivers - # such as kintersbasdb, mxodbc) - result.rowcount - result._soft_close(_autoclose_connection=False) - return result - - def _setup_ins_pk_from_lastrowid(self): - key_getter = self.compiled._key_getters_for_crud_column[2] - table = self.compiled.statement.table - compiled_params = self.compiled_parameters[0] - - lastrowid = self.get_lastrowid() - if lastrowid is not None: - autoinc_col = table._autoincrement_column - if autoinc_col is not None: - # apply type post processors to the lastrowid - proc = autoinc_col.type._cached_result_processor( - self.dialect, None) - if proc is not None: - lastrowid = proc(lastrowid) - self.inserted_primary_key = [ - lastrowid if c is autoinc_col else - compiled_params.get(key_getter(c), None) - for c in table.primary_key - ] - else: - # don't have a usable lastrowid, so - # do the same as _setup_ins_pk_from_empty - self.inserted_primary_key = [ - compiled_params.get(key_getter(c), None) - for c in table.primary_key - ] - - def _setup_ins_pk_from_empty(self): - key_getter = self.compiled._key_getters_for_crud_column[2] - table = self.compiled.statement.table - compiled_params = self.compiled_parameters[0] - self.inserted_primary_key = [ - compiled_params.get(key_getter(c), None) - for c in table.primary_key - ] - - def _setup_ins_pk_from_implicit_returning(self, row): - key_getter = self.compiled._key_getters_for_crud_column[2] - table = self.compiled.statement.table - compiled_params = self.compiled_parameters[0] - - self.inserted_primary_key = [ - row[col] if value is None else value - for col, value in [ - (col, compiled_params.get(key_getter(col), None)) - for col in table.primary_key - ] - ] - - def lastrow_has_defaults(self): - return (self.isinsert or self.isupdate) and \ - bool(self.compiled.postfetch) - - def set_input_sizes(self, translate=None, exclude_types=None): - """Given a cursor and ClauseParameters, call the appropriate - style of ``setinputsizes()`` on the cursor, using DB-API types - from the bind parameter's ``TypeEngine`` objects. - - This method only called by those dialects which require it, - currently cx_oracle. 
- - """ - - if not hasattr(self.compiled, 'bind_names'): - return - - types = dict( - (self.compiled.bind_names[bindparam], bindparam.type) - for bindparam in self.compiled.bind_names) - - if self.dialect.positional: - inputsizes = [] - for key in self.compiled.positiontup: - typeengine = types[key] - dbtype = typeengine.dialect_impl(self.dialect).\ - get_dbapi_type(self.dialect.dbapi) - if dbtype is not None and \ - (not exclude_types or dbtype not in exclude_types): - inputsizes.append(dbtype) - try: - self.cursor.setinputsizes(*inputsizes) - except Exception as e: - self.root_connection._handle_dbapi_exception( - e, None, None, None, self) - else: - inputsizes = {} - for key in self.compiled.bind_names.values(): - typeengine = types[key] - dbtype = typeengine.dialect_impl(self.dialect).\ - get_dbapi_type(self.dialect.dbapi) - if dbtype is not None and \ - (not exclude_types or dbtype not in exclude_types): - if translate: - key = translate.get(key, key) - if not self.dialect.supports_unicode_binds: - key = self.dialect._encoder(key)[0] - inputsizes[key] = dbtype - try: - self.cursor.setinputsizes(**inputsizes) - except Exception as e: - self.root_connection._handle_dbapi_exception( - e, None, None, None, self) - - def _exec_default(self, default, type_): - if default.is_sequence: - return self.fire_sequence(default, type_) - elif default.is_callable: - return default.arg(self) - elif default.is_clause_element: - # TODO: expensive branching here should be - # pulled into _exec_scalar() - conn = self.connection - c = expression.select([default.arg]).compile(bind=conn) - return conn._execute_compiled(c, (), {}).scalar() - else: - return default.arg - - def get_insert_default(self, column): - if column.default is None: - return None - else: - return self._exec_default(column.default, column.type) - - def get_update_default(self, column): - if column.onupdate is None: - return None - else: - return self._exec_default(column.onupdate, column.type) - - def _process_executemany_defaults(self): - key_getter = self.compiled._key_getters_for_crud_column[2] - - prefetch = self.compiled.prefetch - scalar_defaults = {} - - # pre-determine scalar Python-side defaults - # to avoid many calls of get_insert_default()/ - # get_update_default() - for c in prefetch: - if self.isinsert and c.default and c.default.is_scalar: - scalar_defaults[c] = c.default.arg - elif self.isupdate and c.onupdate and c.onupdate.is_scalar: - scalar_defaults[c] = c.onupdate.arg - - for param in self.compiled_parameters: - self.current_parameters = param - for c in prefetch: - if c in scalar_defaults: - val = scalar_defaults[c] - elif self.isinsert: - val = self.get_insert_default(c) - else: - val = self.get_update_default(c) - if val is not None: - param[key_getter(c)] = val - del self.current_parameters - - def _process_executesingle_defaults(self): - key_getter = self.compiled._key_getters_for_crud_column[2] - prefetch = self.compiled.prefetch - self.current_parameters = compiled_parameters = \ - self.compiled_parameters[0] - - for c in prefetch: - if self.isinsert: - if c.default and \ - not c.default.is_sequence and c.default.is_scalar: - val = c.default.arg - else: - val = self.get_insert_default(c) - else: - val = self.get_update_default(c) - - if val is not None: - compiled_parameters[key_getter(c)] = val - del self.current_parameters - - -DefaultDialect.execution_ctx_cls = DefaultExecutionContext diff --git a/python/sqlalchemy/engine/interfaces.py b/python/sqlalchemy/engine/interfaces.py deleted file mode 100644 index 
3bad765d..00000000 --- a/python/sqlalchemy/engine/interfaces.py +++ /dev/null @@ -1,1152 +0,0 @@ -# engine/interfaces.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Define core interfaces used by the engine system.""" - -from .. import util, event - -# backwards compat -from ..sql.compiler import Compiled, TypeCompiler - - -class Dialect(object): - """Define the behavior of a specific database and DB-API combination. - - Any aspect of metadata definition, SQL query generation, - execution, result-set handling, or anything else which varies - between databases is defined under the general category of the - Dialect. The Dialect acts as a factory for other - database-specific object implementations including - ExecutionContext, Compiled, DefaultGenerator, and TypeEngine. - - All Dialects implement the following attributes: - - name - identifying name for the dialect from a DBAPI-neutral point of view - (i.e. 'sqlite') - - driver - identifying name for the dialect's DBAPI - - positional - True if the paramstyle for this Dialect is positional. - - paramstyle - the paramstyle to be used (some DB-APIs support multiple - paramstyles). - - convert_unicode - True if Unicode conversion should be applied to all ``str`` - types. - - encoding - type of encoding to use for unicode, usually defaults to - 'utf-8'. - - statement_compiler - a :class:`.Compiled` class used to compile SQL statements - - ddl_compiler - a :class:`.Compiled` class used to compile DDL statements - - server_version_info - a tuple containing a version number for the DB backend in use. - This value is only available for supporting dialects, and is - typically populated during the initial connection to the database. - - default_schema_name - the name of the default schema. This value is only available for - supporting dialects, and is typically populated during the - initial connection to the database. - - execution_ctx_cls - a :class:`.ExecutionContext` class used to handle statement execution - - execute_sequence_format - either the 'tuple' or 'list' type, depending on what cursor.execute() - accepts for the second argument (they vary). - - preparer - a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to - quote identifiers. - - supports_alter - ``True`` if the database supports ``ALTER TABLE``. - - max_identifier_length - The maximum length of identifier names. - - supports_unicode_statements - Indicate whether the DB-API can receive SQL statements as Python - unicode strings - - supports_unicode_binds - Indicate whether the DB-API can receive string bind parameters - as Python unicode strings - - supports_sane_rowcount - Indicate whether the dialect properly implements rowcount for - ``UPDATE`` and ``DELETE`` statements. - - supports_sane_multi_rowcount - Indicate whether the dialect properly implements rowcount for - ``UPDATE`` and ``DELETE`` statements when executed via - executemany. - - preexecute_autoincrement_sequences - True if 'implicit' primary key functions must be executed separately - in order to get their value. This is currently oriented towards - Postgresql. - - implicit_returning - use RETURNING or equivalent during INSERT execution in order to load - newly generated primary keys and other column defaults in one execution, - which are then available via inserted_primary_key. 
- If an insert statement has returning() specified explicitly, - the "implicit" functionality is not used and inserted_primary_key - will not be available. - - dbapi_type_map - A mapping of DB-API type objects present in this Dialect's - DB-API implementation mapped to TypeEngine implementations used - by the dialect. - - This is used to apply types to result sets based on the DB-API - types present in cursor.description; it only takes effect for - result sets against textual statements where no explicit - typemap was present. - - colspecs - A dictionary of TypeEngine classes from sqlalchemy.types mapped - to subclasses that are specific to the dialect class. This - dictionary is class-level only and is not accessed from the - dialect instance itself. - - supports_default_values - Indicates if the construct ``INSERT INTO tablename DEFAULT - VALUES`` is supported - - supports_sequences - Indicates if the dialect supports CREATE SEQUENCE or similar. - - sequences_optional - If True, indicates if the "optional" flag on the Sequence() construct - should signal to not generate a CREATE SEQUENCE. Applies only to - dialects that support sequences. Currently used only to allow Postgresql - SERIAL to be used on a column that specifies Sequence() for usage on - other backends. - - supports_native_enum - Indicates if the dialect supports a native ENUM construct. - This will prevent types.Enum from generating a CHECK - constraint when that type is used. - - supports_native_boolean - Indicates if the dialect supports a native boolean construct. - This will prevent types.Boolean from generating a CHECK - constraint when that type is used. - - dbapi_exception_translation_map - A dictionary of names that will contain as values the names of - pep-249 exceptions ("IntegrityError", "OperationalError", etc) - keyed to alternate class names, to support the case where a - DBAPI has exception classes that aren't named as they are - referred to (e.g. IntegrityError = MyException). In the vast - majority of cases this dictionary is empty. - - .. versionadded:: 1.0.5 - - """ - - _has_events = False - - def create_connect_args(self, url): - """Build DB-API compatible connection arguments. - - Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple - consisting of a `*args`/`**kwargs` suitable to send directly - to the dbapi's connect function. - - """ - - raise NotImplementedError() - - @classmethod - def type_descriptor(cls, typeobj): - """Transform a generic type to a dialect-specific type. - - Dialect classes will usually use the - :func:`.types.adapt_type` function in the types module to - accomplish this. - - The returned result is cached *per dialect class* so can - contain no dialect-instance state. - - """ - - raise NotImplementedError() - - def initialize(self, connection): - """Called during strategized creation of the dialect with a - connection. - - Allows dialects to configure options based on server version info or - other properties. - - The connection passed here is a SQLAlchemy Connection object, - with full capabilities. - - The initialize() method of the base dialect should be called via - super(). - - """ - - pass - - def reflecttable( - self, connection, table, include_columns, exclude_columns): - """Load table description from the database. - - Given a :class:`.Connection` and a - :class:`~sqlalchemy.schema.Table` object, reflect its columns and - properties from the database. 
- - The implementation of this method is provided by - :meth:`.DefaultDialect.reflecttable`, which makes use of - :class:`.Inspector` to retrieve column information. - - Dialects should **not** seek to implement this method, and should - instead implement individual schema inspection operations such as - :meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`, - etc. - - """ - - raise NotImplementedError() - - def get_columns(self, connection, table_name, schema=None, **kw): - """Return information about columns in `table_name`. - - Given a :class:`.Connection`, a string - `table_name`, and an optional string `schema`, return column - information as a list of dictionaries with these keys: - - name - the column's name - - type - [sqlalchemy.types#TypeEngine] - - nullable - boolean - - default - the column's default value - - autoincrement - boolean - - sequence - a dictionary of the form - {'name' : str, 'start' :int, 'increment': int, 'minvalue': int, - 'maxvalue': int, 'nominvalue': bool, 'nomaxvalue': bool, - 'cycle': bool} - - Additional column attributes may be present. - """ - - raise NotImplementedError() - - def get_primary_keys(self, connection, table_name, schema=None, **kw): - """Return information about primary keys in `table_name`. - - - Deprecated. This method is only called by the default - implementation of :meth:`.Dialect.get_pk_constraint`. Dialects should - instead implement the :meth:`.Dialect.get_pk_constraint` method - directly. - - """ - - raise NotImplementedError() - - def get_pk_constraint(self, connection, table_name, schema=None, **kw): - """Return information about the primary key constraint on - table_name`. - - Given a :class:`.Connection`, a string - `table_name`, and an optional string `schema`, return primary - key information as a dictionary with these keys: - - constrained_columns - a list of column names that make up the primary key - - name - optional name of the primary key constraint. - - """ - raise NotImplementedError() - - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - """Return information about foreign_keys in `table_name`. - - Given a :class:`.Connection`, a string - `table_name`, and an optional string `schema`, return foreign - key information as a list of dicts with these keys: - - name - the constraint's name - - constrained_columns - a list of column names that make up the foreign key - - referred_schema - the name of the referred schema - - referred_table - the name of the referred table - - referred_columns - a list of column names in the referred table that correspond to - constrained_columns - """ - - raise NotImplementedError() - - def get_table_names(self, connection, schema=None, **kw): - """Return a list of table names for `schema`.""" - - raise NotImplementedError() - - def get_temp_table_names(self, connection, schema=None, **kw): - """Return a list of temporary table names on the given connection, - if supported by the underlying backend. - - """ - - raise NotImplementedError() - - def get_view_names(self, connection, schema=None, **kw): - """Return a list of all view names available in the database. - - schema: - Optional, retrieve names from a non-default schema. - """ - - raise NotImplementedError() - - def get_temp_view_names(self, connection, schema=None, **kw): - """Return a list of temporary view names on the given connection, - if supported by the underlying backend. 
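Dialect authors implement the inspection methods above, but applications normally consume them through the ``Inspector`` facade; a minimal sketch (the table definition is illustrative)::

    from sqlalchemy import create_engine
    from sqlalchemy.engine import reflection

    engine = create_engine("sqlite://")
    engine.execute("create table t (id integer primary key, name text)")

    insp = reflection.Inspector.from_engine(engine)
    print(insp.get_table_names())       # ['t']
    print(insp.get_pk_constraint("t"))  # {'constrained_columns': ['id'], ...}
    # Each entry carries the keys documented above: name, type, nullable, ...
    print(insp.get_columns("t"))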
- - """ - - raise NotImplementedError() - - def get_view_definition(self, connection, view_name, schema=None, **kw): - """Return view definition. - - Given a :class:`.Connection`, a string - `view_name`, and an optional string `schema`, return the view - definition. - """ - - raise NotImplementedError() - - def get_indexes(self, connection, table_name, schema=None, **kw): - """Return information about indexes in `table_name`. - - Given a :class:`.Connection`, a string - `table_name` and an optional string `schema`, return index - information as a list of dictionaries with these keys: - - name - the index's name - - column_names - list of column names in order - - unique - boolean - """ - - raise NotImplementedError() - - def get_unique_constraints( - self, connection, table_name, schema=None, **kw): - """Return information about unique constraints in `table_name`. - - Given a string `table_name` and an optional string `schema`, return - unique constraint information as a list of dicts with these keys: - - name - the unique constraint's name - - column_names - list of column names in order - - \**kw - other options passed to the dialect's get_unique_constraints() - method. - - .. versionadded:: 0.9.0 - - """ - - raise NotImplementedError() - - def normalize_name(self, name): - """convert the given name to lowercase if it is detected as - case insensitive. - - this method is only used if the dialect defines - requires_name_normalize=True. - - """ - raise NotImplementedError() - - def denormalize_name(self, name): - """convert the given name to a case insensitive identifier - for the backend if it is an all-lowercase name. - - this method is only used if the dialect defines - requires_name_normalize=True. - - """ - raise NotImplementedError() - - def has_table(self, connection, table_name, schema=None): - """Check the existence of a particular table in the database. - - Given a :class:`.Connection` object and a string - `table_name`, return True if the given table (possibly within - the specified `schema`) exists in the database, False - otherwise. - """ - - raise NotImplementedError() - - def has_sequence(self, connection, sequence_name, schema=None): - """Check the existence of a particular sequence in the database. - - Given a :class:`.Connection` object and a string - `sequence_name`, return True if the given sequence exists in - the database, False otherwise. - """ - - raise NotImplementedError() - - def _get_server_version_info(self, connection): - """Retrieve the server version info from the given connection. - - This is used by the default implementation to populate the - "server_version_info" attribute and is called exactly - once upon first connect. - - """ - - raise NotImplementedError() - - def _get_default_schema_name(self, connection): - """Return the string name of the currently selected schema from - the given connection. - - This is used by the default implementation to populate the - "default_schema_name" attribute and is called exactly - once upon first connect. - - """ - - raise NotImplementedError() - - def do_begin(self, dbapi_connection): - """Provide an implementation of ``connection.begin()``, given a - DB-API connection. - - The DBAPI has no dedicated "begin" method and it is expected - that transactions are implicit. This hook is provided for those - DBAPIs that might need additional help in this area. - - Note that :meth:`.Dialect.do_begin` is not called unless a - :class:`.Transaction` object is in use. 
The - :meth:`.Dialect.do_autocommit` - hook is provided for DBAPIs that need some extra commands emitted - after a commit in order to enter the next transaction, when the - SQLAlchemy :class:`.Connection` is used in its default "autocommit" - mode. - - :param dbapi_connection: a DBAPI connection, typically - proxied within a :class:`.ConnectionFairy`. - - """ - - raise NotImplementedError() - - def do_rollback(self, dbapi_connection): - """Provide an implementation of ``connection.rollback()``, given - a DB-API connection. - - :param dbapi_connection: a DBAPI connection, typically - proxied within a :class:`.ConnectionFairy`. - - """ - - raise NotImplementedError() - - def do_commit(self, dbapi_connection): - """Provide an implementation of ``connection.commit()``, given a - DB-API connection. - - :param dbapi_connection: a DBAPI connection, typically - proxied within a :class:`.ConnectionFairy`. - - """ - - raise NotImplementedError() - - def do_close(self, dbapi_connection): - """Provide an implementation of ``connection.close()``, given a DBAPI - connection. - - This hook is called by the :class:`.Pool` when a connection has been - detached from the pool, or is being returned beyond the normal - capacity of the pool. - - .. versionadded:: 0.8 - - """ - - raise NotImplementedError() - - def create_xid(self): - """Create a two-phase transaction ID. - - This id will be passed to do_begin_twophase(), - do_rollback_twophase(), do_commit_twophase(). Its format is - unspecified. - """ - - raise NotImplementedError() - - def do_savepoint(self, connection, name): - """Create a savepoint with the given name. - - :param connection: a :class:`.Connection`. - :param name: savepoint name. - - """ - - raise NotImplementedError() - - def do_rollback_to_savepoint(self, connection, name): - """Rollback a connection to the named savepoint. - - :param connection: a :class:`.Connection`. - :param name: savepoint name. - - """ - - raise NotImplementedError() - - def do_release_savepoint(self, connection, name): - """Release the named savepoint on a connection. - - :param connection: a :class:`.Connection`. - :param name: savepoint name. - """ - - raise NotImplementedError() - - def do_begin_twophase(self, connection, xid): - """Begin a two phase transaction on the given connection. - - :param connection: a :class:`.Connection`. - :param xid: xid - - """ - - raise NotImplementedError() - - def do_prepare_twophase(self, connection, xid): - """Prepare a two phase transaction on the given connection. - - :param connection: a :class:`.Connection`. - :param xid: xid - - """ - - raise NotImplementedError() - - def do_rollback_twophase(self, connection, xid, is_prepared=True, - recover=False): - """Rollback a two phase transaction on the given connection. - - :param connection: a :class:`.Connection`. - :param xid: xid - :param is_prepared: whether or not - :meth:`.TwoPhaseTransaction.prepare` was called. - :param recover: if the recover flag was passed. - - """ - - raise NotImplementedError() - - def do_commit_twophase(self, connection, xid, is_prepared=True, - recover=False): - """Commit a two phase transaction on the given connection. - - - :param connection: a :class:`.Connection`. - :param xid: xid - :param is_prepared: whether or not - :meth:`.TwoPhaseTransaction.prepare` was called. - :param recover: if the recover flag was passed. 
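The two-phase hooks above back the :meth:`.Connection.begin_twophase` API; a hedged sketch, assuming a backend and DBAPI that actually support two-phase commit (the DSN and table are placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    conn = engine.connect()

    trans = conn.begin_twophase()   # -> do_begin_twophase(conn, xid)
    conn.execute("insert into t (name) values ('c')")
    trans.prepare()                 # -> do_prepare_twophase(conn, xid)
    trans.commit()                  # -> do_commit_twophase(conn, xid, is_prepared=True)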
- - """ - - raise NotImplementedError() - - def do_recover_twophase(self, connection): - """Recover list of uncommited prepared two phase transaction - identifiers on the given connection. - - :param connection: a :class:`.Connection`. - - """ - - raise NotImplementedError() - - def do_executemany(self, cursor, statement, parameters, context=None): - """Provide an implementation of ``cursor.executemany(statement, - parameters)``.""" - - raise NotImplementedError() - - def do_execute(self, cursor, statement, parameters, context=None): - """Provide an implementation of ``cursor.execute(statement, - parameters)``.""" - - raise NotImplementedError() - - def do_execute_no_params(self, cursor, statement, parameters, - context=None): - """Provide an implementation of ``cursor.execute(statement)``. - - The parameter collection should not be sent. - - """ - - raise NotImplementedError() - - def is_disconnect(self, e, connection, cursor): - """Return True if the given DB-API error indicates an invalid - connection""" - - raise NotImplementedError() - - def connect(self): - """return a callable which sets up a newly created DBAPI connection. - - The callable accepts a single argument "conn" which is the - DBAPI connection itself. It has no return value. - - This is used to set dialect-wide per-connection options such as - isolation modes, unicode modes, etc. - - If a callable is returned, it will be assembled into a pool listener - that receives the direct DBAPI connection, with all wrappers removed. - - If None is returned, no listener will be generated. - - """ - return None - - def reset_isolation_level(self, dbapi_conn): - """Given a DBAPI connection, revert its isolation to the default. - - Note that this is a dialect-level method which is used as part - of the implementation of the :class:`.Connection` and - :class:`.Engine` - isolation level facilities; these APIs should be preferred for - most typical use cases. - - .. seealso:: - - :meth:`.Connection.get_isolation_level` - view current level - - :attr:`.Connection.default_isolation_level` - view default level - - :paramref:`.Connection.execution_options.isolation_level` - - set per :class:`.Connection` isolation level - - :paramref:`.create_engine.isolation_level` - - set per :class:`.Engine` isolation level - - """ - - raise NotImplementedError() - - def set_isolation_level(self, dbapi_conn, level): - """Given a DBAPI connection, set its isolation level. - - Note that this is a dialect-level method which is used as part - of the implementation of the :class:`.Connection` and - :class:`.Engine` - isolation level facilities; these APIs should be preferred for - most typical use cases. - - .. seealso:: - - :meth:`.Connection.get_isolation_level` - view current level - - :attr:`.Connection.default_isolation_level` - view default level - - :paramref:`.Connection.execution_options.isolation_level` - - set per :class:`.Connection` isolation level - - :paramref:`.create_engine.isolation_level` - - set per :class:`.Engine` isolation level - - """ - - raise NotImplementedError() - - def get_isolation_level(self, dbapi_conn): - """Given a DBAPI connection, return its isolation level. - - When working with a :class:`.Connection` object, the corresponding - DBAPI connection may be procured using the - :attr:`.Connection.connection` accessor. 
- - Note that this is a dialect-level method which is used as part - of the implementation of the :class:`.Connection` and - :class:`.Engine` isolation level facilities; - these APIs should be preferred for most typical use cases. - - - .. seealso:: - - :meth:`.Connection.get_isolation_level` - view current level - - :attr:`.Connection.default_isolation_level` - view default level - - :paramref:`.Connection.execution_options.isolation_level` - - set per :class:`.Connection` isolation level - - :paramref:`.create_engine.isolation_level` - - set per :class:`.Engine` isolation level - - - """ - - raise NotImplementedError() - - @classmethod - def get_dialect_cls(cls, url): - """Given a URL, return the :class:`.Dialect` that will be used. - - This is a hook that allows an external plugin to provide functionality - around an existing dialect, by allowing the plugin to be loaded - from the url based on an entrypoint, and then the plugin returns - the actual dialect to be used. - - By default this just returns the cls. - - .. versionadded:: 1.0.3 - - """ - return cls - - @classmethod - def engine_created(cls, engine): - """A convenience hook called before returning the final :class:`.Engine`. - - If the dialect returned a different class from the - :meth:`.get_dialect_cls` - method, then the hook is called on both classes, first on - the dialect class returned by the :meth:`.get_dialect_cls` method and - then on the class on which the method was called. - - The hook should be used by dialects and/or wrappers to apply special - events to the engine or its components. In particular, it allows - a dialect-wrapping class to apply dialect-level events. - - .. versionadded:: 1.0.3 - - """ - pass - - -class ExecutionContext(object): - """A messenger object for a Dialect that corresponds to a single - execution. - - ExecutionContext should have these data members: - - connection - Connection object which can be freely used by default value - generators to execute SQL. This Connection should reference the - same underlying connection/transactional resources of - root_connection. - - root_connection - Connection object which is the source of this ExecutionContext. This - Connection may have close_with_result=True set, in which case it can - only be used once. - - dialect - dialect which created this ExecutionContext. - - cursor - DB-API cursor procured from the connection, - - compiled - if passed to constructor, sqlalchemy.engine.base.Compiled object - being executed, - - statement - string version of the statement to be executed. Is either - passed to the constructor, or must be created from the - sql.Compiled object by the time pre_exec() has completed. - - parameters - bind parameters passed to the execute() method. For compiled - statements, this is a dictionary or list of dictionaries. For - textual statements, it should be in a format suitable for the - dialect's paramstyle (i.e. dict or list of dicts for non - positional, list or list of lists/tuples for positional). - - isinsert - True if the statement is an INSERT. - - isupdate - True if the statement is an UPDATE. - - should_autocommit - True if the statement is a "committable" statement. - - prefetch_cols - a list of Column objects for which a client-side default - was fired off. Applies to inserts and updates. - - postfetch_cols - a list of Column objects for which a server-side default or - inline SQL expression value was fired off. Applies to inserts - and updates. 
- """ - - exception = None - """A DBAPI-level exception that was caught when this ExecutionContext - attempted to execute a statement. - - This attribute is meaningful only within the - :meth:`.ConnectionEvents.dbapi_error` event. - - .. versionadded:: 0.9.7 - - .. seealso:: - - :attr:`.ExecutionContext.is_disconnect` - - :meth:`.ConnectionEvents.dbapi_error` - - """ - - is_disconnect = None - """Boolean flag set to True or False when a DBAPI-level exception - is caught when this ExecutionContext attempted to execute a statement. - - This attribute is meaningful only within the - :meth:`.ConnectionEvents.dbapi_error` event. - - .. versionadded:: 0.9.7 - - .. seealso:: - - :attr:`.ExecutionContext.exception` - - :meth:`.ConnectionEvents.dbapi_error` - - """ - - def create_cursor(self): - """Return a new cursor generated from this ExecutionContext's - connection. - - Some dialects may wish to change the behavior of - connection.cursor(), such as postgresql which may return a PG - "server side" cursor. - """ - - raise NotImplementedError() - - def pre_exec(self): - """Called before an execution of a compiled statement. - - If a compiled statement was passed to this ExecutionContext, - the `statement` and `parameters` datamembers must be - initialized after this statement is complete. - """ - - raise NotImplementedError() - - def post_exec(self): - """Called after the execution of a compiled statement. - - If a compiled statement was passed to this ExecutionContext, - the `last_insert_ids`, `last_inserted_params`, etc. - datamembers should be available after this method completes. - """ - - raise NotImplementedError() - - def result(self): - """Return a result object corresponding to this ExecutionContext. - - Returns a ResultProxy. - """ - - raise NotImplementedError() - - def handle_dbapi_exception(self, e): - """Receive a DBAPI exception which occurred upon execute, result - fetch, etc.""" - - raise NotImplementedError() - - def should_autocommit_text(self, statement): - """Parse the given textual statement and return True if it refers to - a "committable" statement""" - - raise NotImplementedError() - - def lastrow_has_defaults(self): - """Return True if the last INSERT or UPDATE row contained - inlined or database-side defaults. - """ - - raise NotImplementedError() - - def get_rowcount(self): - """Return the DBAPI ``cursor.rowcount`` value, or in some - cases an interpreted value. - - See :attr:`.ResultProxy.rowcount` for details on this. - - """ - - raise NotImplementedError() - - -class Connectable(object): - """Interface for an object which supports execution of SQL constructs. - - The two implementations of :class:`.Connectable` are - :class:`.Connection` and :class:`.Engine`. - - Connectable must also implement the 'dialect' member which references a - :class:`.Dialect` instance. - - """ - - def connect(self, **kwargs): - """Return a :class:`.Connection` object. - - Depending on context, this may be ``self`` if this object - is already an instance of :class:`.Connection`, or a newly - procured :class:`.Connection` if this object is an instance - of :class:`.Engine`. - - """ - - def contextual_connect(self): - """Return a :class:`.Connection` object which may be part of an ongoing - context. - - Depending on context, this may be ``self`` if this object - is already an instance of :class:`.Connection`, or a newly - procured :class:`.Connection` if this object is an instance - of :class:`.Engine`. 
- - """ - - raise NotImplementedError() - - @util.deprecated("0.7", - "Use the create() method on the given schema " - "object directly, i.e. :meth:`.Table.create`, " - ":meth:`.Index.create`, :meth:`.MetaData.create_all`") - def create(self, entity, **kwargs): - """Emit CREATE statements for the given schema entity. - """ - - raise NotImplementedError() - - @util.deprecated("0.7", - "Use the drop() method on the given schema " - "object directly, i.e. :meth:`.Table.drop`, " - ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`") - def drop(self, entity, **kwargs): - """Emit DROP statements for the given schema entity. - """ - - raise NotImplementedError() - - def execute(self, object, *multiparams, **params): - """Executes the given construct and returns a :class:`.ResultProxy`.""" - raise NotImplementedError() - - def scalar(self, object, *multiparams, **params): - """Executes and returns the first column of the first row. - - The underlying cursor is closed after execution. - """ - raise NotImplementedError() - - def _run_visitor(self, visitorcallable, element, - **kwargs): - raise NotImplementedError() - - def _execute_clauseelement(self, elem, multiparams=None, params=None): - raise NotImplementedError() - - -class ExceptionContext(object): - """Encapsulate information about an error condition in progress. - - This object exists solely to be passed to the - :meth:`.ConnectionEvents.handle_error` event, supporting an interface that - can be extended without backwards-incompatibility. - - .. versionadded:: 0.9.7 - - """ - - connection = None - """The :class:`.Connection` in use during the exception. - - This member is present, except in the case of a failure when - first connecting. - - .. seealso:: - - :attr:`.ExceptionContext.engine` - - - """ - - engine = None - """The :class:`.Engine` in use during the exception. - - This member should always be present, even in the case of a failure - when first connecting. - - .. versionadded:: 1.0.0 - - """ - - cursor = None - """The DBAPI cursor object. - - May be None. - - """ - - statement = None - """String SQL statement that was emitted directly to the DBAPI. - - May be None. - - """ - - parameters = None - """Parameter collection that was emitted directly to the DBAPI. - - May be None. - - """ - - original_exception = None - """The exception object which was caught. - - This member is always present. - - """ - - sqlalchemy_exception = None - """The :class:`sqlalchemy.exc.StatementError` which wraps the original, - and will be raised if exception handling is not circumvented by the event. - - May be None, as not all exception types are wrapped by SQLAlchemy. - For DBAPI-level exceptions that subclass the dbapi's Error class, this - field will always be present. - - """ - - chained_exception = None - """The exception that was returned by the previous handler in the - exception chain, if any. - - If present, this exception will be the one ultimately raised by - SQLAlchemy unless a subsequent handler replaces it. - - May be None. - - """ - - execution_context = None - """The :class:`.ExecutionContext` corresponding to the execution - operation in progress. - - This is present for statement execution operations, but not for - operations such as transaction begin/end. It also is not present when - the exception was raised before the :class:`.ExecutionContext` - could be constructed. 
- - Note that the :attr:`.ExceptionContext.statement` and - :attr:`.ExceptionContext.parameters` members may represent a - different value than that of the :class:`.ExecutionContext`, - potentially in the case where a - :meth:`.ConnectionEvents.before_cursor_execute` event or similar - modified the statement/parameters to be sent. - - May be None. - - """ - - is_disconnect = None - """Represent whether the exception as occurred represents a "disconnect" - condition. - - This flag will always be True or False within the scope of the - :meth:`.ConnectionEvents.handle_error` handler. - - SQLAlchemy will defer to this flag in order to determine whether or not - the connection should be invalidated subsequently. That is, by - assigning to this flag, a "disconnect" event which then results in - a connection and pool invalidation can be invoked or prevented by - changing this flag. - - """ - - invalidate_pool_on_disconnect = True - """Represent whether all connections in the pool should be invalidated - when a "disconnect" condition is in effect. - - Setting this flag to False within the scope of the - :meth:`.ConnectionEvents.handle_error` event will have the effect such - that the full collection of connections in the pool will not be - invalidated during a disconnect; only the current connection that is the - subject of the error will actually be invalidated. - - The purpose of this flag is for custom disconnect-handling schemes where - the invalidation of other connections in the pool is to be performed - based on other conditions, or even on a per-connection basis. - - .. versionadded:: 1.0.3 - - """ diff --git a/python/sqlalchemy/engine/reflection.py b/python/sqlalchemy/engine/reflection.py deleted file mode 100644 index 59eed51e..00000000 --- a/python/sqlalchemy/engine/reflection.py +++ /dev/null @@ -1,788 +0,0 @@ -# engine/reflection.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides an abstraction for obtaining database schema information. - -Usage Notes: - -Here are some general conventions when accessing the low level inspector -methods such as get_table_names, get_columns, etc. - -1. Inspector methods return lists of dicts in most cases for the following - reasons: - - * They're both standard types that can be serialized. - * Using a dict instead of a tuple allows easy expansion of attributes. - * Using a list for the outer structure maintains order and is easy to work - with (e.g. list comprehension [d['name'] for d in cols]). - -2. Records that contain a name, such as the column name in a column record - use the key 'name'. So for most return values, each record will have a - 'name' attribute.. -""" - -from .. import exc, sql -from ..sql import schema as sa_schema -from .. import util -from ..sql.type_api import TypeEngine -from ..util import deprecated -from ..util import topological -from .. 
import inspection -from .base import Connectable - - -@util.decorator -def cache(fn, self, con, *args, **kw): - info_cache = kw.get('info_cache', None) - if info_cache is None: - return fn(self, con, *args, **kw) - key = ( - fn.__name__, - tuple(a for a in args if isinstance(a, util.string_types)), - tuple((k, v) for k, v in kw.items() if - isinstance(v, - util.string_types + util.int_types + (float, ) - ) - ) - ) - ret = info_cache.get(key) - if ret is None: - ret = fn(self, con, *args, **kw) - info_cache[key] = ret - return ret - - -class Inspector(object): - """Performs database schema inspection. - - The Inspector acts as a proxy to the reflection methods of the - :class:`~sqlalchemy.engine.interfaces.Dialect`, providing a - consistent interface as well as caching support for previously - fetched metadata. - - A :class:`.Inspector` object is usually created via the - :func:`.inspect` function:: - - from sqlalchemy import inspect, create_engine - engine = create_engine('...') - insp = inspect(engine) - - The inspection method above is equivalent to using the - :meth:`.Inspector.from_engine` method, i.e.:: - - engine = create_engine('...') - insp = Inspector.from_engine(engine) - - Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` may opt - to return an :class:`.Inspector` subclass that provides additional - methods specific to the dialect's target database. - - """ - - def __init__(self, bind): - """Initialize a new :class:`.Inspector`. - - :param bind: a :class:`~sqlalchemy.engine.Connectable`, - which is typically an instance of - :class:`~sqlalchemy.engine.Engine` or - :class:`~sqlalchemy.engine.Connection`. - - For a dialect-specific instance of :class:`.Inspector`, see - :meth:`.Inspector.from_engine` - - """ - # this might not be a connection, it could be an engine. - self.bind = bind - - # set the engine - if hasattr(bind, 'engine'): - self.engine = bind.engine - else: - self.engine = bind - - if self.engine is bind: - # if engine, ensure initialized - bind.connect().close() - - self.dialect = self.engine.dialect - self.info_cache = {} - - @classmethod - def from_engine(cls, bind): - """Construct a new dialect-specific Inspector object from the given - engine or connection. - - :param bind: a :class:`~sqlalchemy.engine.Connectable`, - which is typically an instance of - :class:`~sqlalchemy.engine.Engine` or - :class:`~sqlalchemy.engine.Connection`. - - This method differs from direct a direct constructor call of - :class:`.Inspector` in that the - :class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to - provide a dialect-specific :class:`.Inspector` instance, which may - provide additional methods. - - See the example at :class:`.Inspector`. - - """ - if hasattr(bind.dialect, 'inspector'): - return bind.dialect.inspector(bind) - return Inspector(bind) - - @inspection._inspects(Connectable) - def _insp(bind): - return Inspector.from_engine(bind) - - @property - def default_schema_name(self): - """Return the default schema name presented by the dialect - for the current engine's database user. - - E.g. this is typically ``public`` for Postgresql and ``dbo`` - for SQL Server. - - """ - return self.dialect.default_schema_name - - def get_schema_names(self): - """Return all schema names. 
- """ - - if hasattr(self.dialect, 'get_schema_names'): - return self.dialect.get_schema_names(self.bind, - info_cache=self.info_cache) - return [] - - def get_table_names(self, schema=None, order_by=None): - """Return all table names in referred to within a particular schema. - - The names are expected to be real tables only, not views. - Views are instead returned using the :meth:`.Inspector.get_view_names` - method. - - - :param schema: Schema name. If ``schema`` is left at ``None``, the - database's default schema is - used, else the named schema is searched. If the database does not - support named schemas, behavior is undefined if ``schema`` is not - passed as ``None``. For special quoting, use :class:`.quoted_name`. - - :param order_by: Optional, may be the string "foreign_key" to sort - the result on foreign key dependencies. Does not automatically - resolve cycles, and will raise :class:`.CircularDependencyError` - if cycles exist. - - .. deprecated:: 1.0.0 - see - :meth:`.Inspector.get_sorted_table_and_fkc_names` for a version - of this which resolves foreign key cycles between tables - automatically. - - .. versionchanged:: 0.8 the "foreign_key" sorting sorts tables - in order of dependee to dependent; that is, in creation - order, rather than in drop order. This is to maintain - consistency with similar features such as - :attr:`.MetaData.sorted_tables` and :func:`.util.sort_tables`. - - .. seealso:: - - :meth:`.Inspector.get_sorted_table_and_fkc_names` - - :attr:`.MetaData.sorted_tables` - - """ - - if hasattr(self.dialect, 'get_table_names'): - tnames = self.dialect.get_table_names( - self.bind, schema, info_cache=self.info_cache) - else: - tnames = self.engine.table_names(schema) - if order_by == 'foreign_key': - tuples = [] - for tname in tnames: - for fkey in self.get_foreign_keys(tname, schema): - if tname != fkey['referred_table']: - tuples.append((fkey['referred_table'], tname)) - tnames = list(topological.sort(tuples, tnames)) - return tnames - - def get_sorted_table_and_fkc_names(self, schema=None): - """Return dependency-sorted table and foreign key constraint names in - referred to within a particular schema. - - This will yield 2-tuples of - ``(tablename, [(tname, fkname), (tname, fkname), ...])`` - consisting of table names in CREATE order grouped with the foreign key - constraint names that are not detected as belonging to a cycle. - The final element - will be ``(None, [(tname, fkname), (tname, fkname), ..])`` - which will consist of remaining - foreign key constraint names that would require a separate CREATE - step after-the-fact, based on dependencies between tables. - - .. versionadded:: 1.0.- - - .. seealso:: - - :meth:`.Inspector.get_table_names` - - :func:`.sort_tables_and_constraints` - similar method which works - with an already-given :class:`.MetaData`. 
- - """ - if hasattr(self.dialect, 'get_table_names'): - tnames = self.dialect.get_table_names( - self.bind, schema, info_cache=self.info_cache) - else: - tnames = self.engine.table_names(schema) - - tuples = set() - remaining_fkcs = set() - - fknames_for_table = {} - for tname in tnames: - fkeys = self.get_foreign_keys(tname, schema) - fknames_for_table[tname] = set( - [fk['name'] for fk in fkeys] - ) - for fkey in fkeys: - if tname != fkey['referred_table']: - tuples.add((fkey['referred_table'], tname)) - try: - candidate_sort = list(topological.sort(tuples, tnames)) - except exc.CircularDependencyError as err: - for edge in err.edges: - tuples.remove(edge) - remaining_fkcs.update( - (edge[1], fkc) - for fkc in fknames_for_table[edge[1]] - ) - - candidate_sort = list(topological.sort(tuples, tnames)) - return [ - (tname, fknames_for_table[tname].difference(remaining_fkcs)) - for tname in candidate_sort - ] + [(None, list(remaining_fkcs))] - - def get_temp_table_names(self): - """return a list of temporary table names for the current bind. - - This method is unsupported by most dialects; currently - only SQLite implements it. - - .. versionadded:: 1.0.0 - - """ - return self.dialect.get_temp_table_names( - self.bind, info_cache=self.info_cache) - - def get_temp_view_names(self): - """return a list of temporary view names for the current bind. - - This method is unsupported by most dialects; currently - only SQLite implements it. - - .. versionadded:: 1.0.0 - - """ - return self.dialect.get_temp_view_names( - self.bind, info_cache=self.info_cache) - - def get_table_options(self, table_name, schema=None, **kw): - """Return a dictionary of options specified when the table of the - given name was created. - - This currently includes some options that apply to MySQL tables. - - :param table_name: string name of the table. For special quoting, - use :class:`.quoted_name`. - - :param schema: string schema name; if omitted, uses the default schema - of the database connection. For special quoting, - use :class:`.quoted_name`. - - """ - if hasattr(self.dialect, 'get_table_options'): - return self.dialect.get_table_options( - self.bind, table_name, schema, - info_cache=self.info_cache, **kw) - return {} - - def get_view_names(self, schema=None): - """Return all view names in `schema`. - - :param schema: Optional, retrieve names from a non-default schema. - For special quoting, use :class:`.quoted_name`. - - """ - - return self.dialect.get_view_names(self.bind, schema, - info_cache=self.info_cache) - - def get_view_definition(self, view_name, schema=None): - """Return definition for `view_name`. - - :param schema: Optional, retrieve names from a non-default schema. - For special quoting, use :class:`.quoted_name`. - - """ - - return self.dialect.get_view_definition( - self.bind, view_name, schema, info_cache=self.info_cache) - - def get_columns(self, table_name, schema=None, **kw): - """Return information about columns in `table_name`. - - Given a string `table_name` and an optional string `schema`, return - column information as a list of dicts with these keys: - - name - the column's name - - type - :class:`~sqlalchemy.types.TypeEngine` - - nullable - boolean - - default - the column's default value - - attrs - dict containing optional column attributes - - :param table_name: string name of the table. For special quoting, - use :class:`.quoted_name`. - - :param schema: string schema name; if omitted, uses the default schema - of the database connection. 
For special quoting, - use :class:`.quoted_name`. - - """ - - col_defs = self.dialect.get_columns(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw) - for col_def in col_defs: - # make this easy and only return instances for coltype - coltype = col_def['type'] - if not isinstance(coltype, TypeEngine): - col_def['type'] = coltype() - return col_defs - - @deprecated('0.7', 'Call to deprecated method get_primary_keys.' - ' Use get_pk_constraint instead.') - def get_primary_keys(self, table_name, schema=None, **kw): - """Return information about primary keys in `table_name`. - - Given a string `table_name`, and an optional string `schema`, return - primary key information as a list of column names. - """ - - return self.dialect.get_pk_constraint(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw)['constrained_columns'] - - def get_pk_constraint(self, table_name, schema=None, **kw): - """Return information about primary key constraint on `table_name`. - - Given a string `table_name`, and an optional string `schema`, return - primary key information as a dictionary with these keys: - - constrained_columns - a list of column names that make up the primary key - - name - optional name of the primary key constraint. - - :param table_name: string name of the table. For special quoting, - use :class:`.quoted_name`. - - :param schema: string schema name; if omitted, uses the default schema - of the database connection. For special quoting, - use :class:`.quoted_name`. - - """ - return self.dialect.get_pk_constraint(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw) - - def get_foreign_keys(self, table_name, schema=None, **kw): - """Return information about foreign_keys in `table_name`. - - Given a string `table_name`, and an optional string `schema`, return - foreign key information as a list of dicts with these keys: - - constrained_columns - a list of column names that make up the foreign key - - referred_schema - the name of the referred schema - - referred_table - the name of the referred table - - referred_columns - a list of column names in the referred table that correspond to - constrained_columns - - name - optional name of the foreign key constraint. - - :param table_name: string name of the table. For special quoting, - use :class:`.quoted_name`. - - :param schema: string schema name; if omitted, uses the default schema - of the database connection. For special quoting, - use :class:`.quoted_name`. - - """ - - return self.dialect.get_foreign_keys(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw) - - def get_indexes(self, table_name, schema=None, **kw): - """Return information about indexes in `table_name`. - - Given a string `table_name` and an optional string `schema`, return - index information as a list of dicts with these keys: - - name - the index's name - - column_names - list of column names in order - - unique - boolean - - dialect_options - dict of dialect-specific index options. May not be present - for all dialects. - - .. versionadded:: 1.0.0 - - :param table_name: string name of the table. For special quoting, - use :class:`.quoted_name`. - - :param schema: string schema name; if omitted, uses the default schema - of the database connection. For special quoting, - use :class:`.quoted_name`. 
- - """ - - return self.dialect.get_indexes(self.bind, table_name, - schema, - info_cache=self.info_cache, **kw) - - def get_unique_constraints(self, table_name, schema=None, **kw): - """Return information about unique constraints in `table_name`. - - Given a string `table_name` and an optional string `schema`, return - unique constraint information as a list of dicts with these keys: - - name - the unique constraint's name - - column_names - list of column names in order - - :param table_name: string name of the table. For special quoting, - use :class:`.quoted_name`. - - :param schema: string schema name; if omitted, uses the default schema - of the database connection. For special quoting, - use :class:`.quoted_name`. - - .. versionadded:: 0.8.4 - - """ - - return self.dialect.get_unique_constraints( - self.bind, table_name, schema, info_cache=self.info_cache, **kw) - - def reflecttable(self, table, include_columns, exclude_columns=()): - """Given a Table object, load its internal constructs based on - introspection. - - This is the underlying method used by most dialects to produce - table reflection. Direct usage is like:: - - from sqlalchemy import create_engine, MetaData, Table - from sqlalchemy.engine import reflection - - engine = create_engine('...') - meta = MetaData() - user_table = Table('user', meta) - insp = Inspector.from_engine(engine) - insp.reflecttable(user_table, None) - - :param table: a :class:`~sqlalchemy.schema.Table` instance. - :param include_columns: a list of string column names to include - in the reflection process. If ``None``, all columns are reflected. - - """ - dialect = self.bind.dialect - - schema = table.schema - table_name = table.name - - # get table-level arguments that are specifically - # intended for reflection, e.g. oracle_resolve_synonyms. 
- # these are unconditionally passed to related Table - # objects - reflection_options = dict( - (k, table.dialect_kwargs.get(k)) - for k in dialect.reflection_options - if k in table.dialect_kwargs - ) - - # reflect table options, like mysql_engine - tbl_opts = self.get_table_options( - table_name, schema, **table.dialect_kwargs) - if tbl_opts: - # add additional kwargs to the Table if the dialect - # returned them - table._validate_dialect_kwargs(tbl_opts) - - if util.py2k: - if isinstance(schema, str): - schema = schema.decode(dialect.encoding) - if isinstance(table_name, str): - table_name = table_name.decode(dialect.encoding) - - found_table = False - cols_by_orig_name = {} - - for col_d in self.get_columns( - table_name, schema, **table.dialect_kwargs): - found_table = True - - self._reflect_column( - table, col_d, include_columns, - exclude_columns, cols_by_orig_name) - - if not found_table: - raise exc.NoSuchTableError(table.name) - - self._reflect_pk( - table_name, schema, table, cols_by_orig_name, exclude_columns) - - self._reflect_fk( - table_name, schema, table, cols_by_orig_name, - exclude_columns, reflection_options) - - self._reflect_indexes( - table_name, schema, table, cols_by_orig_name, - include_columns, exclude_columns, reflection_options) - - self._reflect_unique_constraints( - table_name, schema, table, cols_by_orig_name, - include_columns, exclude_columns, reflection_options) - - def _reflect_column( - self, table, col_d, include_columns, - exclude_columns, cols_by_orig_name): - - orig_name = col_d['name'] - - table.dispatch.column_reflect(self, table, col_d) - - # fetch name again as column_reflect is allowed to - # change it - name = col_d['name'] - if (include_columns and name not in include_columns) \ - or (exclude_columns and name in exclude_columns): - return - - coltype = col_d['type'] - - col_kw = dict( - (k, col_d[k]) - for k in ['nullable', 'autoincrement', 'quote', 'info', 'key'] - if k in col_d - ) - - colargs = [] - if col_d.get('default') is not None: - # the "default" value is assumed to be a literal SQL - # expression, so is wrapped in text() so that no quoting - # occurs on re-issuance. - colargs.append( - sa_schema.DefaultClause( - sql.text(col_d['default']), _reflected=True - ) - ) - - if 'sequence' in col_d: - self._reflect_col_sequence(col_d, colargs) - - cols_by_orig_name[orig_name] = col = \ - sa_schema.Column(name, coltype, *colargs, **col_kw) - - if col.key in table.primary_key: - col.primary_key = True - table.append_column(col) - - def _reflect_col_sequence(self, col_d, colargs): - if 'sequence' in col_d: - # TODO: mssql and sybase are using this. 
- seq = col_d['sequence'] - sequence = sa_schema.Sequence(seq['name'], 1, 1) - if 'start' in seq: - sequence.start = seq['start'] - if 'increment' in seq: - sequence.increment = seq['increment'] - colargs.append(sequence) - - def _reflect_pk( - self, table_name, schema, table, - cols_by_orig_name, exclude_columns): - pk_cons = self.get_pk_constraint( - table_name, schema, **table.dialect_kwargs) - if pk_cons: - pk_cols = [ - cols_by_orig_name[pk] - for pk in pk_cons['constrained_columns'] - if pk in cols_by_orig_name and pk not in exclude_columns - ] - - # update pk constraint name - table.primary_key.name = pk_cons.get('name') - - # tell the PKConstraint to re-initialize - # its column collection - table.primary_key._reload(pk_cols) - - def _reflect_fk( - self, table_name, schema, table, cols_by_orig_name, - exclude_columns, reflection_options): - fkeys = self.get_foreign_keys( - table_name, schema, **table.dialect_kwargs) - for fkey_d in fkeys: - conname = fkey_d['name'] - # look for columns by orig name in cols_by_orig_name, - # but support columns that are in-Python only as fallback - constrained_columns = [ - cols_by_orig_name[c].key - if c in cols_by_orig_name else c - for c in fkey_d['constrained_columns'] - ] - if exclude_columns and set(constrained_columns).intersection( - exclude_columns): - continue - referred_schema = fkey_d['referred_schema'] - referred_table = fkey_d['referred_table'] - referred_columns = fkey_d['referred_columns'] - refspec = [] - if referred_schema is not None: - sa_schema.Table(referred_table, table.metadata, - autoload=True, schema=referred_schema, - autoload_with=self.bind, - **reflection_options - ) - for column in referred_columns: - refspec.append(".".join( - [referred_schema, referred_table, column])) - else: - sa_schema.Table(referred_table, table.metadata, autoload=True, - autoload_with=self.bind, - **reflection_options - ) - for column in referred_columns: - refspec.append(".".join([referred_table, column])) - if 'options' in fkey_d: - options = fkey_d['options'] - else: - options = {} - table.append_constraint( - sa_schema.ForeignKeyConstraint(constrained_columns, refspec, - conname, link_to_name=True, - **options)) - - def _reflect_indexes( - self, table_name, schema, table, cols_by_orig_name, - include_columns, exclude_columns, reflection_options): - # Indexes - indexes = self.get_indexes(table_name, schema) - for index_d in indexes: - name = index_d['name'] - columns = index_d['column_names'] - unique = index_d['unique'] - flavor = index_d.get('type', 'index') - dialect_options = index_d.get('dialect_options', {}) - - duplicates = index_d.get('duplicates_constraint') - if include_columns and \ - not set(columns).issubset(include_columns): - util.warn( - "Omitting %s key for (%s), key covers omitted columns." 
% - (flavor, ', '.join(columns))) - continue - if duplicates: - continue - # look for columns by orig name in cols_by_orig_name, - # but support columns that are in-Python only as fallback - idx_cols = [] - for c in columns: - try: - idx_col = cols_by_orig_name[c] \ - if c in cols_by_orig_name else table.c[c] - except KeyError: - util.warn( - "%s key '%s' was not located in " - "columns for table '%s'" % ( - flavor, c, table_name - )) - else: - idx_cols.append(idx_col) - - sa_schema.Index( - name, *idx_cols, - **dict(list(dialect_options.items()) + [('unique', unique)]) - ) - - def _reflect_unique_constraints( - self, table_name, schema, table, cols_by_orig_name, - include_columns, exclude_columns, reflection_options): - - # Unique Constraints - try: - constraints = self.get_unique_constraints(table_name, schema) - except NotImplementedError: - # optional dialect feature - return - - for const_d in constraints: - conname = const_d['name'] - columns = const_d['column_names'] - duplicates = const_d.get('duplicates_index') - if include_columns and \ - not set(columns).issubset(include_columns): - util.warn( - "Omitting unique constraint key for (%s), " - "key covers omitted columns." % - ', '.join(columns)) - continue - if duplicates: - continue - # look for columns by orig name in cols_by_orig_name, - # but support columns that are in-Python only as fallback - constrained_cols = [] - for c in columns: - try: - constrained_col = cols_by_orig_name[c] \ - if c in cols_by_orig_name else table.c[c] - except KeyError: - util.warn( - "unique constraint key '%s' was not located in " - "columns for table '%s'" % (c, table_name)) - else: - constrained_cols.append(constrained_col) - table.append_constraint( - sa_schema.UniqueConstraint(*constrained_cols, name=conname)) diff --git a/python/sqlalchemy/engine/result.py b/python/sqlalchemy/engine/result.py deleted file mode 100644 index 74a0fce7..00000000 --- a/python/sqlalchemy/engine/result.py +++ /dev/null @@ -1,1273 +0,0 @@ -# engine/result.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Define result set constructs including :class:`.ResultProxy` -and :class:`.RowProxy.""" - - -from .. import exc, util -from ..sql import expression, sqltypes -import collections -import operator - -# This reconstructor is necessary so that pickles with the C extension or -# without use the same Binary format. -try: - # We need a different reconstructor on the C extension so that we can - # add extra checks that fields have correctly been initialized by - # __setstate__. - from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor - - # The extra function embedding is needed so that the - # reconstructor function has the same signature whether or not - # the extension is present. 
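# The reconstructor machinery defined just below exists so that rows pickle
# identically with or without the C extension. A round-trip sketch, reusing
# the SQLite 'engine' from the earlier examples:
import pickle

row = engine.execute("SELECT 1 AS x").fetchone()
restored = pickle.loads(pickle.dumps(row))
assert restored["x"] == 1 and restored[0] == 1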
- def rowproxy_reconstructor(cls, state): - return safe_rowproxy_reconstructor(cls, state) -except ImportError: - def rowproxy_reconstructor(cls, state): - obj = cls.__new__(cls) - obj.__setstate__(state) - return obj - -try: - from sqlalchemy.cresultproxy import BaseRowProxy -except ImportError: - class BaseRowProxy(object): - __slots__ = ('_parent', '_row', '_processors', '_keymap') - - def __init__(self, parent, row, processors, keymap): - """RowProxy objects are constructed by ResultProxy objects.""" - - self._parent = parent - self._row = row - self._processors = processors - self._keymap = keymap - - def __reduce__(self): - return (rowproxy_reconstructor, - (self.__class__, self.__getstate__())) - - def values(self): - """Return the values represented by this RowProxy as a list.""" - return list(self) - - def __iter__(self): - for processor, value in zip(self._processors, self._row): - if processor is None: - yield value - else: - yield processor(value) - - def __len__(self): - return len(self._row) - - def __getitem__(self, key): - try: - processor, obj, index = self._keymap[key] - except KeyError: - processor, obj, index = self._parent._key_fallback(key) - except TypeError: - if isinstance(key, slice): - l = [] - for processor, value in zip(self._processors[key], - self._row[key]): - if processor is None: - l.append(value) - else: - l.append(processor(value)) - return tuple(l) - else: - raise - if index is None: - raise exc.InvalidRequestError( - "Ambiguous column name '%s' in result set! " - "try 'use_labels' option on select statement." % key) - if processor is not None: - return processor(self._row[index]) - else: - return self._row[index] - - def __getattr__(self, name): - try: - return self[name] - except KeyError as e: - raise AttributeError(e.args[0]) - - -class RowProxy(BaseRowProxy): - """Proxy values from a single cursor row. - - Mostly follows "ordered dictionary" behavior, mapping result - values to the string-based column name, the integer position of - the result in the row, as well as Column instances which can be - mapped to the original Columns that produced this result set (for - results that correspond to constructed SQL expressions). 
- """ - __slots__ = () - - def __contains__(self, key): - return self._parent._has_key(key) - - def __getstate__(self): - return { - '_parent': self._parent, - '_row': tuple(self) - } - - def __setstate__(self, state): - self._parent = parent = state['_parent'] - self._row = state['_row'] - self._processors = parent._processors - self._keymap = parent._keymap - - __hash__ = None - - def _op(self, other, op): - return op(tuple(self), tuple(other)) \ - if isinstance(other, RowProxy) \ - else op(tuple(self), other) - - def __lt__(self, other): - return self._op(other, operator.lt) - - def __le__(self, other): - return self._op(other, operator.le) - - def __ge__(self, other): - return self._op(other, operator.ge) - - def __gt__(self, other): - return self._op(other, operator.gt) - - def __eq__(self, other): - return self._op(other, operator.eq) - - def __ne__(self, other): - return self._op(other, operator.ne) - - def __repr__(self): - return repr(tuple(self)) - - def has_key(self, key): - """Return True if this RowProxy contains the given key.""" - - return self._parent._has_key(key) - - def items(self): - """Return a list of tuples, each tuple containing a key/value pair.""" - # TODO: no coverage here - return [(key, self[key]) for key in self.keys()] - - def keys(self): - """Return the list of keys as strings represented by this RowProxy.""" - - return self._parent.keys - - def iterkeys(self): - return iter(self._parent.keys) - - def itervalues(self): - return iter(self) - -try: - # Register RowProxy with Sequence, - # so sequence protocol is implemented - from collections import Sequence - Sequence.register(RowProxy) -except ImportError: - pass - - -class ResultMetaData(object): - """Handle cursor.description, applying additional info from an execution - context.""" - - def __init__(self, parent, metadata): - context = parent.context - dialect = context.dialect - typemap = dialect.dbapi_type_map - translate_colname = context._translate_colname - self.case_sensitive = case_sensitive = dialect.case_sensitive - - if context.result_column_struct: - result_columns, cols_are_ordered = context.result_column_struct - num_ctx_cols = len(result_columns) - else: - num_ctx_cols = None - - if num_ctx_cols and \ - cols_are_ordered and \ - num_ctx_cols == len(metadata): - # case 1 - SQL expression statement, number of columns - # in result matches number of cols in compiled. This is the - # vast majority case for SQL expression constructs. In this - # case we don't bother trying to parse or match up to - # the colnames in the result description. - raw = [ - ( - idx, - key, - name.lower() if not case_sensitive else name, - context.get_result_processor( - type_, key, metadata[idx][1] - ), - obj, - None - ) for idx, (key, name, obj, type_) - in enumerate(result_columns) - ] - self.keys = [ - elem[0] for elem in result_columns - ] - else: - # case 2 - raw string, or number of columns in result does - # not match number of cols in compiled. The raw string case - # is very common. The latter can happen - # when text() is used with only a partial typemap, or - # in the extremely unlikely cases where the compiled construct - # has a single element with multiple col expressions in it - # (e.g. has commas embedded) or there's some kind of statement - # that is adding extra columns. - # In all these cases we fall back to the "named" approach - # that SQLAlchemy has used up through 0.9. 
- - if num_ctx_cols: - result_map = self._create_result_map( - result_columns, case_sensitive) - - raw = [] - self.keys = [] - untranslated = None - for idx, rec in enumerate(metadata): - colname = rec[0] - coltype = rec[1] - - if dialect.description_encoding: - colname = dialect._description_decoder(colname) - - if translate_colname: - colname, untranslated = translate_colname(colname) - - if dialect.requires_name_normalize: - colname = dialect.normalize_name(colname) - - self.keys.append(colname) - if not case_sensitive: - colname = colname.lower() - - if num_ctx_cols: - try: - ctx_rec = result_map[colname] - except KeyError: - mapped_type = typemap.get(coltype, sqltypes.NULLTYPE) - obj = None - else: - obj = ctx_rec[1] - mapped_type = ctx_rec[2] - else: - mapped_type = typemap.get(coltype, sqltypes.NULLTYPE) - obj = None - processor = context.get_result_processor( - mapped_type, colname, coltype) - - raw.append( - (idx, colname, colname, processor, obj, untranslated) - ) - - # keymap indexes by integer index... - self._keymap = dict([ - (elem[0], (elem[3], elem[4], elem[0])) - for elem in raw - ]) - - # processors in key order for certain per-row - # views like __iter__ and slices - self._processors = [elem[3] for elem in raw] - - if num_ctx_cols: - # keymap by primary string... - by_key = dict([ - (elem[2], (elem[3], elem[4], elem[0])) - for elem in raw - ]) - - # if by-primary-string dictionary smaller (or bigger?!) than - # number of columns, assume we have dupes, rewrite - # dupe records with "None" for index which results in - # ambiguous column exception when accessed. - if len(by_key) != num_ctx_cols: - seen = set() - for rec in raw: - key = rec[1] - if key in seen: - by_key[key] = (None, by_key[key][1], None) - seen.add(key) - - # update keymap with secondary "object"-based keys - self._keymap.update([ - (obj_elem, by_key[elem[2]]) - for elem in raw if elem[4] - for obj_elem in elem[4] - ]) - - # update keymap with primary string names taking - # precedence - self._keymap.update(by_key) - else: - self._keymap.update([ - (elem[2], (elem[3], elem[4], elem[0])) - for elem in raw - ]) - # update keymap with "translated" names (sqlite-only thing) - if translate_colname: - self._keymap.update([ - (elem[5], self._keymap[elem[2]]) - for elem in raw if elem[5] - ]) - - @classmethod - def _create_result_map(cls, result_columns, case_sensitive=True): - d = {} - for elem in result_columns: - key, rec = elem[0], elem[1:] - if not case_sensitive: - key = key.lower() - if key in d: - # conflicting keyname, just double up the list - # of objects. this will cause an "ambiguous name" - # error if an attempt is made by the result set to - # access. - e_name, e_obj, e_type = d[key] - d[key] = e_name, e_obj + rec[1], e_type - else: - d[key] = rec - return d - - @util.pending_deprecation("0.8", "sqlite dialect uses " - "_translate_colname() now") - def _set_keymap_synonym(self, name, origname): - """Set a synonym for the given name. - - Some dialects (SQLite at the moment) may use this to - adjust the column names that are significant within a - row. 
- - """ - rec = (processor, obj, i) = self._keymap[origname if - self.case_sensitive - else origname.lower()] - if self._keymap.setdefault(name, rec) is not rec: - self._keymap[name] = (processor, obj, None) - - def _key_fallback(self, key, raiseerr=True): - map = self._keymap - result = None - if isinstance(key, util.string_types): - result = map.get(key if self.case_sensitive else key.lower()) - # fallback for targeting a ColumnElement to a textual expression - # this is a rare use case which only occurs when matching text() - # or colummn('name') constructs to ColumnElements, or after a - # pickle/unpickle roundtrip - elif isinstance(key, expression.ColumnElement): - if key._label and ( - key._label - if self.case_sensitive - else key._label.lower()) in map: - result = map[key._label - if self.case_sensitive - else key._label.lower()] - elif hasattr(key, 'name') and ( - key.name - if self.case_sensitive - else key.name.lower()) in map: - # match is only on name. - result = map[key.name - if self.case_sensitive - else key.name.lower()] - # search extra hard to make sure this - # isn't a column/label name overlap. - # this check isn't currently available if the row - # was unpickled. - if result is not None and \ - result[1] is not None: - for obj in result[1]: - if key._compare_name_for_result(obj): - break - else: - result = None - if result is None: - if raiseerr: - raise exc.NoSuchColumnError( - "Could not locate column in row for column '%s'" % - expression._string_or_unprintable(key)) - else: - return None - else: - map[key] = result - return result - - def _has_key(self, key): - if key in self._keymap: - return True - else: - return self._key_fallback(key, False) is not None - - def _getter(self, key): - if key in self._keymap: - processor, obj, index = self._keymap[key] - else: - ret = self._key_fallback(key, False) - if ret is None: - return None - processor, obj, index = ret - - if index is None: - raise exc.InvalidRequestError( - "Ambiguous column name '%s' in result set! " - "try 'use_labels' option on select statement." % key) - - return operator.itemgetter(index) - - def __getstate__(self): - return { - '_pickled_keymap': dict( - (key, index) - for key, (processor, obj, index) in self._keymap.items() - if isinstance(key, util.string_types + util.int_types) - ), - 'keys': self.keys, - "case_sensitive": self.case_sensitive, - } - - def __setstate__(self, state): - # the row has been processed at pickling time so we don't need any - # processor anymore - self._processors = [None for _ in range(len(state['keys']))] - self._keymap = keymap = {} - for key, index in state['_pickled_keymap'].items(): - # not preserving "obj" here, unfortunately our - # proxy comparison fails with the unpickle - keymap[key] = (None, None, index) - self.keys = state['keys'] - self.case_sensitive = state['case_sensitive'] - self._echo = False - - -class ResultProxy(object): - """Wraps a DB-API cursor object to provide easier access to row columns. - - Individual columns may be accessed by their integer position, - case-insensitive column name, or by ``schema.Column`` - object. e.g.:: - - row = fetchone() - - col1 = row[0] # access via integer position - - col2 = row['col2'] # access via name - - col3 = row[mytable.c.mycol] # access via Column object. - - ``ResultProxy`` also handles post-processing of result column - data using ``TypeEngine`` objects, which are referenced from - the originating SQL statement that produced this result set. 
- - """ - - _process_row = RowProxy - out_parameters = None - _can_close_connection = False - _metadata = None - _soft_closed = False - closed = False - - def __init__(self, context): - self.context = context - self.dialect = context.dialect - self.cursor = self._saved_cursor = context.cursor - self.connection = context.root_connection - self._echo = self.connection._echo and \ - context.engine._should_log_debug() - self._init_metadata() - - def _getter(self, key): - try: - getter = self._metadata._getter - except AttributeError: - return self._non_result(None) - else: - return getter(key) - - def _has_key(self, key): - try: - has_key = self._metadata._has_key - except AttributeError: - return self._non_result(None) - else: - return has_key(key) - - def _init_metadata(self): - metadata = self._cursor_description() - if metadata is not None: - if self.context.compiled and \ - 'compiled_cache' in self.context.execution_options: - if self.context.compiled._cached_metadata: - self._metadata = self.context.compiled._cached_metadata - else: - self._metadata = self.context.compiled._cached_metadata = \ - ResultMetaData(self, metadata) - else: - self._metadata = ResultMetaData(self, metadata) - if self._echo: - self.context.engine.logger.debug( - "Col %r", tuple(x[0] for x in metadata)) - - def keys(self): - """Return the current set of string keys for rows.""" - if self._metadata: - return self._metadata.keys - else: - return [] - - @util.memoized_property - def rowcount(self): - """Return the 'rowcount' for this result. - - The 'rowcount' reports the number of rows *matched* - by the WHERE criterion of an UPDATE or DELETE statement. - - .. note:: - - Notes regarding :attr:`.ResultProxy.rowcount`: - - - * This attribute returns the number of rows *matched*, - which is not necessarily the same as the number of rows - that were actually *modified* - an UPDATE statement, for example, - may have no net change on a given row if the SET values - given are the same as those present in the row already. - Such a row would be matched but not modified. - On backends that feature both styles, such as MySQL, - rowcount is configured by default to return the match - count in all cases. - - * :attr:`.ResultProxy.rowcount` is *only* useful in conjunction - with an UPDATE or DELETE statement. Contrary to what the Python - DBAPI says, it does *not* return the - number of rows available from the results of a SELECT statement - as DBAPIs cannot support this functionality when rows are - unbuffered. - - * :attr:`.ResultProxy.rowcount` may not be fully implemented by - all dialects. In particular, most DBAPIs do not support an - aggregate rowcount result from an executemany call. - The :meth:`.ResultProxy.supports_sane_rowcount` and - :meth:`.ResultProxy.supports_sane_multi_rowcount` methods - will report from the dialect if each usage is known to be - supported. - - * Statements that use RETURNING may not return a correct - rowcount. - - """ - try: - return self.context.rowcount - except Exception as e: - self.connection._handle_dbapi_exception( - e, None, None, self.cursor, self.context) - - @property - def lastrowid(self): - """return the 'lastrowid' accessor on the DBAPI cursor. - - This is a DBAPI specific method and is only functional - for those backends which support it, for statements - where it is appropriate. It's behavior is not - consistent across backends. 
- - Usage of this method is normally unnecessary when - using insert() expression constructs; the - :attr:`~ResultProxy.inserted_primary_key` attribute provides a - tuple of primary key values for a newly inserted row, - regardless of database backend. - - """ - try: - return self._saved_cursor.lastrowid - except Exception as e: - self.connection._handle_dbapi_exception( - e, None, None, - self._saved_cursor, self.context) - - @property - def returns_rows(self): - """True if this :class:`.ResultProxy` returns rows. - - I.e. if it is legal to call the methods - :meth:`~.ResultProxy.fetchone`, - :meth:`~.ResultProxy.fetchmany` - :meth:`~.ResultProxy.fetchall`. - - """ - return self._metadata is not None - - @property - def is_insert(self): - """True if this :class:`.ResultProxy` is the result - of executing an expression language compiled - :func:`.expression.insert` construct. - - When True, this implies that the - :attr:`inserted_primary_key` attribute is accessible, - assuming the statement did not include - a user defined "returning" construct. - - """ - return self.context.isinsert - - def _cursor_description(self): - """May be overridden by subclasses.""" - - return self._saved_cursor.description - - def _soft_close(self, _autoclose_connection=True): - """Soft close this :class:`.ResultProxy`. - - This releases all DBAPI cursor resources, but leaves the - ResultProxy "open" from a semantic perspective, meaning the - fetchXXX() methods will continue to return empty results. - - This method is called automatically when: - - * all result rows are exhausted using the fetchXXX() methods. - * cursor.description is None. - - This method is **not public**, but is documented in order to clarify - the "autoclose" process used. - - .. versionadded:: 1.0.0 - - .. seealso:: - - :meth:`.ResultProxy.close` - - - """ - if self._soft_closed: - return - self._soft_closed = True - cursor = self.cursor - self.connection._safe_close_cursor(cursor) - if _autoclose_connection and \ - self.connection.should_close_with_result: - self.connection.close() - self.cursor = None - - def close(self): - """Close this ResultProxy. - - This closes out the underlying DBAPI cursor corresponding - to the statement execution, if one is still present. Note that the - DBAPI cursor is automatically released when the :class:`.ResultProxy` - exhausts all available rows. :meth:`.ResultProxy.close` is generally - an optional method except in the case when discarding a - :class:`.ResultProxy` that still has additional rows pending for fetch. - - In the case of a result that is the product of - :ref:`connectionless execution `, - the underlying :class:`.Connection` object is also closed, which - :term:`releases` DBAPI connection resources. - - After this method is called, it is no longer valid to call upon - the fetch methods, which will raise a :class:`.ResourceClosedError` - on subsequent use. - - .. versionchanged:: 1.0.0 - the :meth:`.ResultProxy.close` method - has been separated out from the process that releases the underlying - DBAPI cursor resource. The "auto close" feature of the - :class:`.Connection` now performs a so-called "soft close", which - releases the underlying DBAPI cursor, but allows the - :class:`.ResultProxy` to still behave as an open-but-exhausted - result set; the actual :meth:`.ResultProxy.close` method is never - called. It is still safe to discard a :class:`.ResultProxy` - that has been fully exhausted without calling this method. - - ..
seealso:: - - :ref:`connections_toplevel` - - :meth:`.ResultProxy._soft_close` - - """ - - if not self.closed: - self._soft_close() - self.closed = True - - def __iter__(self): - while True: - row = self.fetchone() - if row is None: - raise StopIteration - else: - yield row - - @util.memoized_property - def inserted_primary_key(self): - """Return the primary key for the row just inserted. - - The return value is a list of scalar values - corresponding to the list of primary key columns - in the target table. - - This only applies to single row :func:`.insert` - constructs which did not explicitly specify - :meth:`.Insert.returning`. - - Note that primary key columns which specify a - server_default clause, - or otherwise do not qualify as "autoincrement" - columns (see the notes at :class:`.Column`), and were - generated using the database-side default, will - appear in this list as ``None`` unless the backend - supports "returning" and the insert statement executed - with the "implicit returning" enabled. - - Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed - statement is not a compiled expression construct - or is not an insert() construct. - - """ - - if not self.context.compiled: - raise exc.InvalidRequestError( - "Statement is not a compiled " - "expression construct.") - elif not self.context.isinsert: - raise exc.InvalidRequestError( - "Statement is not an insert() " - "expression construct.") - elif self.context._is_explicit_returning: - raise exc.InvalidRequestError( - "Can't call inserted_primary_key " - "when returning() " - "is used.") - - return self.context.inserted_primary_key - - def last_updated_params(self): - """Return the collection of updated parameters from this - execution. - - Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed - statement is not a compiled expression construct - or is not an update() construct. - - """ - if not self.context.compiled: - raise exc.InvalidRequestError( - "Statement is not a compiled " - "expression construct.") - elif not self.context.isupdate: - raise exc.InvalidRequestError( - "Statement is not an update() " - "expression construct.") - elif self.context.executemany: - return self.context.compiled_parameters - else: - return self.context.compiled_parameters[0] - - def last_inserted_params(self): - """Return the collection of inserted parameters from this - execution. - - Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed - statement is not a compiled expression construct - or is not an insert() construct. - - """ - if not self.context.compiled: - raise exc.InvalidRequestError( - "Statement is not a compiled " - "expression construct.") - elif not self.context.isinsert: - raise exc.InvalidRequestError( - "Statement is not an insert() " - "expression construct.") - elif self.context.executemany: - return self.context.compiled_parameters - else: - return self.context.compiled_parameters[0] - - @property - def returned_defaults(self): - """Return the values of default columns that were fetched using - the :meth:`.ValuesBase.return_defaults` feature. - - The value is an instance of :class:`.RowProxy`, or ``None`` - if :meth:`.ValuesBase.return_defaults` was not used or if the - backend does not support RETURNING. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :meth:`.ValuesBase.return_defaults` - - """ - return self.context.returned_defaults - - def lastrow_has_defaults(self): - """Return ``lastrow_has_defaults()`` from the underlying - :class:`.ExecutionContext`. 
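As a sketch of the accessors above, using a throwaway SQLite table (all names are illustrative)::

    from sqlalchemy import (create_engine, MetaData, Table, Column,
                            Integer, String)

    engine = create_engine("sqlite://")
    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))
    metadata.create_all(engine)

    result = engine.execute(users.insert().values(name='ed'))

    # scalar primary key values for the single row just inserted
    print(result.inserted_primary_key)     # e.g. [1]
    # the parameter set rendered for the INSERT
    print(result.last_inserted_params())   # e.g. {'name': 'ed'}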
- - See :class:`.ExecutionContext` for details. - - """ - - return self.context.lastrow_has_defaults() - - def postfetch_cols(self): - """Return ``postfetch_cols()`` from the underlying - :class:`.ExecutionContext`. - - See :class:`.ExecutionContext` for details. - - Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed - statement is not a compiled expression construct - or is not an insert() or update() construct. - - """ - - if not self.context.compiled: - raise exc.InvalidRequestError( - "Statement is not a compiled " - "expression construct.") - elif not self.context.isinsert and not self.context.isupdate: - raise exc.InvalidRequestError( - "Statement is not an insert() or update() " - "expression construct.") - return self.context.postfetch_cols - - def prefetch_cols(self): - """Return ``prefetch_cols()`` from the underlying - :class:`.ExecutionContext`. - - See :class:`.ExecutionContext` for details. - - Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed - statement is not a compiled expression construct - or is not an insert() or update() construct. - - """ - - if not self.context.compiled: - raise exc.InvalidRequestError( - "Statement is not a compiled " - "expression construct.") - elif not self.context.isinsert and not self.context.isupdate: - raise exc.InvalidRequestError( - "Statement is not an insert() or update() " - "expression construct.") - return self.context.prefetch_cols - - def supports_sane_rowcount(self): - """Return ``supports_sane_rowcount`` from the dialect. - - See :attr:`.ResultProxy.rowcount` for background. - - """ - - return self.dialect.supports_sane_rowcount - - def supports_sane_multi_rowcount(self): - """Return ``supports_sane_multi_rowcount`` from the dialect. - - See :attr:`.ResultProxy.rowcount` for background. - - """ - - return self.dialect.supports_sane_multi_rowcount - - def _fetchone_impl(self): - try: - return self.cursor.fetchone() - except AttributeError: - return self._non_result(None) - - def _fetchmany_impl(self, size=None): - try: - if size is None: - return self.cursor.fetchmany() - else: - return self.cursor.fetchmany(size) - except AttributeError: - return self._non_result([]) - - def _fetchall_impl(self): - try: - return self.cursor.fetchall() - except AttributeError: - return self._non_result([]) - - def _non_result(self, default): - if self._metadata is None: - raise exc.ResourceClosedError( - "This result object does not return rows. " - "It has been closed automatically.", - ) - elif self.closed: - raise exc.ResourceClosedError("This result object is closed.") - else: - return default - - def process_rows(self, rows): - process_row = self._process_row - metadata = self._metadata - keymap = metadata._keymap - processors = metadata._processors - if self._echo: - log = self.context.engine.logger.debug - l = [] - for row in rows: - log("Row %r", row) - l.append(process_row(metadata, row, processors, keymap)) - return l - else: - return [process_row(metadata, row, processors, keymap) - for row in rows] - - def fetchall(self): - """Fetch all rows, just like DB-API ``cursor.fetchall()``. - - After all rows have been exhausted, the underlying DBAPI - cursor resource is released, and the object may be safely - discarded. - - Subsequent calls to :meth:`.ResultProxy.fetchall` will return - an empty list. After the :meth:`.ResultProxy.close` method is - called, the method will raise :class:`.ResourceClosedError`. - - .. 
versionchanged:: 1.0.0 - Added "soft close" behavior which
-           allows the result to be used in an "exhausted" state prior to
-           calling the :meth:`.ResultProxy.close` method.
-
-        """
-
-        try:
-            l = self.process_rows(self._fetchall_impl())
-            self._soft_close()
-            return l
-        except Exception as e:
-            self.connection._handle_dbapi_exception(
-                e, None, None,
-                self.cursor, self.context)
-
-    def fetchmany(self, size=None):
-        """Fetch many rows, just like DB-API
-        ``cursor.fetchmany(size=cursor.arraysize)``.
-
-        After all rows have been exhausted, the underlying DBAPI
-        cursor resource is released, and the object may be safely
-        discarded.
-
-        Calls to :meth:`.ResultProxy.fetchmany` after all rows have been
-        exhausted will return
-        an empty list.  After the :meth:`.ResultProxy.close` method is
-        called, the method will raise :class:`.ResourceClosedError`.
-
-        .. versionchanged:: 1.0.0 - Added "soft close" behavior which
-           allows the result to be used in an "exhausted" state prior to
-           calling the :meth:`.ResultProxy.close` method.
-
-        """
-
-        try:
-            l = self.process_rows(self._fetchmany_impl(size))
-            if len(l) == 0:
-                self._soft_close()
-            return l
-        except Exception as e:
-            self.connection._handle_dbapi_exception(
-                e, None, None,
-                self.cursor, self.context)
-
-    def fetchone(self):
-        """Fetch one row, just like DB-API ``cursor.fetchone()``.
-
-        After all rows have been exhausted, the underlying DBAPI
-        cursor resource is released, and the object may be safely
-        discarded.
-
-        Calls to :meth:`.ResultProxy.fetchone` after all rows have
-        been exhausted will return ``None``.
-        After the :meth:`.ResultProxy.close` method is
-        called, the method will raise :class:`.ResourceClosedError`.
-
-        .. versionchanged:: 1.0.0 - Added "soft close" behavior which
-           allows the result to be used in an "exhausted" state prior to
-           calling the :meth:`.ResultProxy.close` method.
-
-        """
-        try:
-            row = self._fetchone_impl()
-            if row is not None:
-                return self.process_rows([row])[0]
-            else:
-                self._soft_close()
-                return None
-        except Exception as e:
-            self.connection._handle_dbapi_exception(
-                e, None, None,
-                self.cursor, self.context)
-
-    def first(self):
-        """Fetch the first row and then close the result set unconditionally.
-
-        Returns None if no row is present.
-
-        After calling this method, the object is fully closed,
-        e.g. the :meth:`.ResultProxy.close` method will have been called.
-
-        """
-        if self._metadata is None:
-            return self._non_result(None)
-
-        try:
-            row = self._fetchone_impl()
-        except Exception as e:
-            self.connection._handle_dbapi_exception(
-                e, None, None,
-                self.cursor, self.context)
-
-        try:
-            if row is not None:
-                return self.process_rows([row])[0]
-            else:
-                return None
-        finally:
-            self.close()
-
-    def scalar(self):
-        """Fetch the first column of the first row, and close the result set.
-
-        Returns None if no row is present.
-
-        After calling this method, the object is fully closed,
-        e.g. the :meth:`.ResultProxy.close` method will have been called.
-
-        """
-        row = self.first()
-        if row is not None:
-            return row[0]
-        else:
-            return None
-
-
-class BufferedRowResultProxy(ResultProxy):
-    """A ResultProxy with row buffering behavior.
-
-    ``ResultProxy`` that buffers the contents of a selection of rows
-    before ``fetchone()`` is called.  This is to allow the results of
-    ``cursor.description`` to be available immediately, when
-    interfacing with a DB-API that requires rows to be consumed before
-    this information is available (currently psycopg2, when used with
-    server-side cursors).
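The fetch contract above can be traced end to end on an in-memory SQLite table (illustrative only)::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")
    engine.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
    engine.execute("INSERT INTO users (name) VALUES ('ed')")
    engine.execute("INSERT INTO users (name) VALUES ('wendy')")

    result = engine.execute("SELECT name FROM users ORDER BY name")
    print(result.fetchone())    # ('ed',); the cursor remains open
    print(result.fetchall())    # [('wendy',)]; exhaustion soft-closes
    print(result.fetchall())    # [] rather than an exception

    # first() and scalar() read at most one row, then close unconditionally
    print(engine.execute("SELECT count(*) FROM users").scalar())   # 2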
- - The pre-fetching behavior fetches only one row initially, and then - grows its buffer size by a fixed amount with each successive need - for additional rows up to a size of 1000. - - The size argument is configurable using the ``max_row_buffer`` - execution option:: - - with psycopg2_engine.connect() as conn: - - result = conn.execution_options( - stream_results=True, max_row_buffer=50 - ).execute("select * from table") - - .. versionadded:: 1.0.6 Added the ``max_row_buffer`` option. - - .. seealso:: - - :ref:`psycopg2_execution_options` - """ - - def _init_metadata(self): - self._max_row_buffer = self.context.execution_options.get( - 'max_row_buffer', None) - self.__buffer_rows() - super(BufferedRowResultProxy, self)._init_metadata() - - # this is a "growth chart" for the buffering of rows. - # each successive __buffer_rows call will use the next - # value in the list for the buffer size until the max - # is reached - size_growth = { - 1: 5, - 5: 10, - 10: 20, - 20: 50, - 50: 100, - 100: 250, - 250: 500, - 500: 1000 - } - - def __buffer_rows(self): - if self.cursor is None: - return - size = getattr(self, '_bufsize', 1) - self.__rowbuffer = collections.deque(self.cursor.fetchmany(size)) - self._bufsize = self.size_growth.get(size, size) - if self._max_row_buffer is not None: - self._bufsize = min(self._max_row_buffer, self._bufsize) - - def _soft_close(self, **kw): - self.__rowbuffer.clear() - super(BufferedRowResultProxy, self)._soft_close(**kw) - - def _fetchone_impl(self): - if self.cursor is None: - return self._non_result(None) - if not self.__rowbuffer: - self.__buffer_rows() - if not self.__rowbuffer: - return None - return self.__rowbuffer.popleft() - - def _fetchmany_impl(self, size=None): - if size is None: - return self._fetchall_impl() - result = [] - for x in range(0, size): - row = self._fetchone_impl() - if row is None: - break - result.append(row) - return result - - def _fetchall_impl(self): - if self.cursor is None: - return self._non_result([]) - self.__rowbuffer.extend(self.cursor.fetchall()) - ret = self.__rowbuffer - self.__rowbuffer = collections.deque() - return ret - - -class FullyBufferedResultProxy(ResultProxy): - """A result proxy that buffers rows fully upon creation. - - Used for operations where a result is to be delivered - after the database conversation can not be continued, - such as MSSQL INSERT...OUTPUT after an autocommit. 
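The growth chart above can be traced in isolation; this mirrors the arithmetic of ``__buffer_rows()`` under an assumed ``max_row_buffer`` of 200::

    # sizes handed to cursor.fetchmany() by successive buffer refills
    size_growth = {1: 5, 5: 10, 10: 20, 20: 50, 50: 100,
                   100: 250, 250: 500, 500: 1000}
    max_row_buffer = 200

    bufsize, fetch_sizes = 1, []
    for _ in range(8):
        fetch_sizes.append(bufsize)
        bufsize = min(max_row_buffer, size_growth.get(bufsize, bufsize))

    print(fetch_sizes)   # [1, 5, 10, 20, 50, 100, 200, 200]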
- - """ - - def _init_metadata(self): - super(FullyBufferedResultProxy, self)._init_metadata() - self.__rowbuffer = self._buffer_rows() - - def _buffer_rows(self): - return collections.deque(self.cursor.fetchall()) - - def _soft_close(self, **kw): - self.__rowbuffer.clear() - super(FullyBufferedResultProxy, self)._soft_close(**kw) - - def _fetchone_impl(self): - if self.__rowbuffer: - return self.__rowbuffer.popleft() - else: - return self._non_result(None) - - def _fetchmany_impl(self, size=None): - if size is None: - return self._fetchall_impl() - result = [] - for x in range(0, size): - row = self._fetchone_impl() - if row is None: - break - result.append(row) - return result - - def _fetchall_impl(self): - if not self.cursor: - return self._non_result([]) - ret = self.__rowbuffer - self.__rowbuffer = collections.deque() - return ret - - -class BufferedColumnRow(RowProxy): - def __init__(self, parent, row, processors, keymap): - # preprocess row - row = list(row) - # this is a tad faster than using enumerate - index = 0 - for processor in parent._orig_processors: - if processor is not None: - row[index] = processor(row[index]) - index += 1 - row = tuple(row) - super(BufferedColumnRow, self).__init__(parent, row, - processors, keymap) - - -class BufferedColumnResultProxy(ResultProxy): - """A ResultProxy with column buffering behavior. - - ``ResultProxy`` that loads all columns into memory each time - fetchone() is called. If fetchmany() or fetchall() are called, - the full grid of results is fetched. This is to operate with - databases where result rows contain "live" results that fall out - of scope unless explicitly fetched. Currently this includes - cx_Oracle LOB objects. - - """ - - _process_row = BufferedColumnRow - - def _init_metadata(self): - super(BufferedColumnResultProxy, self)._init_metadata() - metadata = self._metadata - # orig_processors will be used to preprocess each row when they are - # constructed. - metadata._orig_processors = metadata._processors - # replace the all type processors by None processors. - metadata._processors = [None for _ in range(len(metadata.keys))] - keymap = {} - for k, (func, obj, index) in metadata._keymap.items(): - keymap[k] = (None, obj, index) - self._metadata._keymap = keymap - - def fetchall(self): - # can't call cursor.fetchall(), since rows must be - # fully processed before requesting more from the DBAPI. - l = [] - while True: - row = self.fetchone() - if row is None: - break - l.append(row) - return l - - def fetchmany(self, size=None): - # can't call cursor.fetchmany(), since rows must be - # fully processed before requesting more from the DBAPI. - if size is None: - return self.fetchall() - l = [] - for i in range(size): - row = self.fetchone() - if row is None: - break - l.append(row) - return l diff --git a/python/sqlalchemy/engine/strategies.py b/python/sqlalchemy/engine/strategies.py deleted file mode 100644 index a539ee9f..00000000 --- a/python/sqlalchemy/engine/strategies.py +++ /dev/null @@ -1,262 +0,0 @@ -# engine/strategies.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Strategies for creating new instances of Engine types. - -These are semi-private implementation classes which provide the -underlying behavior for the "strategy" keyword argument available on -:func:`~sqlalchemy.engine.create_engine`. 
Current available options are -``plain``, ``threadlocal``, and ``mock``. - -New strategies can be added via new ``EngineStrategy`` classes. -""" - -from operator import attrgetter - -from sqlalchemy.engine import base, threadlocal, url -from sqlalchemy import util, exc, event -from sqlalchemy import pool as poollib - -strategies = {} - - -class EngineStrategy(object): - """An adaptor that processes input arguments and produces an Engine. - - Provides a ``create`` method that receives input arguments and - produces an instance of base.Engine or a subclass. - - """ - - def __init__(self): - strategies[self.name] = self - - def create(self, *args, **kwargs): - """Given arguments, returns a new Engine instance.""" - - raise NotImplementedError() - - -class DefaultEngineStrategy(EngineStrategy): - """Base class for built-in strategies.""" - - def create(self, name_or_url, **kwargs): - # create url.URL object - u = url.make_url(name_or_url) - - entrypoint = u._get_entrypoint() - dialect_cls = entrypoint.get_dialect_cls(u) - - if kwargs.pop('_coerce_config', False): - def pop_kwarg(key, default=None): - value = kwargs.pop(key, default) - if key in dialect_cls.engine_config_types: - value = dialect_cls.engine_config_types[key](value) - return value - else: - pop_kwarg = kwargs.pop - - dialect_args = {} - # consume dialect arguments from kwargs - for k in util.get_cls_kwargs(dialect_cls): - if k in kwargs: - dialect_args[k] = pop_kwarg(k) - - dbapi = kwargs.pop('module', None) - if dbapi is None: - dbapi_args = {} - for k in util.get_func_kwargs(dialect_cls.dbapi): - if k in kwargs: - dbapi_args[k] = pop_kwarg(k) - dbapi = dialect_cls.dbapi(**dbapi_args) - - dialect_args['dbapi'] = dbapi - - # create dialect - dialect = dialect_cls(**dialect_args) - - # assemble connection arguments - (cargs, cparams) = dialect.create_connect_args(u) - cparams.update(pop_kwarg('connect_args', {})) - cargs = list(cargs) # allow mutability - - # look for existing pool or create - pool = pop_kwarg('pool', None) - if pool is None: - def connect(connection_record=None): - if dialect._has_events: - for fn in dialect.dispatch.do_connect: - connection = fn( - dialect, connection_record, cargs, cparams) - if connection is not None: - return connection - return dialect.connect(*cargs, **cparams) - - creator = pop_kwarg('creator', connect) - - poolclass = pop_kwarg('poolclass', None) - if poolclass is None: - poolclass = dialect_cls.get_pool_class(u) - pool_args = {} - - # consume pool arguments from kwargs, translating a few of - # the arguments - translate = {'logging_name': 'pool_logging_name', - 'echo': 'echo_pool', - 'timeout': 'pool_timeout', - 'recycle': 'pool_recycle', - 'events': 'pool_events', - 'use_threadlocal': 'pool_threadlocal', - 'reset_on_return': 'pool_reset_on_return'} - for k in util.get_cls_kwargs(poolclass): - tk = translate.get(k, k) - if tk in kwargs: - pool_args[k] = pop_kwarg(tk) - pool = poolclass(creator, **pool_args) - else: - if isinstance(pool, poollib._DBProxy): - pool = pool.get_pool(*cargs, **cparams) - else: - pool = pool - - # create engine. - engineclass = self.engine_cls - engine_args = {} - for k in util.get_cls_kwargs(engineclass): - if k in kwargs: - engine_args[k] = pop_kwarg(k) - - _initialize = kwargs.pop('_initialize', True) - - # all kwargs should be consumed - if kwargs: - raise TypeError( - "Invalid argument(s) %s sent to create_engine(), " - "using configuration %s/%s/%s. Please check that the " - "keyword arguments are appropriate for this combination " - "of components." 
% (','.join("'%s'" % k for k in kwargs), - dialect.__class__.__name__, - pool.__class__.__name__, - engineclass.__name__)) - - engine = engineclass(pool, dialect, u, **engine_args) - - if _initialize: - do_on_connect = dialect.on_connect() - if do_on_connect: - def on_connect(dbapi_connection, connection_record): - conn = getattr( - dbapi_connection, '_sqla_unwrap', dbapi_connection) - if conn is None: - return - do_on_connect(conn) - - event.listen(pool, 'first_connect', on_connect) - event.listen(pool, 'connect', on_connect) - - def first_connect(dbapi_connection, connection_record): - c = base.Connection(engine, connection=dbapi_connection, - _has_events=False) - c._execution_options = util.immutabledict() - dialect.initialize(c) - event.listen(pool, 'first_connect', first_connect, once=True) - - dialect_cls.engine_created(engine) - if entrypoint is not dialect_cls: - entrypoint.engine_created(engine) - - return engine - - -class PlainEngineStrategy(DefaultEngineStrategy): - """Strategy for configuring a regular Engine.""" - - name = 'plain' - engine_cls = base.Engine - -PlainEngineStrategy() - - -class ThreadLocalEngineStrategy(DefaultEngineStrategy): - """Strategy for configuring an Engine with threadlocal behavior.""" - - name = 'threadlocal' - engine_cls = threadlocal.TLEngine - -ThreadLocalEngineStrategy() - - -class MockEngineStrategy(EngineStrategy): - """Strategy for configuring an Engine-like object with mocked execution. - - Produces a single mock Connectable object which dispatches - statement execution to a passed-in function. - - """ - - name = 'mock' - - def create(self, name_or_url, executor, **kwargs): - # create url.URL object - u = url.make_url(name_or_url) - - dialect_cls = u.get_dialect() - - dialect_args = {} - # consume dialect arguments from kwargs - for k in util.get_cls_kwargs(dialect_cls): - if k in kwargs: - dialect_args[k] = kwargs.pop(k) - - # create dialect - dialect = dialect_cls(**dialect_args) - - return MockEngineStrategy.MockConnection(dialect, executor) - - class MockConnection(base.Connectable): - def __init__(self, dialect, execute): - self._dialect = dialect - self.execute = execute - - engine = property(lambda s: s) - dialect = property(attrgetter('_dialect')) - name = property(lambda s: s._dialect.name) - - def contextual_connect(self, **kwargs): - return self - - def execution_options(self, **kw): - return self - - def compiler(self, statement, parameters, **kwargs): - return self._dialect.compiler( - statement, parameters, engine=self, **kwargs) - - def create(self, entity, **kwargs): - kwargs['checkfirst'] = False - from sqlalchemy.engine import ddl - - ddl.SchemaGenerator( - self.dialect, self, **kwargs).traverse_single(entity) - - def drop(self, entity, **kwargs): - kwargs['checkfirst'] = False - from sqlalchemy.engine import ddl - ddl.SchemaDropper( - self.dialect, self, **kwargs).traverse_single(entity) - - def _run_visitor(self, visitorcallable, element, - connection=None, - **kwargs): - kwargs['checkfirst'] = False - visitorcallable(self.dialect, self, - **kwargs).traverse_single(element) - - def execute(self, object, *multiparams, **params): - raise NotImplementedError() - -MockEngineStrategy() diff --git a/python/sqlalchemy/engine/threadlocal.py b/python/sqlalchemy/engine/threadlocal.py deleted file mode 100644 index 0d6e1c0f..00000000 --- a/python/sqlalchemy/engine/threadlocal.py +++ /dev/null @@ -1,138 +0,0 @@ -# engine/threadlocal.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of 
SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides a thread-local transactional wrapper around the root Engine class. - -The ``threadlocal`` module is invoked when using the -``strategy="threadlocal"`` flag with :func:`~sqlalchemy.engine.create_engine`. -This module is semi-private and is invoked automatically when the threadlocal -engine strategy is used. -""" - -from .. import util -from . import base -import weakref - - -class TLConnection(base.Connection): - - def __init__(self, *arg, **kw): - super(TLConnection, self).__init__(*arg, **kw) - self.__opencount = 0 - - def _increment_connect(self): - self.__opencount += 1 - return self - - def close(self): - if self.__opencount == 1: - base.Connection.close(self) - self.__opencount -= 1 - - def _force_close(self): - self.__opencount = 0 - base.Connection.close(self) - - -class TLEngine(base.Engine): - """An Engine that includes support for thread-local managed - transactions. - - """ - _tl_connection_cls = TLConnection - - def __init__(self, *args, **kwargs): - super(TLEngine, self).__init__(*args, **kwargs) - self._connections = util.threading.local() - - def contextual_connect(self, **kw): - if not hasattr(self._connections, 'conn'): - connection = None - else: - connection = self._connections.conn() - - if connection is None or connection.closed: - # guards against pool-level reapers, if desired. - # or not connection.connection.is_valid: - connection = self._tl_connection_cls( - self, - self._wrap_pool_connect( - self.pool.connect, connection), - **kw) - self._connections.conn = weakref.ref(connection) - - return connection._increment_connect() - - def begin_twophase(self, xid=None): - if not hasattr(self._connections, 'trans'): - self._connections.trans = [] - self._connections.trans.append( - self.contextual_connect().begin_twophase(xid=xid)) - return self - - def begin_nested(self): - if not hasattr(self._connections, 'trans'): - self._connections.trans = [] - self._connections.trans.append( - self.contextual_connect().begin_nested()) - return self - - def begin(self): - if not hasattr(self._connections, 'trans'): - self._connections.trans = [] - self._connections.trans.append(self.contextual_connect().begin()) - return self - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if type is None: - self.commit() - else: - self.rollback() - - def prepare(self): - if not hasattr(self._connections, 'trans') or \ - not self._connections.trans: - return - self._connections.trans[-1].prepare() - - def commit(self): - if not hasattr(self._connections, 'trans') or \ - not self._connections.trans: - return - trans = self._connections.trans.pop(-1) - trans.commit() - - def rollback(self): - if not hasattr(self._connections, 'trans') or \ - not self._connections.trans: - return - trans = self._connections.trans.pop(-1) - trans.rollback() - - def dispose(self): - self._connections = util.threading.local() - super(TLEngine, self).dispose() - - @property - def closed(self): - return not hasattr(self._connections, 'conn') or \ - self._connections.conn() is None or \ - self._connections.conn().closed - - def close(self): - if not self.closed: - self.contextual_connect().close() - connection = self._connections.conn() - connection._force_close() - del self._connections.conn - self._connections.trans = [] - - def __repr__(self): - return 'TLEngine(%s)' % str(self.url) diff --git a/python/sqlalchemy/engine/url.py b/python/sqlalchemy/engine/url.py 
deleted file mode 100644
index 32e3f8a6..00000000
--- a/python/sqlalchemy/engine/url.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# engine/url.py
-# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
-#
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates
-information about a database connection specification.
-
-The URL object is created automatically when
-:func:`~sqlalchemy.engine.create_engine` is called with a string
-argument; alternatively, the URL is a public-facing construct which can
-be used directly and is also accepted directly by ``create_engine()``.
-"""
-
-import re
-from .. import exc, util
-from . import Dialect
-from ..dialects import registry
-
-
-class URL(object):
-    """
-    Represent the components of a URL used to connect to a database.
-
-    This object is suitable to be passed directly to a
-    :func:`~sqlalchemy.create_engine` call.  The fields of the URL are parsed
-    from a string by the :func:`.make_url` function.  The string
-    format of the URL is an RFC-1738-style string.
-
-    All initialization parameters are available as public attributes.
-
-    :param drivername: the name of the database backend.
-      This name will correspond to a module in sqlalchemy/databases
-      or a third party plug-in.
-
-    :param username: The user name.
-
-    :param password: database password.
-
-    :param host: The name of the host.
-
-    :param port: The port number.
-
-    :param database: The database name.
-
-    :param query: A dictionary of options to be passed to the
-      dialect and/or the DBAPI upon connect.
-
-    """
-
-    def __init__(self, drivername, username=None, password=None,
-                 host=None, port=None, database=None, query=None):
-        self.drivername = drivername
-        self.username = username
-        self.password = password
-        self.host = host
-        if port is not None:
-            self.port = int(port)
-        else:
-            self.port = None
-        self.database = database
-        self.query = query or {}
-
-    def __to_string__(self, hide_password=True):
-        s = self.drivername + "://"
-        if self.username is not None:
-            s += _rfc_1738_quote(self.username)
-            if self.password is not None:
-                s += ':' + ('***' if hide_password
-                            else _rfc_1738_quote(self.password))
-            s += "@"
-        if self.host is not None:
-            if ':' in self.host:
-                s += "[%s]" % self.host
-            else:
-                s += self.host
-        if self.port is not None:
-            s += ':' + str(self.port)
-        if self.database is not None:
-            s += '/' + self.database
-        if self.query:
-            keys = list(self.query)
-            keys.sort()
-            s += '?' + "&".join("%s=%s" % (k, self.query[k]) for k in keys)
-        return s
-
-    def __str__(self):
-        return self.__to_string__(hide_password=False)
-
-    def __repr__(self):
-        return self.__to_string__()
-
-    def __hash__(self):
-        return hash(str(self))
-
-    def __eq__(self, other):
-        return \
-            isinstance(other, URL) and \
-            self.drivername == other.drivername and \
-            self.username == other.username and \
-            self.password == other.password and \
-            self.host == other.host and \
-            self.database == other.database and \
-            self.query == other.query
-
-    def get_backend_name(self):
-        if '+' not in self.drivername:
-            return self.drivername
-        else:
-            return self.drivername.split('+')[0]
-
-    def get_driver_name(self):
-        if '+' not in self.drivername:
-            return self.get_dialect().driver
-        else:
-            return self.drivername.split('+')[1]
-
-    def _get_entrypoint(self):
-        """Return the "entry point" dialect class.
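A short sketch of the accessors above (the DSN is illustrative)::

    from sqlalchemy.engine.url import make_url

    u = make_url("postgresql+psycopg2://scott:tiger@localhost:5432/test")
    print(u.get_backend_name())         # 'postgresql'
    print(u.get_driver_name())          # 'psycopg2'
    print(u.host, u.port, u.database)   # localhost 5432 test

    # __repr__ masks the password, __str__ does not
    print(repr(u))   # postgresql+psycopg2://scott:***@localhost:5432/test
    print(str(u))    # postgresql+psycopg2://scott:tiger@localhost:5432/test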
- - This is normally the dialect itself except in the case when the - returned class implements the get_dialect_cls() method. - - """ - if '+' not in self.drivername: - name = self.drivername - else: - name = self.drivername.replace('+', '.') - cls = registry.load(name) - # check for legacy dialects that - # would return a module with 'dialect' as the - # actual class - if hasattr(cls, 'dialect') and \ - isinstance(cls.dialect, type) and \ - issubclass(cls.dialect, Dialect): - return cls.dialect - else: - return cls - - def get_dialect(self): - """Return the SQLAlchemy database dialect class corresponding - to this URL's driver name. - """ - entrypoint = self._get_entrypoint() - dialect_cls = entrypoint.get_dialect_cls(self) - return dialect_cls - - def translate_connect_args(self, names=[], **kw): - """Translate url attributes into a dictionary of connection arguments. - - Returns attributes of this url (`host`, `database`, `username`, - `password`, `port`) as a plain dictionary. The attribute names are - used as the keys by default. Unset or false attributes are omitted - from the final dictionary. - - :param \**kw: Optional, alternate key names for url attributes. - - :param names: Deprecated. Same purpose as the keyword-based alternate - names, but correlates the name to the original positionally. - """ - - translated = {} - attribute_names = ['host', 'database', 'username', 'password', 'port'] - for sname in attribute_names: - if names: - name = names.pop(0) - elif sname in kw: - name = kw[sname] - else: - name = sname - if name is not None and getattr(self, sname, False): - translated[name] = getattr(self, sname) - return translated - - -def make_url(name_or_url): - """Given a string or unicode instance, produce a new URL instance. - - The given string is parsed according to the RFC 1738 spec. If an - existing URL object is passed, just returns the object. - """ - - if isinstance(name_or_url, util.string_types): - return _parse_rfc1738_args(name_or_url) - else: - return name_or_url - - -def _parse_rfc1738_args(name): - pattern = re.compile(r''' - (?P[\w\+]+):// - (?: - (?P[^:/]*) - (?::(?P.*))? - @)? - (?: - (?: - \[(?P[^/]+)\] | - (?P[^/:]+) - )? - (?::(?P[^/]*))? - )? - (?:/(?P.*))? 
- ''', re.X) - - m = pattern.match(name) - if m is not None: - components = m.groupdict() - if components['database'] is not None: - tokens = components['database'].split('?', 2) - components['database'] = tokens[0] - query = ( - len(tokens) > 1 and dict(util.parse_qsl(tokens[1]))) or None - if util.py2k and query is not None: - query = dict((k.encode('ascii'), query[k]) for k in query) - else: - query = None - components['query'] = query - - if components['username'] is not None: - components['username'] = _rfc_1738_unquote(components['username']) - - if components['password'] is not None: - components['password'] = _rfc_1738_unquote(components['password']) - - ipv4host = components.pop('ipv4host') - ipv6host = components.pop('ipv6host') - components['host'] = ipv4host or ipv6host - name = components.pop('name') - return URL(name, **components) - else: - raise exc.ArgumentError( - "Could not parse rfc1738 URL from string '%s'" % name) - - -def _rfc_1738_quote(text): - return re.sub(r'[:@/]', lambda m: "%%%X" % ord(m.group(0)), text) - - -def _rfc_1738_unquote(text): - return util.unquote(text) - - -def _parse_keyvalue_args(name): - m = re.match(r'(\w+)://(.*)', name) - if m is not None: - (name, args) = m.group(1, 2) - opts = dict(util.parse_qsl(args)) - return URL(name, *opts) - else: - return None diff --git a/python/sqlalchemy/engine/util.py b/python/sqlalchemy/engine/util.py deleted file mode 100644 index 3734c996..00000000 --- a/python/sqlalchemy/engine/util.py +++ /dev/null @@ -1,74 +0,0 @@ -# engine/util.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .. import util - - -def connection_memoize(key): - """Decorator, memoize a function in a connection.info stash. - - Only applicable to functions which take no arguments other than a - connection. The memo will be stored in ``connection.info[key]``. - """ - - @util.decorator - def decorated(fn, self, connection): - connection = connection.connect() - try: - return connection.info[key] - except KeyError: - connection.info[key] = val = fn(self, connection) - return val - - return decorated - - -def py_fallback(): - def _distill_params(multiparams, params): - """Given arguments from the calling form *multiparams, **params, - return a list of bind parameter structures, usually a list of - dictionaries. - - In the case of 'raw' execution which accepts positional parameters, - it may be a list of tuples or lists. 
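The distilled call forms can be sketched directly; ``_distill_params`` is module-private, shown purely for illustration::

    from sqlalchemy.engine.util import _distill_params

    # execute(stmt)                       -> no parameters
    print(_distill_params((), {}))                        # []
    # execute(stmt, {"x": 5})             -> single dictionary
    print(_distill_params(({"x": 5},), {}))               # [{'x': 5}]
    # execute(stmt, [{"x": 5}, {"x": 6}]) -> executemany-style list
    print(_distill_params(([{"x": 5}, {"x": 6}],), {}))   # [{'x': 5}, {'x': 6}]
    # execute(stmt, "value")              -> single positional value
    print(_distill_params(("value",), {}))                # [['value']]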
- - """ - - if not multiparams: - if params: - return [params] - else: - return [] - elif len(multiparams) == 1: - zero = multiparams[0] - if isinstance(zero, (list, tuple)): - if not zero or hasattr(zero[0], '__iter__') and \ - not hasattr(zero[0], 'strip'): - # execute(stmt, [{}, {}, {}, ...]) - # execute(stmt, [(), (), (), ...]) - return zero - else: - # execute(stmt, ("value", "value")) - return [zero] - elif hasattr(zero, 'keys'): - # execute(stmt, {"key":"value"}) - return [zero] - else: - # execute(stmt, "value") - return [[zero]] - else: - if hasattr(multiparams[0], '__iter__') and \ - not hasattr(multiparams[0], 'strip'): - return multiparams - else: - return [multiparams] - - return locals() -try: - from sqlalchemy.cutils import _distill_params -except ImportError: - globals().update(py_fallback()) diff --git a/python/sqlalchemy/event/__init__.py b/python/sqlalchemy/event/__init__.py deleted file mode 100644 index c9bdb9a0..00000000 --- a/python/sqlalchemy/event/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# event/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .api import CANCEL, NO_RETVAL, listen, listens_for, remove, contains -from .base import Events, dispatcher -from .attr import RefCollection -from .legacy import _legacy_signature diff --git a/python/sqlalchemy/event/api.py b/python/sqlalchemy/event/api.py deleted file mode 100644 index 86ef094d..00000000 --- a/python/sqlalchemy/event/api.py +++ /dev/null @@ -1,188 +0,0 @@ -# event/api.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Public API functions for the event system. - -""" -from __future__ import absolute_import - -from .. import util, exc -from .base import _registrars -from .registry import _EventKey - -CANCEL = util.symbol('CANCEL') -NO_RETVAL = util.symbol('NO_RETVAL') - - -def _event_key(target, identifier, fn): - for evt_cls in _registrars[identifier]: - tgt = evt_cls._accept_with(target) - if tgt is not None: - return _EventKey(target, identifier, fn, tgt) - else: - raise exc.InvalidRequestError("No such event '%s' for target '%s'" % - (identifier, target)) - - -def listen(target, identifier, fn, *args, **kw): - """Register a listener function for the given target. - - e.g.:: - - from sqlalchemy import event - from sqlalchemy.schema import UniqueConstraint - - def unique_constraint_name(const, table): - const.name = "uq_%s_%s" % ( - table.name, - list(const.columns)[0].name - ) - event.listen( - UniqueConstraint, - "after_parent_attach", - unique_constraint_name) - - - A given function can also be invoked for only the first invocation - of the event using the ``once`` argument:: - - def on_config(): - do_config() - - event.listen(Mapper, "before_configure", on_config, once=True) - - .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen` - and :func:`.event.listens_for`. - - .. note:: - - The :func:`.listen` function cannot be called at the same time - that the target event is being run. This has implications - for thread safety, and also means an event cannot be added - from inside the listener function for itself. The list of - events to be run are present inside of a mutable collection - that can't be changed during iteration. 
- - Event registration and removal is not intended to be a "high - velocity" operation; it is a configurational operation. For - systems that need to quickly associate and deassociate with - events at high scale, use a mutable structure that is handled - from inside of a single listener. - - .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now - used as the container for the list of events, which explicitly - disallows collection mutation while the collection is being - iterated. - - .. seealso:: - - :func:`.listens_for` - - :func:`.remove` - - """ - - _event_key(target, identifier, fn).listen(*args, **kw) - - -def listens_for(target, identifier, *args, **kw): - """Decorate a function as a listener for the given target + identifier. - - e.g.:: - - from sqlalchemy import event - from sqlalchemy.schema import UniqueConstraint - - @event.listens_for(UniqueConstraint, "after_parent_attach") - def unique_constraint_name(const, table): - const.name = "uq_%s_%s" % ( - table.name, - list(const.columns)[0].name - ) - - A given function can also be invoked for only the first invocation - of the event using the ``once`` argument:: - - @event.listens_for(Mapper, "before_configure", once=True) - def on_config(): - do_config() - - - .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen` - and :func:`.event.listens_for`. - - .. seealso:: - - :func:`.listen` - general description of event listening - - """ - def decorate(fn): - listen(target, identifier, fn, *args, **kw) - return fn - return decorate - - -def remove(target, identifier, fn): - """Remove an event listener. - - The arguments here should match exactly those which were sent to - :func:`.listen`; all the event registration which proceeded as a result - of this call will be reverted by calling :func:`.remove` with the same - arguments. - - e.g.:: - - # if a function was registered like this... - @event.listens_for(SomeMappedClass, "before_insert", propagate=True) - def my_listener_function(*arg): - pass - - # ... it's removed like this - event.remove(SomeMappedClass, "before_insert", my_listener_function) - - Above, the listener function associated with ``SomeMappedClass`` was also - propagated to subclasses of ``SomeMappedClass``; the :func:`.remove` - function will revert all of these operations. - - .. versionadded:: 0.9.0 - - .. note:: - - The :func:`.remove` function cannot be called at the same time - that the target event is being run. This has implications - for thread safety, and also means an event cannot be removed - from inside the listener function for itself. The list of - events to be run are present inside of a mutable collection - that can't be changed during iteration. - - Event registration and removal is not intended to be a "high - velocity" operation; it is a configurational operation. For - systems that need to quickly associate and deassociate with - events at high scale, use a mutable structure that is handled - from inside of a single listener. - - .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now - used as the container for the list of events, which explicitly - disallows collection mutation while the collection is being - iterated. - - .. seealso:: - - :func:`.listen` - - """ - _event_key(target, identifier, fn).remove() - - -def contains(target, identifier, fn): - """Return True if the given target/ident/fn is set up to listen. - - .. 
versionadded:: 0.9.0 - - """ - - return _event_key(target, identifier, fn).contains() diff --git a/python/sqlalchemy/event/attr.py b/python/sqlalchemy/event/attr.py deleted file mode 100644 index 8a88e40e..00000000 --- a/python/sqlalchemy/event/attr.py +++ /dev/null @@ -1,373 +0,0 @@ -# event/attr.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Attribute implementation for _Dispatch classes. - -The various listener targets for a particular event class are represented -as attributes, which refer to collections of listeners to be fired off. -These collections can exist at the class level as well as at the instance -level. An event is fired off using code like this:: - - some_object.dispatch.first_connect(arg1, arg2) - -Above, ``some_object.dispatch`` would be an instance of ``_Dispatch`` and -``first_connect`` is typically an instance of ``_ListenerCollection`` -if event listeners are present, or ``_EmptyListener`` if none are present. - -The attribute mechanics here spend effort trying to ensure listener functions -are available with a minimum of function call overhead, that unnecessary -objects aren't created (i.e. many empty per-instance listener collections), -as well as that everything is garbage collectable when owning references are -lost. Other features such as "propagation" of listener functions across -many ``_Dispatch`` instances, "joining" of multiple ``_Dispatch`` instances, -as well as support for subclass propagation (e.g. events assigned to -``Pool`` vs. ``QueuePool``) are all implemented here. - -""" - -from __future__ import absolute_import, with_statement - -from .. import util -from ..util import threading -from . import registry -from . import legacy -from itertools import chain -import weakref -import collections - - -class RefCollection(util.MemoizedSlots): - __slots__ = 'ref', - - def _memoized_attr_ref(self): - return weakref.ref(self, registry._collection_gced) - - -class _ClsLevelDispatch(RefCollection): - """Class-level events on :class:`._Dispatch` classes.""" - - __slots__ = ('name', 'arg_names', 'has_kw', - 'legacy_signatures', '_clslevel', '__weakref__') - - def __init__(self, parent_dispatch_cls, fn): - self.name = fn.__name__ - argspec = util.inspect_getargspec(fn) - self.arg_names = argspec.args[1:] - self.has_kw = bool(argspec.keywords) - self.legacy_signatures = list(reversed( - sorted( - getattr(fn, '_legacy_signatures', []), - key=lambda s: s[0] - ) - )) - fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn) - - self._clslevel = weakref.WeakKeyDictionary() - - def _adjust_fn_spec(self, fn, named): - if named: - fn = self._wrap_fn_for_kw(fn) - if self.legacy_signatures: - try: - argspec = util.get_callable_argspec(fn, no_self=True) - except TypeError: - pass - else: - fn = legacy._wrap_fn_for_legacy(self, fn, argspec) - return fn - - def _wrap_fn_for_kw(self, fn): - def wrap_kw(*args, **kw): - argdict = dict(zip(self.arg_names, args)) - argdict.update(kw) - return fn(**argdict) - return wrap_kw - - def insert(self, event_key, propagate): - target = event_key.dispatch_target - assert isinstance(target, type), \ - "Class-level Event targets must be classes." 
- stack = [target] - while stack: - cls = stack.pop(0) - stack.extend(cls.__subclasses__()) - if cls is not target and cls not in self._clslevel: - self.update_subclass(cls) - else: - if cls not in self._clslevel: - self._clslevel[cls] = collections.deque() - self._clslevel[cls].appendleft(event_key._listen_fn) - registry._stored_in_collection(event_key, self) - - def append(self, event_key, propagate): - target = event_key.dispatch_target - assert isinstance(target, type), \ - "Class-level Event targets must be classes." - - stack = [target] - while stack: - cls = stack.pop(0) - stack.extend(cls.__subclasses__()) - if cls is not target and cls not in self._clslevel: - self.update_subclass(cls) - else: - if cls not in self._clslevel: - self._clslevel[cls] = collections.deque() - self._clslevel[cls].append(event_key._listen_fn) - registry._stored_in_collection(event_key, self) - - def update_subclass(self, target): - if target not in self._clslevel: - self._clslevel[target] = collections.deque() - clslevel = self._clslevel[target] - for cls in target.__mro__[1:]: - if cls in self._clslevel: - clslevel.extend([ - fn for fn - in self._clslevel[cls] - if fn not in clslevel - ]) - - def remove(self, event_key): - target = event_key.dispatch_target - stack = [target] - while stack: - cls = stack.pop(0) - stack.extend(cls.__subclasses__()) - if cls in self._clslevel: - self._clslevel[cls].remove(event_key._listen_fn) - registry._removed_from_collection(event_key, self) - - def clear(self): - """Clear all class level listeners""" - - to_clear = set() - for dispatcher in self._clslevel.values(): - to_clear.update(dispatcher) - dispatcher.clear() - registry._clear(self, to_clear) - - def for_modify(self, obj): - """Return an event collection which can be modified. - - For _ClsLevelDispatch at the class level of - a dispatcher, this returns self. - - """ - return self - - -class _InstanceLevelDispatch(RefCollection): - __slots__ = () - - def _adjust_fn_spec(self, fn, named): - return self.parent._adjust_fn_spec(fn, named) - - -class _EmptyListener(_InstanceLevelDispatch): - """Serves as a proxy interface to the events - served by a _ClsLevelDispatch, when there are no - instance-level events present. - - Is replaced by _ListenerCollection when instance-level - events are added. - - """ - - propagate = frozenset() - listeners = () - - __slots__ = 'parent', 'parent_listeners', 'name' - - def __init__(self, parent, target_cls): - if target_cls not in parent._clslevel: - parent.update_subclass(target_cls) - self.parent = parent # _ClsLevelDispatch - self.parent_listeners = parent._clslevel[target_cls] - self.name = parent.name - - def for_modify(self, obj): - """Return an event collection which can be modified. - - For _EmptyListener at the instance level of - a dispatcher, this generates a new - _ListenerCollection, applies it to the instance, - and returns it. 
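The subclass walk performed by ``insert()`` and ``append()`` above is what lets a class-level registration reach subclasses; a sketch using the public API and the standard pool classes::

    from sqlalchemy import event
    from sqlalchemy.pool import Pool

    def on_connect(dbapi_connection, connection_record):
        print("new DBAPI connection")

    # stored at the class level; update_subclass() propagates the listener
    # to QueuePool and every other Pool subclass
    event.listen(Pool, 'connect', on_connect)

    assert event.contains(Pool, 'connect', on_connect)
    event.remove(Pool, 'connect', on_connect)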
- - """ - result = _ListenerCollection(self.parent, obj._instance_cls) - if getattr(obj, self.name) is self: - setattr(obj, self.name, result) - else: - assert isinstance(getattr(obj, self.name), _JoinedListener) - return result - - def _needs_modify(self, *args, **kw): - raise NotImplementedError("need to call for_modify()") - - exec_once = insert = append = remove = clear = _needs_modify - - def __call__(self, *args, **kw): - """Execute this event.""" - - for fn in self.parent_listeners: - fn(*args, **kw) - - def __len__(self): - return len(self.parent_listeners) - - def __iter__(self): - return iter(self.parent_listeners) - - def __bool__(self): - return bool(self.parent_listeners) - - __nonzero__ = __bool__ - - -class _CompoundListener(_InstanceLevelDispatch): - __slots__ = '_exec_once_mutex', '_exec_once' - - def _memoized_attr__exec_once_mutex(self): - return threading.Lock() - - def exec_once(self, *args, **kw): - """Execute this event, but only if it has not been - executed already for this collection.""" - - if not self._exec_once: - with self._exec_once_mutex: - if not self._exec_once: - try: - self(*args, **kw) - finally: - self._exec_once = True - - def __call__(self, *args, **kw): - """Execute this event.""" - - for fn in self.parent_listeners: - fn(*args, **kw) - for fn in self.listeners: - fn(*args, **kw) - - def __len__(self): - return len(self.parent_listeners) + len(self.listeners) - - def __iter__(self): - return chain(self.parent_listeners, self.listeners) - - def __bool__(self): - return bool(self.listeners or self.parent_listeners) - - __nonzero__ = __bool__ - - -class _ListenerCollection(_CompoundListener): - """Instance-level attributes on instances of :class:`._Dispatch`. - - Represents a collection of listeners. - - As of 0.7.9, _ListenerCollection is only first - created via the _EmptyListener.for_modify() method. - - """ - - __slots__ = ( - 'parent_listeners', 'parent', 'name', 'listeners', - 'propagate', '__weakref__') - - def __init__(self, parent, target_cls): - if target_cls not in parent._clslevel: - parent.update_subclass(target_cls) - self._exec_once = False - self.parent_listeners = parent._clslevel[target_cls] - self.parent = parent - self.name = parent.name - self.listeners = collections.deque() - self.propagate = set() - - def for_modify(self, obj): - """Return an event collection which can be modified. - - For _ListenerCollection at the instance level of - a dispatcher, this returns self. 
- - """ - return self - - def _update(self, other, only_propagate=True): - """Populate from the listeners in another :class:`_Dispatch` - object.""" - - existing_listeners = self.listeners - existing_listener_set = set(existing_listeners) - self.propagate.update(other.propagate) - other_listeners = [l for l - in other.listeners - if l not in existing_listener_set - and not only_propagate or l in self.propagate - ] - - existing_listeners.extend(other_listeners) - - to_associate = other.propagate.union(other_listeners) - registry._stored_in_collection_multi(self, other, to_associate) - - def insert(self, event_key, propagate): - if event_key.prepend_to_list(self, self.listeners): - if propagate: - self.propagate.add(event_key._listen_fn) - - def append(self, event_key, propagate): - if event_key.append_to_list(self, self.listeners): - if propagate: - self.propagate.add(event_key._listen_fn) - - def remove(self, event_key): - self.listeners.remove(event_key._listen_fn) - self.propagate.discard(event_key._listen_fn) - registry._removed_from_collection(event_key, self) - - def clear(self): - registry._clear(self, self.listeners) - self.propagate.clear() - self.listeners.clear() - - -class _JoinedListener(_CompoundListener): - __slots__ = 'parent', 'name', 'local', 'parent_listeners' - - def __init__(self, parent, name, local): - self._exec_once = False - self.parent = parent - self.name = name - self.local = local - self.parent_listeners = self.local - - @property - def listeners(self): - return getattr(self.parent, self.name) - - def _adjust_fn_spec(self, fn, named): - return self.local._adjust_fn_spec(fn, named) - - def for_modify(self, obj): - self.local = self.parent_listeners = self.local.for_modify(obj) - return self - - def insert(self, event_key, propagate): - self.local.insert(event_key, propagate) - - def append(self, event_key, propagate): - self.local.append(event_key, propagate) - - def remove(self, event_key): - self.local.remove(event_key) - - def clear(self): - raise NotImplementedError() diff --git a/python/sqlalchemy/event/base.py b/python/sqlalchemy/event/base.py deleted file mode 100644 index 1fe83eea..00000000 --- a/python/sqlalchemy/event/base.py +++ /dev/null @@ -1,289 +0,0 @@ -# event/base.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Base implementation classes. - -The public-facing ``Events`` serves as the base class for an event interface; -its public attributes represent different kinds of events. These attributes -are mirrored onto a ``_Dispatch`` class, which serves as a container for -collections of listener functions. These collections are represented both -at the class level of a particular ``_Dispatch`` class as well as within -instances of ``_Dispatch``. - -""" -from __future__ import absolute_import - -import weakref - -from .. import util -from .attr import _JoinedListener, \ - _EmptyListener, _ClsLevelDispatch - -_registrars = util.defaultdict(list) - - -def _is_event_name(name): - return not name.startswith('_') and name != 'dispatch' - - -class _UnpickleDispatch(object): - """Serializable callable that re-generates an instance of - :class:`_Dispatch` given a particular :class:`.Events` subclass. 
- - """ - - def __call__(self, _instance_cls): - for cls in _instance_cls.__mro__: - if 'dispatch' in cls.__dict__: - return cls.__dict__['dispatch'].\ - dispatch_cls._for_class(_instance_cls) - else: - raise AttributeError("No class with a 'dispatch' member present.") - - -class _Dispatch(object): - """Mirror the event listening definitions of an Events class with - listener collections. - - Classes which define a "dispatch" member will return a - non-instantiated :class:`._Dispatch` subclass when the member - is accessed at the class level. When the "dispatch" member is - accessed at the instance level of its owner, an instance - of the :class:`._Dispatch` class is returned. - - A :class:`._Dispatch` class is generated for each :class:`.Events` - class defined, by the :func:`._create_dispatcher_class` function. - The original :class:`.Events` classes remain untouched. - This decouples the construction of :class:`.Events` subclasses from - the implementation used by the event internals, and allows - inspecting tools like Sphinx to work in an unsurprising - way against the public API. - - """ - - # in one ORM edge case, an attribute is added to _Dispatch, - # so __dict__ is used in just that case and potentially others. - __slots__ = '_parent', '_instance_cls', '__dict__', '_empty_listeners' - - _empty_listener_reg = weakref.WeakKeyDictionary() - - def __init__(self, parent, instance_cls=None): - self._parent = parent - self._instance_cls = instance_cls - if instance_cls: - try: - self._empty_listeners = self._empty_listener_reg[instance_cls] - except KeyError: - self._empty_listeners = \ - self._empty_listener_reg[instance_cls] = dict( - (ls.name, _EmptyListener(ls, instance_cls)) - for ls in parent._event_descriptors - ) - else: - self._empty_listeners = {} - - def __getattr__(self, name): - # assign EmptyListeners as attributes on demand - # to reduce startup time for new dispatch objects - try: - ls = self._empty_listeners[name] - except KeyError: - raise AttributeError(name) - else: - setattr(self, ls.name, ls) - return ls - - @property - def _event_descriptors(self): - for k in self._event_names: - yield getattr(self, k) - - def _for_class(self, instance_cls): - return self.__class__(self, instance_cls) - - def _for_instance(self, instance): - instance_cls = instance.__class__ - return self._for_class(instance_cls) - - @property - def _listen(self): - return self._events._listen - - def _join(self, other): - """Create a 'join' of this :class:`._Dispatch` and another. - - This new dispatcher will dispatch events to both - :class:`._Dispatch` objects. 
- - """ - if '_joined_dispatch_cls' not in self.__class__.__dict__: - cls = type( - "Joined%s" % self.__class__.__name__, - (_JoinedDispatcher, ), {'__slots__': self._event_names} - ) - - self.__class__._joined_dispatch_cls = cls - return self._joined_dispatch_cls(self, other) - - def __reduce__(self): - return _UnpickleDispatch(), (self._instance_cls, ) - - def _update(self, other, only_propagate=True): - """Populate from the listeners in another :class:`_Dispatch` - object.""" - for ls in other._event_descriptors: - if isinstance(ls, _EmptyListener): - continue - getattr(self, ls.name).\ - for_modify(self)._update(ls, only_propagate=only_propagate) - - def _clear(self): - for ls in self._event_descriptors: - ls.for_modify(self).clear() - - -class _EventMeta(type): - """Intercept new Event subclasses and create - associated _Dispatch classes.""" - - def __init__(cls, classname, bases, dict_): - _create_dispatcher_class(cls, classname, bases, dict_) - return type.__init__(cls, classname, bases, dict_) - - -def _create_dispatcher_class(cls, classname, bases, dict_): - """Create a :class:`._Dispatch` class corresponding to an - :class:`.Events` class.""" - - # there's all kinds of ways to do this, - # i.e. make a Dispatch class that shares the '_listen' method - # of the Event class, this is the straight monkeypatch. - if hasattr(cls, 'dispatch'): - dispatch_base = cls.dispatch.__class__ - else: - dispatch_base = _Dispatch - - event_names = [k for k in dict_ if _is_event_name(k)] - dispatch_cls = type("%sDispatch" % classname, - (dispatch_base, ), {'__slots__': event_names}) - - dispatch_cls._event_names = event_names - - dispatch_inst = cls._set_dispatch(cls, dispatch_cls) - for k in dispatch_cls._event_names: - setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k])) - _registrars[k].append(cls) - - for super_ in dispatch_cls.__bases__: - if issubclass(super_, _Dispatch) and super_ is not _Dispatch: - for ls in super_._events.dispatch._event_descriptors: - setattr(dispatch_inst, ls.name, ls) - dispatch_cls._event_names.append(ls.name) - - if getattr(cls, '_dispatch_target', None): - cls._dispatch_target.dispatch = dispatcher(cls) - - -def _remove_dispatcher(cls): - for k in cls.dispatch._event_names: - _registrars[k].remove(cls) - if not _registrars[k]: - del _registrars[k] - - -class Events(util.with_metaclass(_EventMeta, object)): - """Define event listening functions for a particular target type.""" - - @staticmethod - def _set_dispatch(cls, dispatch_cls): - # this allows an Events subclass to define additional utility - # methods made available to the target via - # "self.dispatch._events." - # @staticemethod to allow easy "super" calls while in a metaclass - # constructor. - cls.dispatch = dispatch_cls(None) - dispatch_cls._events = cls - return cls.dispatch - - @classmethod - def _accept_with(cls, target): - # Mapper, ClassManager, Session override this to - # also accept classes, scoped_sessions, sessionmakers, etc. 
- if hasattr(target, 'dispatch') and ( - - isinstance(target.dispatch, cls.dispatch.__class__) or - - - ( - isinstance(target.dispatch, type) and - isinstance(target.dispatch, cls.dispatch.__class__) - ) or - - ( - isinstance(target.dispatch, _JoinedDispatcher) and - isinstance(target.dispatch.parent, cls.dispatch.__class__) - ) - - - ): - return target - else: - return None - - @classmethod - def _listen(cls, event_key, propagate=False, insert=False, named=False): - event_key.base_listen(propagate=propagate, insert=insert, named=named) - - @classmethod - def _remove(cls, event_key): - event_key.remove() - - @classmethod - def _clear(cls): - cls.dispatch._clear() - - -class _JoinedDispatcher(object): - """Represent a connection between two _Dispatch objects.""" - - __slots__ = 'local', 'parent', '_instance_cls' - - def __init__(self, local, parent): - self.local = local - self.parent = parent - self._instance_cls = self.local._instance_cls - - def __getattr__(self, name): - # assign _JoinedListeners as attributes on demand - # to reduce startup time for new dispatch objects - ls = getattr(self.local, name) - jl = _JoinedListener(self.parent, ls.name, ls) - setattr(self, ls.name, jl) - return jl - - @property - def _listen(self): - return self.parent._listen - - -class dispatcher(object): - """Descriptor used by target classes to - deliver the _Dispatch class at the class level - and produce new _Dispatch instances for target - instances. - - """ - - def __init__(self, events): - self.dispatch_cls = events.dispatch - self.events = events - - def __get__(self, obj, cls): - if obj is None: - return self.dispatch_cls - obj.__dict__['dispatch'] = disp = self.dispatch_cls._for_instance(obj) - return disp diff --git a/python/sqlalchemy/event/legacy.py b/python/sqlalchemy/event/legacy.py deleted file mode 100644 index daa74226..00000000 --- a/python/sqlalchemy/event/legacy.py +++ /dev/null @@ -1,169 +0,0 @@ -# event/legacy.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Routines to handle adaption of legacy call signatures, -generation of deprecation notes and docstrings. - -""" - -from .. 
import util - - -def _legacy_signature(since, argnames, converter=None): - def leg(fn): - if not hasattr(fn, '_legacy_signatures'): - fn._legacy_signatures = [] - fn._legacy_signatures.append((since, argnames, converter)) - return fn - return leg - - -def _wrap_fn_for_legacy(dispatch_collection, fn, argspec): - for since, argnames, conv in dispatch_collection.legacy_signatures: - if argnames[-1] == "**kw": - has_kw = True - argnames = argnames[0:-1] - else: - has_kw = False - - if len(argnames) == len(argspec.args) \ - and has_kw is bool(argspec.keywords): - - if conv: - assert not has_kw - - def wrap_leg(*args): - return fn(*conv(*args)) - else: - def wrap_leg(*args, **kw): - argdict = dict(zip(dispatch_collection.arg_names, args)) - args = [argdict[name] for name in argnames] - if has_kw: - return fn(*args, **kw) - else: - return fn(*args) - return wrap_leg - else: - return fn - - -def _indent(text, indent): - return "\n".join( - indent + line - for line in text.split("\n") - ) - - -def _standard_listen_example(dispatch_collection, sample_target, fn): - example_kw_arg = _indent( - "\n".join( - "%(arg)s = kw['%(arg)s']" % {"arg": arg} - for arg in dispatch_collection.arg_names[0:2] - ), - " ") - if dispatch_collection.legacy_signatures: - current_since = max(since for since, args, conv - in dispatch_collection.legacy_signatures) - else: - current_since = None - text = ( - "from sqlalchemy import event\n\n" - "# standard decorator style%(current_since)s\n" - "@event.listens_for(%(sample_target)s, '%(event_name)s')\n" - "def receive_%(event_name)s(" - "%(named_event_arguments)s%(has_kw_arguments)s):\n" - " \"listen for the '%(event_name)s' event\"\n" - "\n # ... (event handling logic) ...\n" - ) - - if len(dispatch_collection.arg_names) > 3: - text += ( - - "\n# named argument style (new in 0.9)\n" - "@event.listens_for(" - "%(sample_target)s, '%(event_name)s', named=True)\n" - "def receive_%(event_name)s(**kw):\n" - " \"listen for the '%(event_name)s' event\"\n" - "%(example_kw_arg)s\n" - "\n # ... (event handling logic) ...\n" - ) - - text %= { - "current_since": " (arguments as of %s)" % - current_since if current_since else "", - "event_name": fn.__name__, - "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "", - "named_event_arguments": ", ".join(dispatch_collection.arg_names), - "example_kw_arg": example_kw_arg, - "sample_target": sample_target - } - return text - - -def _legacy_listen_examples(dispatch_collection, sample_target, fn): - text = "" - for since, args, conv in dispatch_collection.legacy_signatures: - text += ( - "\n# legacy calling style (pre-%(since)s)\n" - "@event.listens_for(%(sample_target)s, '%(event_name)s')\n" - "def receive_%(event_name)s(" - "%(named_event_arguments)s%(has_kw_arguments)s):\n" - " \"listen for the '%(event_name)s' event\"\n" - "\n # ... (event handling logic) ...\n" % { - "since": since, - "event_name": fn.__name__, - "has_kw_arguments": " **kw" - if dispatch_collection.has_kw else "", - "named_event_arguments": ", ".join(args), - "sample_target": sample_target - } - ) - return text - - -def _version_signature_changes(dispatch_collection): - since, args, conv = dispatch_collection.legacy_signatures[0] - return ( - "\n.. versionchanged:: %(since)s\n" - " The ``%(event_name)s`` event now accepts the \n" - " arguments ``%(named_event_arguments)s%(has_kw_arguments)s``.\n" - " Listener functions which accept the previous argument \n" - " signature(s) listed above will be automatically \n" - " adapted to the new signature." 
% { - "since": since, - "event_name": dispatch_collection.name, - "named_event_arguments": ", ".join(dispatch_collection.arg_names), - "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "" - } - ) - - -def _augment_fn_docs(dispatch_collection, parent_dispatch_cls, fn): - header = ".. container:: event_signatures\n\n"\ - " Example argument forms::\n"\ - "\n" - - sample_target = getattr(parent_dispatch_cls, "_target_class_doc", "obj") - text = ( - header + - _indent( - _standard_listen_example( - dispatch_collection, sample_target, fn), - " " * 8) - ) - if dispatch_collection.legacy_signatures: - text += _indent( - _legacy_listen_examples( - dispatch_collection, sample_target, fn), - " " * 8) - - text += _version_signature_changes(dispatch_collection) - - return util.inject_docstring_text(fn.__doc__, - text, - 1 - ) diff --git a/python/sqlalchemy/event/registry.py b/python/sqlalchemy/event/registry.py deleted file mode 100644 index a6eabb2f..00000000 --- a/python/sqlalchemy/event/registry.py +++ /dev/null @@ -1,262 +0,0 @@ -# event/registry.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides managed registration services on behalf of :func:`.listen` -arguments. - -By "managed registration", we mean that event listening functions and -other objects can be added to various collections in such a way that their -membership in all those collections can be revoked at once, based on -an equivalent :class:`._EventKey`. - -""" - -from __future__ import absolute_import - -import weakref -import collections -import types -from .. import exc, util - - -_key_to_collection = collections.defaultdict(dict) -""" -Given an original listen() argument, can locate all -listener collections and the listener fn contained - -(target, identifier, fn) -> { - ref(listenercollection) -> ref(listener_fn) - ref(listenercollection) -> ref(listener_fn) - ref(listenercollection) -> ref(listener_fn) - } -""" - -_collection_to_key = collections.defaultdict(dict) -""" -Given a _ListenerCollection or _ClsLevelListener, can locate -all the original listen() arguments and the listener fn contained - -ref(listenercollection) -> { - ref(listener_fn) -> (target, identifier, fn), - ref(listener_fn) -> (target, identifier, fn), - ref(listener_fn) -> (target, identifier, fn), - } -""" - - -def _collection_gced(ref): - # defaultdict, so can't get a KeyError - if not _collection_to_key or ref not in _collection_to_key: - return - listener_to_key = _collection_to_key.pop(ref) - for key in listener_to_key.values(): - if key in _key_to_collection: - # defaultdict, so can't get a KeyError - dispatch_reg = _key_to_collection[key] - dispatch_reg.pop(ref) - if not dispatch_reg: - _key_to_collection.pop(key) - - -def _stored_in_collection(event_key, owner): - key = event_key._key - - dispatch_reg = _key_to_collection[key] - - owner_ref = owner.ref - listen_ref = weakref.ref(event_key._listen_fn) - - if owner_ref in dispatch_reg: - return False - - dispatch_reg[owner_ref] = listen_ref - - listener_to_key = _collection_to_key[owner_ref] - listener_to_key[listen_ref] = key - - return True - - -def _removed_from_collection(event_key, owner): - key = event_key._key - - dispatch_reg = _key_to_collection[key] - - listen_ref = weakref.ref(event_key._listen_fn) - - owner_ref = owner.ref - dispatch_reg.pop(owner_ref, None) - if not dispatch_reg: - del _key_to_collection[key] - 
- if owner_ref in _collection_to_key: - listener_to_key = _collection_to_key[owner_ref] - listener_to_key.pop(listen_ref) - - -def _stored_in_collection_multi(newowner, oldowner, elements): - if not elements: - return - - oldowner = oldowner.ref - newowner = newowner.ref - - old_listener_to_key = _collection_to_key[oldowner] - new_listener_to_key = _collection_to_key[newowner] - - for listen_fn in elements: - listen_ref = weakref.ref(listen_fn) - key = old_listener_to_key[listen_ref] - dispatch_reg = _key_to_collection[key] - if newowner in dispatch_reg: - assert dispatch_reg[newowner] == listen_ref - else: - dispatch_reg[newowner] = listen_ref - - new_listener_to_key[listen_ref] = key - - -def _clear(owner, elements): - if not elements: - return - - owner = owner.ref - listener_to_key = _collection_to_key[owner] - for listen_fn in elements: - listen_ref = weakref.ref(listen_fn) - key = listener_to_key[listen_ref] - dispatch_reg = _key_to_collection[key] - dispatch_reg.pop(owner, None) - - if not dispatch_reg: - del _key_to_collection[key] - - -class _EventKey(object): - """Represent :func:`.listen` arguments. - """ - - __slots__ = ( - 'target', 'identifier', 'fn', 'fn_key', 'fn_wrap', 'dispatch_target' - ) - - def __init__(self, target, identifier, - fn, dispatch_target, _fn_wrap=None): - self.target = target - self.identifier = identifier - self.fn = fn - if isinstance(fn, types.MethodType): - self.fn_key = id(fn.__func__), id(fn.__self__) - else: - self.fn_key = id(fn) - self.fn_wrap = _fn_wrap - self.dispatch_target = dispatch_target - - @property - def _key(self): - return (id(self.target), self.identifier, self.fn_key) - - def with_wrapper(self, fn_wrap): - if fn_wrap is self._listen_fn: - return self - else: - return _EventKey( - self.target, - self.identifier, - self.fn, - self.dispatch_target, - _fn_wrap=fn_wrap - ) - - def with_dispatch_target(self, dispatch_target): - if dispatch_target is self.dispatch_target: - return self - else: - return _EventKey( - self.target, - self.identifier, - self.fn, - dispatch_target, - _fn_wrap=self.fn_wrap - ) - - def listen(self, *args, **kw): - once = kw.pop("once", False) - named = kw.pop("named", False) - - target, identifier, fn = \ - self.dispatch_target, self.identifier, self._listen_fn - - dispatch_collection = getattr(target.dispatch, identifier) - - adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named) - - self = self.with_wrapper(adjusted_fn) - - if once: - self.with_wrapper( - util.only_once(self._listen_fn)).listen(*args, **kw) - else: - self.dispatch_target.dispatch._listen(self, *args, **kw) - - def remove(self): - key = self._key - - if key not in _key_to_collection: - raise exc.InvalidRequestError( - "No listeners found for event %s / %r / %s " % - (self.target, self.identifier, self.fn) - ) - dispatch_reg = _key_to_collection.pop(key) - - for collection_ref, listener_ref in dispatch_reg.items(): - collection = collection_ref() - listener_fn = listener_ref() - if collection is not None and listener_fn is not None: - collection.remove(self.with_wrapper(listener_fn)) - - def contains(self): - """Return True if this event key is registered to listen. 
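This bookkeeping is what makes the public ``event.contains()`` and ``event.remove()`` functions work: a single ``_EventKey`` locates every collection a listener was placed in. For example::

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite://")

    def my_listener(conn, cursor, statement, parameters, context, executemany):
        pass

    event.listen(engine, "before_cursor_execute", my_listener)
    assert event.contains(engine, "before_cursor_execute", my_listener)

    # removal revokes membership in all collections at once
    event.remove(engine, "before_cursor_execute", my_listener)
    assert not event.contains(engine, "before_cursor_execute", my_listener)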
- """ - return self._key in _key_to_collection - - def base_listen(self, propagate=False, insert=False, - named=False): - - target, identifier, fn = \ - self.dispatch_target, self.identifier, self._listen_fn - - dispatch_collection = getattr(target.dispatch, identifier) - - if insert: - dispatch_collection.\ - for_modify(target.dispatch).insert(self, propagate) - else: - dispatch_collection.\ - for_modify(target.dispatch).append(self, propagate) - - @property - def _listen_fn(self): - return self.fn_wrap or self.fn - - def append_to_list(self, owner, list_): - if _stored_in_collection(self, owner): - list_.append(self._listen_fn) - return True - else: - return False - - def remove_from_list(self, owner, list_): - _removed_from_collection(self, owner) - list_.remove(self._listen_fn) - - def prepend_to_list(self, owner, list_): - if _stored_in_collection(self, owner): - list_.appendleft(self._listen_fn) - return True - else: - return False diff --git a/python/sqlalchemy/events.py b/python/sqlalchemy/events.py deleted file mode 100644 index f439d554..00000000 --- a/python/sqlalchemy/events.py +++ /dev/null @@ -1,1096 +0,0 @@ -# sqlalchemy/events.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Core event interfaces.""" - -from . import event, exc -from .pool import Pool -from .engine import Connectable, Engine, Dialect -from .sql.base import SchemaEventTarget - - -class DDLEvents(event.Events): - """ - Define event listeners for schema objects, - that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget` - subclasses, including :class:`.MetaData`, :class:`.Table`, - :class:`.Column`. - - :class:`.MetaData` and :class:`.Table` support events - specifically regarding when CREATE and DROP - DDL is emitted to the database. - - Attachment events are also provided to customize - behavior whenever a child schema element is associated - with a parent, such as, when a :class:`.Column` is associated - with its :class:`.Table`, when a :class:`.ForeignKeyConstraint` - is associated with a :class:`.Table`, etc. - - Example using the ``after_create`` event:: - - from sqlalchemy import event - from sqlalchemy import Table, Column, Metadata, Integer - - m = MetaData() - some_table = Table('some_table', m, Column('data', Integer)) - - def after_create(target, connection, **kw): - connection.execute("ALTER TABLE %s SET name=foo_%s" % - (target.name, target.name)) - - event.listen(some_table, "after_create", after_create) - - DDL events integrate closely with the - :class:`.DDL` class and the :class:`.DDLElement` hierarchy - of DDL clause constructs, which are themselves appropriate - as listener callables:: - - from sqlalchemy import DDL - event.listen( - some_table, - "after_create", - DDL("ALTER TABLE %(table)s SET name=foo_%(table)s") - ) - - The methods here define the name of an event as well - as the names of members that are passed to listener - functions. - - See also: - - :ref:`event_toplevel` - - :class:`.DDLElement` - - :class:`.DDL` - - :ref:`schema_ddl_sequences` - - """ - - _target_class_doc = "SomeSchemaClassOrObject" - _dispatch_target = SchemaEventTarget - - def before_create(self, target, connection, **kw): - """Called before CREATE statements are emitted. - - :param target: the :class:`.MetaData` or :class:`.Table` - object which is the target of the event. 
- :param connection: the :class:`.Connection` where the - CREATE statement or statements will be emitted. - :param \**kw: additional keyword arguments relevant - to the event. The contents of this dictionary - may vary across releases, and include the - list of tables being generated for a metadata-level - event, the checkfirst flag, and other - elements used by internal events. - - """ - - def after_create(self, target, connection, **kw): - """Called after CREATE statements are emitted. - - :param target: the :class:`.MetaData` or :class:`.Table` - object which is the target of the event. - :param connection: the :class:`.Connection` where the - CREATE statement or statements have been emitted. - :param \**kw: additional keyword arguments relevant - to the event. The contents of this dictionary - may vary across releases, and include the - list of tables being generated for a metadata-level - event, the checkfirst flag, and other - elements used by internal events. - - """ - - def before_drop(self, target, connection, **kw): - """Called before DROP statements are emitted. - - :param target: the :class:`.MetaData` or :class:`.Table` - object which is the target of the event. - :param connection: the :class:`.Connection` where the - DROP statement or statements will be emitted. - :param \**kw: additional keyword arguments relevant - to the event. The contents of this dictionary - may vary across releases, and include the - list of tables being generated for a metadata-level - event, the checkfirst flag, and other - elements used by internal events. - - """ - - def after_drop(self, target, connection, **kw): - """Called after DROP statements are emitted. - - :param target: the :class:`.MetaData` or :class:`.Table` - object which is the target of the event. - :param connection: the :class:`.Connection` where the - DROP statement or statements have been emitted. - :param \**kw: additional keyword arguments relevant - to the event. The contents of this dictionary - may vary across releases, and include the - list of tables being generated for a metadata-level - event, the checkfirst flag, and other - elements used by internal events. - - """ - - def before_parent_attach(self, target, parent): - """Called before a :class:`.SchemaItem` is associated with - a parent :class:`.SchemaItem`. - - :param target: the target object - :param parent: the parent to which the target is being attached. - - :func:`.event.listen` also accepts a modifier for this event: - - :param propagate=False: When True, the listener function will - be established for any copies made of the target object, - i.e. those copies that are generated when - :meth:`.Table.tometadata` is used. - - """ - - def after_parent_attach(self, target, parent): - """Called after a :class:`.SchemaItem` is associated with - a parent :class:`.SchemaItem`. - - :param target: the target object - :param parent: the parent to which the target is being attached. - - :func:`.event.listen` also accepts a modifier for this event: - - :param propagate=False: When True, the listener function will - be established for any copies made of the target object, - i.e. those copies that are generated when - :meth:`.Table.tometadata` is used. - - """ - - def column_reflect(self, inspector, table, column_info): - """Called for each unit of 'column info' retrieved when - a :class:`.Table` is being reflected. - - The dictionary of column information as returned by the - dialect is passed, and can be modified. 
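The attachment events above are handy for hooking object construction; a short sketch that reports every ``Table`` as it is attached to its ``MetaData``::

    from sqlalchemy import Column, Integer, MetaData, Table, event

    @event.listens_for(Table, "after_parent_attach")
    def receive_after_parent_attach(target, parent):
        print("table %s attached to %s" % (target.name, parent))

    m = MetaData()
    t = Table("some_table", m, Column("id", Integer, primary_key=True))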
The dictionary - is that returned in each element of the list returned - by :meth:`.reflection.Inspector.get_columns`. - - The event is called before any action is taken against - this dictionary, and the contents can be modified. - The :class:`.Column` specific arguments ``info``, ``key``, - and ``quote`` can also be added to the dictionary and - will be passed to the constructor of :class:`.Column`. - - Note that this event is only meaningful if either - associated with the :class:`.Table` class across the - board, e.g.:: - - from sqlalchemy.schema import Table - from sqlalchemy import event - - def listen_for_reflect(inspector, table, column_info): - "receive a column_reflect event" - # ... - - event.listen( - Table, - 'column_reflect', - listen_for_reflect) - - ...or with a specific :class:`.Table` instance using - the ``listeners`` argument:: - - def listen_for_reflect(inspector, table, column_info): - "receive a column_reflect event" - # ... - - t = Table( - 'sometable', - autoload=True, - listeners=[ - ('column_reflect', listen_for_reflect) - ]) - - This because the reflection process initiated by ``autoload=True`` - completes within the scope of the constructor for :class:`.Table`. - - """ - - -class PoolEvents(event.Events): - """Available events for :class:`.Pool`. - - The methods here define the name of an event as well - as the names of members that are passed to listener - functions. - - e.g.:: - - from sqlalchemy import event - - def my_on_checkout(dbapi_conn, connection_rec, connection_proxy): - "handle an on checkout event" - - event.listen(Pool, 'checkout', my_on_checkout) - - In addition to accepting the :class:`.Pool` class and - :class:`.Pool` instances, :class:`.PoolEvents` also accepts - :class:`.Engine` objects and the :class:`.Engine` class as - targets, which will be resolved to the ``.pool`` attribute of the - given engine or the :class:`.Pool` class:: - - engine = create_engine("postgresql://scott:tiger@localhost/test") - - # will associate with engine.pool - event.listen(engine, 'checkout', my_on_checkout) - - """ - - _target_class_doc = "SomeEngineOrPool" - _dispatch_target = Pool - - @classmethod - def _accept_with(cls, target): - if isinstance(target, type): - if issubclass(target, Engine): - return Pool - elif issubclass(target, Pool): - return target - elif isinstance(target, Engine): - return target.pool - else: - return target - - def connect(self, dbapi_connection, connection_record): - """Called at the moment a particular DBAPI connection is first - created for a given :class:`.Pool`. - - This event allows one to capture the point directly after which - the DBAPI module-level ``.connect()`` method has been used in order - to produce a new DBAPI connection. - - :param dbapi_connection: a DBAPI connection. - - :param connection_record: the :class:`._ConnectionRecord` managing the - DBAPI connection. - - """ - - def first_connect(self, dbapi_connection, connection_record): - """Called exactly once for the first time a DBAPI connection is - checked out from a particular :class:`.Pool`. - - The rationale for :meth:`.PoolEvents.first_connect` is to determine - information about a particular series of database connections based - on the settings used for all connections. 
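A common concrete use of the ``connect`` hook described above is per-connection setup such as enabling SQLite foreign key enforcement; a minimal sketch (database URL and PRAGMA are illustrative)::

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite:///example.db")

    @event.listens_for(engine, "connect")
    def set_sqlite_pragma(dbapi_connection, connection_record):
        # runs once per new DBAPI connection created by the pool
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()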
Since a particular - :class:`.Pool` refers to a single "creator" function (which in terms - of a :class:`.Engine` refers to the URL and connection options used), - it is typically valid to make observations about a single connection - that can be safely assumed to be valid about all subsequent - connections, such as the database version, the server and client - encoding settings, collation settings, and many others. - - :param dbapi_connection: a DBAPI connection. - - :param connection_record: the :class:`._ConnectionRecord` managing the - DBAPI connection. - - """ - - def checkout(self, dbapi_connection, connection_record, connection_proxy): - """Called when a connection is retrieved from the Pool. - - :param dbapi_connection: a DBAPI connection. - - :param connection_record: the :class:`._ConnectionRecord` managing the - DBAPI connection. - - :param connection_proxy: the :class:`._ConnectionFairy` object which - will proxy the public interface of the DBAPI connection for the - lifespan of the checkout. - - If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current - connection will be disposed and a fresh connection retrieved. - Processing of all checkout listeners will abort and restart - using the new connection. - - .. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar event - which occurs upon creation of a new :class:`.Connection`. - - """ - - def checkin(self, dbapi_connection, connection_record): - """Called when a connection returns to the pool. - - Note that the connection may be closed, and may be None if the - connection has been invalidated. ``checkin`` will not be called - for detached connections. (They do not return to the pool.) - - :param dbapi_connection: a DBAPI connection. - - :param connection_record: the :class:`._ConnectionRecord` managing the - DBAPI connection. - - """ - - def reset(self, dbapi_connection, connection_record): - """Called before the "reset" action occurs for a pooled connection. - - This event represents - when the ``rollback()`` method is called on the DBAPI connection - before it is returned to the pool. The behavior of "reset" can - be controlled, including disabled, using the ``reset_on_return`` - pool argument. - - - The :meth:`.PoolEvents.reset` event is usually followed by the - :meth:`.PoolEvents.checkin` event is called, except in those - cases where the connection is discarded immediately after reset. - - :param dbapi_connection: a DBAPI connection. - - :param connection_record: the :class:`._ConnectionRecord` managing the - DBAPI connection. - - .. versionadded:: 0.8 - - .. seealso:: - - :meth:`.ConnectionEvents.rollback` - - :meth:`.ConnectionEvents.commit` - - """ - - def invalidate(self, dbapi_connection, connection_record, exception): - """Called when a DBAPI connection is to be "invalidated". - - This event is called any time the :meth:`._ConnectionRecord.invalidate` - method is invoked, either from API usage or via "auto-invalidation", - without the ``soft`` flag. - - The event occurs before a final attempt to call ``.close()`` on the - connection occurs. - - :param dbapi_connection: a DBAPI connection. - - :param connection_record: the :class:`._ConnectionRecord` managing the - DBAPI connection. - - :param exception: the exception object corresponding to the reason - for this invalidation, if any. May be ``None``. - - .. versionadded:: 0.9.2 Added support for connection invalidation - listening. - - .. 
seealso:: - - :ref:`pool_connection_invalidation` - - """ - - def soft_invalidate(self, dbapi_connection, connection_record, exception): - """Called when a DBAPI connection is to be "soft invalidated". - - This event is called any time the :meth:`._ConnectionRecord.invalidate` - method is invoked with the ``soft`` flag. - - Soft invalidation refers to when the connection record that tracks - this connection will force a reconnect after the current connection - is checked in. It does not actively close the dbapi_connection - at the point at which it is called. - - .. versionadded:: 1.0.3 - - """ - - -class ConnectionEvents(event.Events): - """Available events for :class:`.Connectable`, which includes - :class:`.Connection` and :class:`.Engine`. - - The methods here define the name of an event as well as the names of - members that are passed to listener functions. - - An event listener can be associated with any :class:`.Connectable` - class or instance, such as an :class:`.Engine`, e.g.:: - - from sqlalchemy import event, create_engine - - def before_cursor_execute(conn, cursor, statement, parameters, context, - executemany): - log.info("Received statement: %s" % statement) - - engine = create_engine('postgresql://scott:tiger@localhost/test') - event.listen(engine, "before_cursor_execute", before_cursor_execute) - - or with a specific :class:`.Connection`:: - - with engine.begin() as conn: - @event.listens_for(conn, 'before_cursor_execute') - def before_cursor_execute(conn, cursor, statement, parameters, - context, executemany): - log.info("Received statement: %s" % statement) - - When the methods are called with a `statement` parameter, such as in - :meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and - :meth:`.dbapi_error`, the statement is the exact SQL string that was - prepared for transmission to the DBAPI ``cursor`` in the connection's - :class:`.Dialect`. - - The :meth:`.before_execute` and :meth:`.before_cursor_execute` - events can also be established with the ``retval=True`` flag, which - allows modification of the statement and parameters to be sent - to the database. The :meth:`.before_cursor_execute` event is - particularly useful here to add ad-hoc string transformations, such - as comments, to all executions:: - - from sqlalchemy.engine import Engine - from sqlalchemy import event - - @event.listens_for(Engine, "before_cursor_execute", retval=True) - def comment_sql_calls(conn, cursor, statement, parameters, - context, executemany): - statement = statement + " -- some comment" - return statement, parameters - - .. note:: :class:`.ConnectionEvents` can be established on any - combination of :class:`.Engine`, :class:`.Connection`, as well - as instances of each of those classes. Events across all - four scopes will fire off for a given instance of - :class:`.Connection`. However, for performance reasons, the - :class:`.Connection` object determines at instantiation time - whether or not its parent :class:`.Engine` has event listeners - established. Event listeners added to the :class:`.Engine` - class or to an instance of :class:`.Engine` *after* the instantiation - of a dependent :class:`.Connection` instance will usually - *not* be available on that :class:`.Connection` instance. The newly - added listeners will instead take effect for :class:`.Connection` - instances created subsequent to those event listeners being - established on the parent :class:`.Engine` class or instance. 
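For instance, the cursor-level pair of events can time every statement; a sketch of the well-known timing recipe, stashing the start time in ``conn.info``::

    import time

    from sqlalchemy import event
    from sqlalchemy.engine import Engine

    @event.listens_for(Engine, "before_cursor_execute")
    def before_cursor_execute(conn, cursor, statement, parameters,
                              context, executemany):
        conn.info.setdefault('query_start_time', []).append(time.time())

    @event.listens_for(Engine, "after_cursor_execute")
    def after_cursor_execute(conn, cursor, statement, parameters,
                             context, executemany):
        total = time.time() - conn.info['query_start_time'].pop(-1)
        print("query took %.6f sec" % total)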
- - :param retval=False: Applies to the :meth:`.before_execute` and - :meth:`.before_cursor_execute` events only. When True, the - user-defined event function must have a return value, which - is a tuple of parameters that replace the given statement - and parameters. See those methods for a description of - specific return arguments. - - .. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated - with any :class:`.Connectable` including :class:`.Connection`, - in addition to the existing support for :class:`.Engine`. - - """ - - _target_class_doc = "SomeEngine" - _dispatch_target = Connectable - - @classmethod - def _listen(cls, event_key, retval=False): - target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, \ - event_key._listen_fn - - target._has_events = True - - if not retval: - if identifier == 'before_execute': - orig_fn = fn - - def wrap_before_execute(conn, clauseelement, - multiparams, params): - orig_fn(conn, clauseelement, multiparams, params) - return clauseelement, multiparams, params - fn = wrap_before_execute - elif identifier == 'before_cursor_execute': - orig_fn = fn - - def wrap_before_cursor_execute(conn, cursor, statement, - parameters, context, - executemany): - orig_fn(conn, cursor, statement, - parameters, context, executemany) - return statement, parameters - fn = wrap_before_cursor_execute - elif retval and \ - identifier not in ('before_execute', - 'before_cursor_execute', 'handle_error'): - raise exc.ArgumentError( - "Only the 'before_execute', " - "'before_cursor_execute' and 'handle_error' engine " - "event listeners accept the 'retval=True' " - "argument.") - event_key.with_wrapper(fn).base_listen() - - def before_execute(self, conn, clauseelement, multiparams, params): - """Intercept high level execute() events, receiving uncompiled - SQL constructs and other objects prior to rendering into SQL. - - This event is good for debugging SQL compilation issues as well - as early manipulation of the parameters being sent to the database, - as the parameter lists will be in a consistent format here. - - This event can be optionally established with the ``retval=True`` - flag. The ``clauseelement``, ``multiparams``, and ``params`` - arguments should be returned as a three-tuple in this case:: - - @event.listens_for(Engine, "before_execute", retval=True) - def before_execute(conn, conn, clauseelement, multiparams, params): - # do something with clauseelement, multiparams, params - return clauseelement, multiparams, params - - :param conn: :class:`.Connection` object - :param clauseelement: SQL expression construct, :class:`.Compiled` - instance, or string statement passed to :meth:`.Connection.execute`. - :param multiparams: Multiple parameter sets, a list of dictionaries. - :param params: Single parameter set, a single dictionary. - - See also: - - :meth:`.before_cursor_execute` - - """ - - def after_execute(self, conn, clauseelement, multiparams, params, result): - """Intercept high level execute() events after execute. - - - :param conn: :class:`.Connection` object - :param clauseelement: SQL expression construct, :class:`.Compiled` - instance, or string statement passed to :meth:`.Connection.execute`. - :param multiparams: Multiple parameter sets, a list of dictionaries. - :param params: Single parameter set, a single dictionary. - :param result: :class:`.ResultProxy` generated by the execution. 
- - """ - - def before_cursor_execute(self, conn, cursor, statement, - parameters, context, executemany): - """Intercept low-level cursor execute() events before execution, - receiving the string SQL statement and DBAPI-specific parameter list to - be invoked against a cursor. - - This event is a good choice for logging as well as late modifications - to the SQL string. It's less ideal for parameter modifications except - for those which are specific to a target backend. - - This event can be optionally established with the ``retval=True`` - flag. The ``statement`` and ``parameters`` arguments should be - returned as a two-tuple in this case:: - - @event.listens_for(Engine, "before_cursor_execute", retval=True) - def before_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - # do something with statement, parameters - return statement, parameters - - See the example at :class:`.ConnectionEvents`. - - :param conn: :class:`.Connection` object - :param cursor: DBAPI cursor object - :param statement: string SQL statement, as to be passed to the DBAPI - :param parameters: Dictionary, tuple, or list of parameters being - passed to the ``execute()`` or ``executemany()`` method of the - DBAPI ``cursor``. In some cases may be ``None``. - :param context: :class:`.ExecutionContext` object in use. May - be ``None``. - :param executemany: boolean, if ``True``, this is an ``executemany()`` - call, if ``False``, this is an ``execute()`` call. - - See also: - - :meth:`.before_execute` - - :meth:`.after_cursor_execute` - - """ - - def after_cursor_execute(self, conn, cursor, statement, - parameters, context, executemany): - """Intercept low-level cursor execute() events after execution. - - :param conn: :class:`.Connection` object - :param cursor: DBAPI cursor object. Will have results pending - if the statement was a SELECT, but these should not be consumed - as they will be needed by the :class:`.ResultProxy`. - :param statement: string SQL statement, as passed to the DBAPI - :param parameters: Dictionary, tuple, or list of parameters being - passed to the ``execute()`` or ``executemany()`` method of the - DBAPI ``cursor``. In some cases may be ``None``. - :param context: :class:`.ExecutionContext` object in use. May - be ``None``. - :param executemany: boolean, if ``True``, this is an ``executemany()`` - call, if ``False``, this is an ``execute()`` call. - - """ - - def dbapi_error(self, conn, cursor, statement, parameters, - context, exception): - """Intercept a raw DBAPI error. - - This event is called with the DBAPI exception instance - received from the DBAPI itself, *before* SQLAlchemy wraps the - exception with it's own exception wrappers, and before any - other operations are performed on the DBAPI cursor; the - existing transaction remains in effect as well as any state - on the cursor. - - The use case here is to inject low-level exception handling - into an :class:`.Engine`, typically for logging and - debugging purposes. - - .. warning:: - - Code should **not** modify - any state or throw any exceptions here as this will - interfere with SQLAlchemy's cleanup and error handling - routines. For exception modification, please refer to the - new :meth:`.ConnectionEvents.handle_error` event. 
- - Subsequent to this hook, SQLAlchemy may attempt any - number of operations on the connection/cursor, including - closing the cursor, rolling back of the transaction in the - case of connectionless execution, and disposing of the entire - connection pool if a "disconnect" was detected. The - exception is then wrapped in a SQLAlchemy DBAPI exception - wrapper and re-thrown. - - :param conn: :class:`.Connection` object - :param cursor: DBAPI cursor object - :param statement: string SQL statement, as passed to the DBAPI - :param parameters: Dictionary, tuple, or list of parameters being - passed to the ``execute()`` or ``executemany()`` method of the - DBAPI ``cursor``. In some cases may be ``None``. - :param context: :class:`.ExecutionContext` object in use. May - be ``None``. - :param exception: The **unwrapped** exception emitted directly from the - DBAPI. The class here is specific to the DBAPI module in use. - - .. deprecated:: 0.9.7 - replaced by - :meth:`.ConnectionEvents.handle_error` - - """ - - def handle_error(self, exception_context): - """Intercept all exceptions processed by the :class:`.Connection`. - - This includes all exceptions emitted by the DBAPI as well as - within SQLAlchemy's statement invocation process, including - encoding errors and other statement validation errors. Other areas - in which the event is invoked include transaction begin and end, - result row fetching, cursor creation. - - Note that :meth:`.handle_error` may support new kinds of exceptions - and new calling scenarios at *any time*. Code which uses this - event must expect new calling patterns to be present in minor - releases. - - To support the wide variety of members that correspond to an exception, - as well as to allow extensibility of the event without backwards - incompatibility, the sole argument received is an instance of - :class:`.ExceptionContext`. This object contains data members - representing detail about the exception. - - Use cases supported by this hook include: - - * read-only, low-level exception handling for logging and - debugging purposes - * exception re-writing - - The hook is called while the cursor from the failed operation - (if any) is still open and accessible. Special cleanup operations - can be called on this cursor; SQLAlchemy will attempt to close - this cursor subsequent to this hook being invoked. If the connection - is in "autocommit" mode, the transaction also remains open within - the scope of this hook; the rollback of the per-statement transaction - also occurs after the hook is called. - - The user-defined event handler has two options for replacing - the SQLAlchemy-constructed exception into one that is user - defined. It can either raise this new exception directly, in - which case all further event listeners are bypassed and the - exception will be raised, after appropriate cleanup as taken - place:: - - @event.listens_for(Engine, "handle_error") - def handle_exception(context): - if isinstance(context.original_exception, - psycopg2.OperationalError) and \\ - "failed" in str(context.original_exception): - raise MySpecialException("failed operation") - - .. warning:: Because the :meth:`.ConnectionEvents.handle_error` - event specifically provides for exceptions to be re-thrown as - the ultimate exception raised by the failed statement, - **stack traces will be misleading** if the user-defined event - handler itself fails and throws an unexpected exception; - the stack trace may not illustrate the actual code line that - failed! 
It is advised to code carefully here and use - logging and/or inline debugging if unexpected exceptions are - occurring. - - Alternatively, a "chained" style of event handling can be - used, by configuring the handler with the ``retval=True`` - modifier and returning the new exception instance from the - function. In this case, event handling will continue onto the - next handler. The "chained" exception is available using - :attr:`.ExceptionContext.chained_exception`:: - - @event.listens_for(Engine, "handle_error", retval=True) - def handle_exception(context): - if context.chained_exception is not None and \\ - "special" in context.chained_exception.message: - return MySpecialException("failed", - cause=context.chained_exception) - - Handlers that return ``None`` may remain within this chain; the - last non-``None`` return value is the one that continues to be - passed to the next handler. - - When a custom exception is raised or returned, SQLAlchemy raises - this new exception as-is, it is not wrapped by any SQLAlchemy - object. If the exception is not a subclass of - :class:`sqlalchemy.exc.StatementError`, - certain features may not be available; currently this includes - the ORM's feature of adding a detail hint about "autoflush" to - exceptions raised within the autoflush process. - - :param context: an :class:`.ExceptionContext` object. See this - class for details on all available members. - - .. versionadded:: 0.9.7 Added the - :meth:`.ConnectionEvents.handle_error` hook. - - .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now - invoked when an :class:`.Engine` fails during the initial - call to :meth:`.Engine.connect`, as well as when a - :class:`.Connection` object encounters an error during a - reconnect operation. - - .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is - not fired off when a dialect makes use of the - ``skip_user_error_events`` execution option. This is used - by dialects which intend to catch SQLAlchemy-specific exceptions - within specific operations, such as when the MySQL dialect detects - a table not present within the ``has_table()`` dialect method. - Prior to 1.0.0, code which implements :meth:`.handle_error` needs - to ensure that exceptions thrown in these scenarios are re-raised - without modification. - - """ - - def engine_connect(self, conn, branch): - """Intercept the creation of a new :class:`.Connection`. - - This event is called typically as the direct result of calling - the :meth:`.Engine.connect` method. - - It differs from the :meth:`.PoolEvents.connect` method, which - refers to the actual connection to a database at the DBAPI level; - a DBAPI connection may be pooled and reused for many operations. - In contrast, this event refers only to the production of a higher level - :class:`.Connection` wrapper around such a DBAPI connection. - - It also differs from the :meth:`.PoolEvents.checkout` event - in that it is specific to the :class:`.Connection` object, not the - DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although - this DBAPI connection is available here via the - :attr:`.Connection.connection` attribute. But note there can in fact - be multiple :meth:`.PoolEvents.checkout` events within the lifespan - of a single :class:`.Connection` object, if that :class:`.Connection` - is invalidated and re-established. There can also be multiple - :class:`.Connection` objects generated for the same already-checked-out - DBAPI connection, in the case that a "branch" of a :class:`.Connection` - is produced. 
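A typical application of ``engine_connect`` is a pessimistic liveness check before each checkout; a simplified sketch of that recipe (it omits the ``should_close_with_result`` bookkeeping the full documented version saves and restores)::

    from sqlalchemy import event, exc, select
    from sqlalchemy.engine import Engine

    @event.listens_for(Engine, "engine_connect")
    def ping_connection(connection, branch):
        if branch:
            # a branch shares the parent's DBAPI connection; nothing to test
            return
        try:
            connection.scalar(select([1]))
        except exc.DBAPIError as err:
            if err.connection_invalidated:
                # the pool has invalidated stale connections; retry once
                connection.scalar(select([1]))
            else:
                raise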
- - :param conn: :class:`.Connection` object. - :param branch: if True, this is a "branch" of an existing - :class:`.Connection`. A branch is generated within the course - of a statement execution to invoke supplemental statements, most - typically to pre-execute a SELECT of a default value for the purposes - of an INSERT statement. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :meth:`.PoolEvents.checkout` the lower-level pool checkout event - for an individual DBAPI connection - - :meth:`.ConnectionEvents.set_connection_execution_options` - a copy - of a :class:`.Connection` is also made when the - :meth:`.Connection.execution_options` method is called. - - """ - - def set_connection_execution_options(self, conn, opts): - """Intercept when the :meth:`.Connection.execution_options` - method is called. - - This method is called after the new :class:`.Connection` has been - produced, with the newly updated execution options collection, but - before the :class:`.Dialect` has acted upon any of those new options. - - Note that this method is not called when a new :class:`.Connection` - is produced which is inheriting execution options from its parent - :class:`.Engine`; to intercept this condition, use the - :meth:`.ConnectionEvents.engine_connect` event. - - :param conn: The newly copied :class:`.Connection` object - - :param opts: dictionary of options that were passed to the - :meth:`.Connection.execution_options` method. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :meth:`.ConnectionEvents.set_engine_execution_options` - event - which is called when :meth:`.Engine.execution_options` is called. - - - """ - - def set_engine_execution_options(self, engine, opts): - """Intercept when the :meth:`.Engine.execution_options` - method is called. - - The :meth:`.Engine.execution_options` method produces a shallow - copy of the :class:`.Engine` which stores the new options. That new - :class:`.Engine` is passed here. A particular application of this - method is to add a :meth:`.ConnectionEvents.engine_connect` event - handler to the given :class:`.Engine` which will perform some per- - :class:`.Connection` task specific to these execution options. - - :param conn: The newly copied :class:`.Engine` object - - :param opts: dictionary of options that were passed to the - :meth:`.Connection.execution_options` method. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :meth:`.ConnectionEvents.set_connection_execution_options` - event - which is called when :meth:`.Connection.execution_options` is - called. - - """ - - def engine_disposed(self, engine): - """Intercept when the :meth:`.Engine.dispose` method is called. - - The :meth:`.Engine.dispose` method instructs the engine to - "dispose" of it's connection pool (e.g. :class:`.Pool`), and - replaces it with a new one. Disposing of the old pool has the - effect that existing checked-in connections are closed. The new - pool does not establish any new connections until it is first used. - - This event can be used to indicate that resources related to the - :class:`.Engine` should also be cleaned up, keeping in mind that the - :class:`.Engine` can still be used for new requests in which case - it re-acquires connection resources. - - .. versionadded:: 1.0.5 - - """ - def begin(self, conn): - """Intercept begin() events. - - :param conn: :class:`.Connection` object - - """ - - def rollback(self, conn): - """Intercept rollback() events, as initiated by a - :class:`.Transaction`. 
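The transactional events pair naturally with ``engine.begin()``; a small sketch::

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite://")

    @event.listens_for(engine, "commit")
    def receive_commit(conn):
        print("COMMIT on", conn.engine.url)

    @event.listens_for(engine, "rollback")
    def receive_rollback(conn):
        print("ROLLBACK on", conn.engine.url)

    with engine.begin() as conn:
        conn.execute("CREATE TABLE t (x INTEGER)")
    # leaving the block commits the transaction, firing receive_commit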
- - Note that the :class:`.Pool` also "auto-rolls back" - a DBAPI connection upon checkin, if the ``reset_on_return`` - flag is set to its default value of ``'rollback'``. - To intercept this - rollback, use the :meth:`.PoolEvents.reset` hook. - - :param conn: :class:`.Connection` object - - .. seealso:: - - :meth:`.PoolEvents.reset` - - """ - - def commit(self, conn): - """Intercept commit() events, as initiated by a - :class:`.Transaction`. - - Note that the :class:`.Pool` may also "auto-commit" - a DBAPI connection upon checkin, if the ``reset_on_return`` - flag is set to the value ``'commit'``. To intercept this - commit, use the :meth:`.PoolEvents.reset` hook. - - :param conn: :class:`.Connection` object - """ - - def savepoint(self, conn, name): - """Intercept savepoint() events. - - :param conn: :class:`.Connection` object - :param name: specified name used for the savepoint. - - """ - - def rollback_savepoint(self, conn, name, context): - """Intercept rollback_savepoint() events. - - :param conn: :class:`.Connection` object - :param name: specified name used for the savepoint. - :param context: :class:`.ExecutionContext` in use. May be ``None``. - - """ - - def release_savepoint(self, conn, name, context): - """Intercept release_savepoint() events. - - :param conn: :class:`.Connection` object - :param name: specified name used for the savepoint. - :param context: :class:`.ExecutionContext` in use. May be ``None``. - - """ - - def begin_twophase(self, conn, xid): - """Intercept begin_twophase() events. - - :param conn: :class:`.Connection` object - :param xid: two-phase XID identifier - - """ - - def prepare_twophase(self, conn, xid): - """Intercept prepare_twophase() events. - - :param conn: :class:`.Connection` object - :param xid: two-phase XID identifier - """ - - def rollback_twophase(self, conn, xid, is_prepared): - """Intercept rollback_twophase() events. - - :param conn: :class:`.Connection` object - :param xid: two-phase XID identifier - :param is_prepared: boolean, indicates if - :meth:`.TwoPhaseTransaction.prepare` was called. - - """ - - def commit_twophase(self, conn, xid, is_prepared): - """Intercept commit_twophase() events. - - :param conn: :class:`.Connection` object - :param xid: two-phase XID identifier - :param is_prepared: boolean, indicates if - :meth:`.TwoPhaseTransaction.prepare` was called. - - """ - - -class DialectEvents(event.Events): - """event interface for execution-replacement functions. - - These events allow direct instrumentation and replacement - of key dialect functions which interact with the DBAPI. - - .. note:: - - :class:`.DialectEvents` hooks should be considered **semi-public** - and experimental. - These hooks are not for general use and are only for those situations - where intricate re-statement of DBAPI mechanics must be injected onto - an existing dialect. For general-use statement-interception events, - please use the :class:`.ConnectionEvents` interface. - - .. seealso:: - - :meth:`.ConnectionEvents.before_cursor_execute` - - :meth:`.ConnectionEvents.before_execute` - - :meth:`.ConnectionEvents.after_cursor_execute` - - :meth:`.ConnectionEvents.after_execute` - - - .. 
versionadded:: 0.9.4 - - """ - - _target_class_doc = "SomeEngine" - _dispatch_target = Dialect - - @classmethod - def _listen(cls, event_key, retval=False): - target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, event_key.fn - - target._has_events = True - event_key.base_listen() - - @classmethod - def _accept_with(cls, target): - if isinstance(target, type): - if issubclass(target, Engine): - return Dialect - elif issubclass(target, Dialect): - return target - elif isinstance(target, Engine): - return target.dialect - else: - return target - - def do_connect(self, dialect, conn_rec, cargs, cparams): - """Receive connection arguments before a connection is made. - - Return a DBAPI connection to halt further events from invoking; - the returned connection will be used. - - Alternatively, the event can manipulate the cargs and/or cparams - collections; cargs will always be a Python list that can be mutated - in-place and cparams a Python dictionary. Return None to - allow control to pass to the next event handler and ultimately - to allow the dialect to connect normally, given the updated - arguments. - - .. versionadded:: 1.0.3 - - """ - - def do_executemany(self, cursor, statement, parameters, context): - """Receive a cursor to have executemany() called. - - Return the value True to halt further events from invoking, - and to indicate that the cursor execution has already taken - place within the event handler. - - """ - - def do_execute_no_params(self, cursor, statement, context): - """Receive a cursor to have execute() with no parameters called. - - Return the value True to halt further events from invoking, - and to indicate that the cursor execution has already taken - place within the event handler. - - """ - - def do_execute(self, cursor, statement, parameters, context): - """Receive a cursor to have execute() called. - - Return the value True to halt further events from invoking, - and to indicate that the cursor execution has already taken - place within the event handler. - - """ diff --git a/python/sqlalchemy/exc.py b/python/sqlalchemy/exc.py deleted file mode 100644 index 3a4f346e..00000000 --- a/python/sqlalchemy/exc.py +++ /dev/null @@ -1,374 +0,0 @@ -# sqlalchemy/exc.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Exceptions used with SQLAlchemy. - -The base exception class is :exc:`.SQLAlchemyError`. Exceptions which are -raised as a result of DBAPI exceptions are all subclasses of -:exc:`.DBAPIError`. - -""" - - -class SQLAlchemyError(Exception): - """Generic error class.""" - - -class ArgumentError(SQLAlchemyError): - """Raised when an invalid or conflicting function argument is supplied. - - This error generally corresponds to construction time state errors. - - """ - - -class NoSuchModuleError(ArgumentError): - """Raised when a dynamically-loaded module (usually a database dialect) - of a particular name cannot be located.""" - - -class NoForeignKeysError(ArgumentError): - """Raised when no foreign keys can be located between two selectables - during a join.""" - - -class AmbiguousForeignKeysError(ArgumentError): - """Raised when more than one foreign key matching can be located - between two selectables during a join.""" - - -class CircularDependencyError(SQLAlchemyError): - """Raised by topological sorts when a circular dependency is detected. 
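Of these hooks, ``do_connect`` is the most commonly used; a sketch that adjusts the parameters handed to the DBAPI ``connect()`` call (the ``timeout`` key is illustrative and DBAPI-specific)::

    from sqlalchemy import event
    from sqlalchemy.engine import Engine

    @event.listens_for(Engine, "do_connect")
    def receive_do_connect(dialect, conn_rec, cargs, cparams):
        # mutate in place; returning None lets the dialect connect normally
        cparams.setdefault("timeout", 30)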
- - There are two scenarios where this error occurs: - - * In a Session flush operation, if two objects are mutually dependent - on each other, they can not be inserted or deleted via INSERT or - DELETE statements alone; an UPDATE will be needed to post-associate - or pre-deassociate one of the foreign key constrained values. - The ``post_update`` flag described at :ref:`post_update` can resolve - this cycle. - * In a :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey` - or :class:`.ForeignKeyConstraint` objects mutually refer to each - other. Apply the ``use_alter=True`` flag to one or both, - see :ref:`use_alter`. - - """ - def __init__(self, message, cycles, edges, msg=None): - if msg is None: - message += " (%s)" % ", ".join(repr(s) for s in cycles) - else: - message = msg - SQLAlchemyError.__init__(self, message) - self.cycles = cycles - self.edges = edges - - def __reduce__(self): - return self.__class__, (None, self.cycles, - self.edges, self.args[0]) - - -class CompileError(SQLAlchemyError): - """Raised when an error occurs during SQL compilation""" - - -class UnsupportedCompilationError(CompileError): - """Raised when an operation is not supported by the given compiler. - - - .. versionadded:: 0.8.3 - - """ - - def __init__(self, compiler, element_type): - super(UnsupportedCompilationError, self).__init__( - "Compiler %r can't render element of type %s" % - (compiler, element_type)) - - -class IdentifierError(SQLAlchemyError): - """Raised when a schema name is beyond the max character limit""" - - -class DisconnectionError(SQLAlchemyError): - """A disconnect is detected on a raw DB-API connection. - - This error is raised and consumed internally by a connection pool. It can - be raised by the :meth:`.PoolEvents.checkout` event so that the host pool - forces a retry; the exception will be caught three times in a row before - the pool gives up and raises :class:`~sqlalchemy.exc.InvalidRequestError` - regarding the connection attempt. - - """ - - -class TimeoutError(SQLAlchemyError): - """Raised when a connection pool times out on getting a connection.""" - - -class InvalidRequestError(SQLAlchemyError): - """SQLAlchemy was asked to do something it can't do. - - This error generally corresponds to runtime state errors. - - """ - - -class NoInspectionAvailable(InvalidRequestError): - """A subject passed to :func:`sqlalchemy.inspection.inspect` produced - no context for inspection.""" - - -class ResourceClosedError(InvalidRequestError): - """An operation was requested from a connection, cursor, or other - object that's in a closed state.""" - - -class NoSuchColumnError(KeyError, InvalidRequestError): - """A nonexistent column is requested from a ``RowProxy``.""" - - -class NoReferenceError(InvalidRequestError): - """Raised by ``ForeignKey`` to indicate a reference cannot be resolved.""" - - -class NoReferencedTableError(NoReferenceError): - """Raised by ``ForeignKey`` when the referred ``Table`` cannot be - located. - - """ - def __init__(self, message, tname): - NoReferenceError.__init__(self, message) - self.table_name = tname - - def __reduce__(self): - return self.__class__, (self.args[0], self.table_name) - - -class NoReferencedColumnError(NoReferenceError): - """Raised by ``ForeignKey`` when the referred ``Column`` cannot be - located. 
- - """ - def __init__(self, message, tname, cname): - NoReferenceError.__init__(self, message) - self.table_name = tname - self.column_name = cname - - def __reduce__(self): - return self.__class__, (self.args[0], self.table_name, - self.column_name) - - -class NoSuchTableError(InvalidRequestError): - """Table does not exist or is not visible to a connection.""" - - -class UnboundExecutionError(InvalidRequestError): - """SQL was attempted without a database connection to execute it on.""" - - -class DontWrapMixin(object): - """A mixin class which, when applied to a user-defined Exception class, - will not be wrapped inside of :exc:`.StatementError` if the error is - emitted within the process of executing a statement. - - E.g.:: - - from sqlalchemy.exc import DontWrapMixin - - class MyCustomException(Exception, DontWrapMixin): - pass - - class MySpecialType(TypeDecorator): - impl = String - - def process_bind_param(self, value, dialect): - if value == 'invalid': - raise MyCustomException("invalid!") - - """ - -# Moved to orm.exc; compatibility definition installed by orm import until 0.6 -UnmappedColumnError = None - - -class StatementError(SQLAlchemyError): - """An error occurred during execution of a SQL statement. - - :class:`StatementError` wraps the exception raised - during execution, and features :attr:`.statement` - and :attr:`.params` attributes which supply context regarding - the specifics of the statement which had an issue. - - The wrapped exception object is available in - the :attr:`.orig` attribute. - - """ - - statement = None - """The string SQL statement being invoked when this exception occurred.""" - - params = None - """The parameter list being used when this exception occurred.""" - - orig = None - """The DBAPI exception object.""" - - def __init__(self, message, statement, params, orig): - SQLAlchemyError.__init__(self, message) - self.statement = statement - self.params = params - self.orig = orig - self.detail = [] - - def add_detail(self, msg): - self.detail.append(msg) - - def __reduce__(self): - return self.__class__, (self.args[0], self.statement, - self.params, self.orig) - - def __str__(self): - from sqlalchemy.sql import util - - details = [SQLAlchemyError.__str__(self)] - if self.statement: - details.append("[SQL: %r]" % self.statement) - if self.params: - params_repr = util._repr_params(self.params, 10) - details.append("[parameters: %r]" % params_repr) - return ' '.join([ - "(%s)" % det for det in self.detail - ] + details) - - def __unicode__(self): - return self.__str__() - - -class DBAPIError(StatementError): - """Raised when the execution of a database operation fails. - - Wraps exceptions raised by the DB-API underlying the - database operation. Driver-specific implementations of the standard - DB-API exception types are wrapped by matching sub-types of SQLAlchemy's - :class:`DBAPIError` when possible. DB-API's ``Error`` type maps to - :class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. Note - that there is no guarantee that different DB-API implementations will - raise the same exception type for any given error condition. - - :class:`DBAPIError` features :attr:`~.StatementError.statement` - and :attr:`~.StatementError.params` attributes which supply context - regarding the specifics of the statement which had an issue, for the - typical case when the error was raised within the context of - emitting a SQL statement. - - The wrapped exception object is available in the - :attr:`~.StatementError.orig` attribute. 
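# Editorial sketch of the .statement / .params / .orig attributes
# documented above, assuming an in-memory SQLite engine and the
# 1.0-era engine.execute() API: querying a missing table raises an
# OperationalError, a DBAPIError subclass carrying the failing SQL
# and the raw driver exception.
from sqlalchemy import create_engine
from sqlalchemy.exc import DBAPIError

engine = create_engine("sqlite://")
try:
    engine.execute("SELECT * FROM no_such_table")
except DBAPIError as err:
    print(err.statement)   # the SQL string that failed
    print(err.orig)        # the underlying sqlite3 exception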
Its type and properties are - DB-API implementation specific. - - """ - - @classmethod - def instance(cls, statement, params, - orig, dbapi_base_err, - connection_invalidated=False, - dialect=None): - # Don't ever wrap these, just return them directly as if - # DBAPIError didn't exist. - if (isinstance(orig, BaseException) and - not isinstance(orig, Exception)) or \ - isinstance(orig, DontWrapMixin): - return orig - - if orig is not None: - # not a DBAPI error, statement is present. - # raise a StatementError - if not isinstance(orig, dbapi_base_err) and statement: - return StatementError( - "(%s.%s) %s" % - (orig.__class__.__module__, orig.__class__.__name__, - orig), - statement, params, orig - ) - - glob = globals() - for super_ in orig.__class__.__mro__: - name = super_.__name__ - if dialect: - name = dialect.dbapi_exception_translation_map.get( - name, name) - if name in glob and issubclass(glob[name], DBAPIError): - cls = glob[name] - break - - return cls(statement, params, orig, connection_invalidated) - - def __reduce__(self): - return self.__class__, (self.statement, self.params, - self.orig, self.connection_invalidated) - - def __init__(self, statement, params, orig, connection_invalidated=False): - try: - text = str(orig) - except Exception as e: - text = 'Error in str() of DB-API-generated exception: ' + str(e) - StatementError.__init__( - self, - '(%s.%s) %s' % ( - orig.__class__.__module__, orig.__class__.__name__, text, ), - statement, - params, - orig - ) - self.connection_invalidated = connection_invalidated - - -class InterfaceError(DBAPIError): - """Wraps a DB-API InterfaceError.""" - - -class DatabaseError(DBAPIError): - """Wraps a DB-API DatabaseError.""" - - -class DataError(DatabaseError): - """Wraps a DB-API DataError.""" - - -class OperationalError(DatabaseError): - """Wraps a DB-API OperationalError.""" - - -class IntegrityError(DatabaseError): - """Wraps a DB-API IntegrityError.""" - - -class InternalError(DatabaseError): - """Wraps a DB-API InternalError.""" - - -class ProgrammingError(DatabaseError): - """Wraps a DB-API ProgrammingError.""" - - -class NotSupportedError(DatabaseError): - """Wraps a DB-API NotSupportedError.""" - - -# Warnings - -class SADeprecationWarning(DeprecationWarning): - """Issued once per usage of a deprecated API.""" - - -class SAPendingDeprecationWarning(PendingDeprecationWarning): - """Issued once per usage of a deprecated API.""" - - -class SAWarning(RuntimeWarning): - """Issued at runtime.""" diff --git a/python/sqlalchemy/ext/__init__.py b/python/sqlalchemy/ext/__init__.py deleted file mode 100644 index 60a17c65..00000000 --- a/python/sqlalchemy/ext/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# ext/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .. import util as _sa_util - -_sa_util.dependencies.resolve_all("sqlalchemy.ext") - diff --git a/python/sqlalchemy/ext/associationproxy.py b/python/sqlalchemy/ext/associationproxy.py deleted file mode 100644 index 29064ef2..00000000 --- a/python/sqlalchemy/ext/associationproxy.py +++ /dev/null @@ -1,1068 +0,0 @@ -# ext/associationproxy.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Contain the ``AssociationProxy`` class. 
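# Editorial sketch of the DB-API wrapper hierarchy listed above: a
# UNIQUE violation surfaces as sqlalchemy.exc.IntegrityError, which is
# also catchable as DatabaseError or DBAPIError.  In-memory SQLite and
# the 1.0-era engine.execute() API are assumed.
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError

engine = create_engine("sqlite://")
engine.execute("CREATE TABLE t (id INTEGER PRIMARY KEY)")
engine.execute("INSERT INTO t (id) VALUES (1)")
try:
    engine.execute("INSERT INTO t (id) VALUES (1)")   # duplicate key
except IntegrityError as err:
    print(type(err.orig).__name__)   # sqlite3's own IntegrityError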
- -The ``AssociationProxy`` is a Python property object which provides -transparent proxied access to the endpoint of an association object. - -See the example ``examples/association/proxied_association.py``. - -""" -import itertools -import operator -import weakref -from .. import exc, orm, util -from ..orm import collections, interfaces -from ..sql import not_, or_ - - -def association_proxy(target_collection, attr, **kw): - """Return a Python property implementing a view of a target - attribute which references an attribute on members of the - target. - - The returned value is an instance of :class:`.AssociationProxy`. - - Implements a Python property representing a relationship as a collection - of simpler values, or a scalar value. The proxied property will mimic - the collection type of the target (list, dict or set), or, in the case of - a one to one relationship, a simple scalar value. - - :param target_collection: Name of the attribute we'll proxy to. - This attribute is typically mapped by - :func:`~sqlalchemy.orm.relationship` to link to a target collection, but - can also be a many-to-one or non-scalar relationship. - - :param attr: Attribute on the associated instance or instances we'll - proxy for. - - For example, given a target collection of [obj1, obj2], a list created - by this proxy property would look like [getattr(obj1, *attr*), - getattr(obj2, *attr*)] - - If the relationship is one-to-one or otherwise uselist=False, then - simply: getattr(obj, *attr*) - - :param creator: optional. - - When new items are added to this proxied collection, new instances of - the class collected by the target collection will be created. For list - and set collections, the target class constructor will be called with - the 'value' for the new instance. For dict types, two arguments are - passed: key and value. - - If you want to construct instances differently, supply a *creator* - function that takes arguments as above and returns instances. - - For scalar relationships, creator() will be called if the target is None. - If the target is present, set operations are proxied to setattr() on the - associated object. - - If you have an associated object with multiple attributes, you may set - up multiple association proxies mapping to different attributes. See - the unit tests for examples, and for examples of how creator() functions - can be used to construct the scalar relationship on-demand in this - situation. - - :param \*\*kw: Passes along any other keyword arguments to - :class:`.AssociationProxy`. - - """ - return AssociationProxy(target_collection, attr, **kw) - - -ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY') -"""Symbol indicating an :class:`InspectionAttr` that's - of type :class:`.AssociationProxy`. - - Is assigned to the :attr:`.InspectionAttr.extension_type` - attibute. - -""" - - -class AssociationProxy(interfaces.InspectionAttrInfo): - """A descriptor that presents a read/write view of an object attribute.""" - - is_attribute = False - extension_type = ASSOCIATION_PROXY - - def __init__(self, target_collection, attr, creator=None, - getset_factory=None, proxy_factory=None, - proxy_bulk_set=None, info=None): - """Construct a new :class:`.AssociationProxy`. - - The :func:`.association_proxy` function is provided as the usual - entrypoint here, though :class:`.AssociationProxy` can be instantiated - and/or subclassed directly. - - :param target_collection: Name of the collection we'll proxy to, - usually created with :func:`.relationship`. 
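# Editorial sketch of the association_proxy() usage described above,
# with hypothetical User/Keyword classes: User.keywords presents the
# "kw" relationship as a plain list of strings, creating Keyword
# objects on append via the default creator.
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class Keyword(Base):
    __tablename__ = "keyword"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("user.id"))
    keyword = Column(String(50))

    def __init__(self, keyword):
        self.keyword = keyword

class User(Base):
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    kw = relationship(Keyword)
    keywords = association_proxy("kw", "keyword")

u = User()
u.keywords.append("cheese inspector")   # really User.kw.append(Keyword(...))
print(u.keywords)                       # ['cheese inspector']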
- - :param attr: Attribute on the collected instances we'll proxy - for. For example, given a target collection of [obj1, obj2], a - list created by this proxy property would look like - [getattr(obj1, attr), getattr(obj2, attr)] - - :param creator: Optional. When new items are added to this proxied - collection, new instances of the class collected by the target - collection will be created. For list and set collections, the - target class constructor will be called with the 'value' for the - new instance. For dict types, two arguments are passed: - key and value. - - If you want to construct instances differently, supply a 'creator' - function that takes arguments as above and returns instances. - - :param getset_factory: Optional. Proxied attribute access is - automatically handled by routines that get and set values based on - the `attr` argument for this proxy. - - If you would like to customize this behavior, you may supply a - `getset_factory` callable that produces a tuple of `getter` and - `setter` functions. The factory is called with two arguments, the - abstract type of the underlying collection and this proxy instance. - - :param proxy_factory: Optional. The type of collection to emulate is - determined by sniffing the target collection. If your collection - type can't be determined by duck typing or you'd like to use a - different collection implementation, you may supply a factory - function to produce those collections. Only applicable to - non-scalar relationships. - - :param proxy_bulk_set: Optional, use with proxy_factory. See - the _set() method for details. - - :param info: optional, will be assigned to - :attr:`.AssociationProxy.info` if present. - - .. versionadded:: 1.0.9 - - """ - self.target_collection = target_collection - self.value_attr = attr - self.creator = creator - self.getset_factory = getset_factory - self.proxy_factory = proxy_factory - self.proxy_bulk_set = proxy_bulk_set - - self.owning_class = None - self.key = '_%s_%s_%s' % ( - type(self).__name__, target_collection, id(self)) - self.collection_class = None - if info: - self.info = info - - @property - def remote_attr(self): - """The 'remote' :class:`.MapperProperty` referenced by this - :class:`.AssociationProxy`. - - .. versionadded:: 0.7.3 - - See also: - - :attr:`.AssociationProxy.attr` - - :attr:`.AssociationProxy.local_attr` - - """ - return getattr(self.target_class, self.value_attr) - - @property - def local_attr(self): - """The 'local' :class:`.MapperProperty` referenced by this - :class:`.AssociationProxy`. - - .. versionadded:: 0.7.3 - - See also: - - :attr:`.AssociationProxy.attr` - - :attr:`.AssociationProxy.remote_attr` - - """ - return getattr(self.owning_class, self.target_collection) - - @property - def attr(self): - """Return a tuple of ``(local_attr, remote_attr)``. - - This attribute is convenient when specifying a join - using :meth:`.Query.join` across two relationships:: - - sess.query(Parent).join(*Parent.proxied.attr) - - .. versionadded:: 0.7.3 - - See also: - - :attr:`.AssociationProxy.local_attr` - - :attr:`.AssociationProxy.remote_attr` - - """ - return (self.local_attr, self.remote_attr) - - def _get_property(self): - return (orm.class_mapper(self.owning_class). - get_property(self.target_collection)) - - @util.memoized_property - def target_class(self): - """The intermediary class handled by this :class:`.AssociationProxy`. - - Intercepted append/set/assignment events will result - in the generation of new instances of this class. 
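# Editorial sketch of the local_attr / remote_attr / attr accessors
# documented above (added in 0.7.3), reusing the hypothetical
# User/Keyword mapping from the previous sketch.
print(User.keywords.local_attr)    # User.kw, on the owning class
print(User.keywords.remote_attr)   # Keyword.keyword, on the target class
print(User.keywords.attr)          # (User.kw, Keyword.keyword)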
- - """ - return self._get_property().mapper.class_ - - @util.memoized_property - def scalar(self): - """Return ``True`` if this :class:`.AssociationProxy` proxies a scalar - relationship on the local side.""" - - scalar = not self._get_property().uselist - if scalar: - self._initialize_scalar_accessors() - return scalar - - @util.memoized_property - def _value_is_scalar(self): - return not self._get_property().\ - mapper.get_property(self.value_attr).uselist - - @util.memoized_property - def _target_is_object(self): - return getattr(self.target_class, self.value_attr).impl.uses_objects - - def __get__(self, obj, class_): - if self.owning_class is None: - self.owning_class = class_ and class_ or type(obj) - if obj is None: - return self - - if self.scalar: - target = getattr(obj, self.target_collection) - return self._scalar_get(target) - else: - try: - # If the owning instance is reborn (orm session resurrect, - # etc.), refresh the proxy cache. - creator_id, proxy = getattr(obj, self.key) - if id(obj) == creator_id: - return proxy - except AttributeError: - pass - proxy = self._new(_lazy_collection(obj, self.target_collection)) - setattr(obj, self.key, (id(obj), proxy)) - return proxy - - def __set__(self, obj, values): - if self.owning_class is None: - self.owning_class = type(obj) - - if self.scalar: - creator = self.creator and self.creator or self.target_class - target = getattr(obj, self.target_collection) - if target is None: - setattr(obj, self.target_collection, creator(values)) - else: - self._scalar_set(target, values) - else: - proxy = self.__get__(obj, None) - if proxy is not values: - proxy.clear() - self._set(proxy, values) - - def __delete__(self, obj): - if self.owning_class is None: - self.owning_class = type(obj) - delattr(obj, self.key) - - def _initialize_scalar_accessors(self): - if self.getset_factory: - get, set = self.getset_factory(None, self) - else: - get, set = self._default_getset(None) - self._scalar_get, self._scalar_set = get, set - - def _default_getset(self, collection_class): - attr = self.value_attr - _getter = operator.attrgetter(attr) - getter = lambda target: _getter(target) if target is not None else None - if collection_class is dict: - setter = lambda o, k, v: setattr(o, attr, v) - else: - setter = lambda o, v: setattr(o, attr, v) - return getter, setter - - def _new(self, lazy_collection): - creator = self.creator and self.creator or self.target_class - self.collection_class = util.duck_type_collection(lazy_collection()) - - if self.proxy_factory: - return self.proxy_factory( - lazy_collection, creator, self.value_attr, self) - - if self.getset_factory: - getter, setter = self.getset_factory(self.collection_class, self) - else: - getter, setter = self._default_getset(self.collection_class) - - if self.collection_class is list: - return _AssociationList( - lazy_collection, creator, getter, setter, self) - elif self.collection_class is dict: - return _AssociationDict( - lazy_collection, creator, getter, setter, self) - elif self.collection_class is set: - return _AssociationSet( - lazy_collection, creator, getter, setter, self) - else: - raise exc.ArgumentError( - 'could not guess which interface to use for ' - 'collection_class "%s" backing "%s"; specify a ' - 'proxy_factory and proxy_bulk_set manually' % - (self.collection_class.__name__, self.target_collection)) - - def _inflate(self, proxy): - creator = self.creator and self.creator or self.target_class - - if self.getset_factory: - getter, setter = self.getset_factory(self.collection_class, 
self) - else: - getter, setter = self._default_getset(self.collection_class) - - proxy.creator = creator - proxy.getter = getter - proxy.setter = setter - - def _set(self, proxy, values): - if self.proxy_bulk_set: - self.proxy_bulk_set(proxy, values) - elif self.collection_class is list: - proxy.extend(values) - elif self.collection_class is dict: - proxy.update(values) - elif self.collection_class is set: - proxy.update(values) - else: - raise exc.ArgumentError( - 'no proxy_bulk_set supplied for custom ' - 'collection_class implementation') - - @property - def _comparator(self): - return self._get_property().comparator - - def any(self, criterion=None, **kwargs): - """Produce a proxied 'any' expression using EXISTS. - - This expression will be a composed product - using the :meth:`.RelationshipProperty.Comparator.any` - and/or :meth:`.RelationshipProperty.Comparator.has` - operators of the underlying proxied attributes. - - """ - if self._target_is_object: - if self._value_is_scalar: - value_expr = getattr( - self.target_class, self.value_attr).has( - criterion, **kwargs) - else: - value_expr = getattr( - self.target_class, self.value_attr).any( - criterion, **kwargs) - else: - value_expr = criterion - - # check _value_is_scalar here, otherwise - # we're scalar->scalar - call .any() so that - # the "can't call any() on a scalar" msg is raised. - if self.scalar and not self._value_is_scalar: - return self._comparator.has( - value_expr - ) - else: - return self._comparator.any( - value_expr - ) - - def has(self, criterion=None, **kwargs): - """Produce a proxied 'has' expression using EXISTS. - - This expression will be a composed product - using the :meth:`.RelationshipProperty.Comparator.any` - and/or :meth:`.RelationshipProperty.Comparator.has` - operators of the underlying proxied attributes. - - """ - - if self._target_is_object: - return self._comparator.has( - getattr(self.target_class, self.value_attr). - has(criterion, **kwargs) - ) - else: - if criterion is not None or kwargs: - raise exc.ArgumentError( - "Non-empty has() not allowed for " - "column-targeted association proxy; use ==") - return self._comparator.has() - - def contains(self, obj): - """Produce a proxied 'contains' expression using EXISTS. - - This expression will be a composed product - using the :meth:`.RelationshipProperty.Comparator.any` - , :meth:`.RelationshipProperty.Comparator.has`, - and/or :meth:`.RelationshipProperty.Comparator.contains` - operators of the underlying proxied attributes. - """ - - if self.scalar and not self._value_is_scalar: - return self._comparator.has( - getattr(self.target_class, self.value_attr).contains(obj) - ) - else: - return self._comparator.any(**{self.value_attr: obj}) - - def __eq__(self, obj): - # note the has() here will fail for collections; eq_() - # is only allowed with a scalar. - if obj is None: - return or_( - self._comparator.has(**{self.value_attr: obj}), - self._comparator == None - ) - else: - return self._comparator.has(**{self.value_attr: obj}) - - def __ne__(self, obj): - # note the has() here will fail for collections; eq_() - # is only allowed with a scalar. 
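# Editorial sketch of the proxied contains()/any() comparators
# documented above, again reusing the hypothetical User/Keyword
# mapping: both compile to an EXISTS subquery against the underlying
# "kw" relationship, which str() makes visible without a database.
print(User.keywords.contains("cheese inspector"))
print(User.keywords.any(Keyword.keyword.like("%cheese%")))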
- return self._comparator.has( - getattr(self.target_class, self.value_attr) != obj) - - -class _lazy_collection(object): - def __init__(self, obj, target): - self.ref = weakref.ref(obj) - self.target = target - - def __call__(self): - obj = self.ref() - if obj is None: - raise exc.InvalidRequestError( - "stale association proxy, parent object has gone out of " - "scope") - return getattr(obj, self.target) - - def __getstate__(self): - return {'obj': self.ref(), 'target': self.target} - - def __setstate__(self, state): - self.ref = weakref.ref(state['obj']) - self.target = state['target'] - - -class _AssociationCollection(object): - def __init__(self, lazy_collection, creator, getter, setter, parent): - """Constructs an _AssociationCollection. - - This will always be a subclass of either _AssociationList, - _AssociationSet, or _AssociationDict. - - lazy_collection - A callable returning a list-based collection of entities (usually an - object attribute managed by a SQLAlchemy relationship()) - - creator - A function that creates new target entities. Given one parameter: - value. This assertion is assumed:: - - obj = creator(somevalue) - assert getter(obj) == somevalue - - getter - A function. Given an associated object, return the 'value'. - - setter - A function. Given an associated object and a value, store that - value on the object. - - """ - self.lazy_collection = lazy_collection - self.creator = creator - self.getter = getter - self.setter = setter - self.parent = parent - - col = property(lambda self: self.lazy_collection()) - - def __len__(self): - return len(self.col) - - def __bool__(self): - return bool(self.col) - - __nonzero__ = __bool__ - - def __getstate__(self): - return {'parent': self.parent, 'lazy_collection': self.lazy_collection} - - def __setstate__(self, state): - self.parent = state['parent'] - self.lazy_collection = state['lazy_collection'] - self.parent._inflate(self) - - -class _AssociationList(_AssociationCollection): - """Generic, converting, list-to-list proxy.""" - - def _create(self, value): - return self.creator(value) - - def _get(self, object): - return self.getter(object) - - def _set(self, object, value): - return self.setter(object, value) - - def __getitem__(self, index): - if not isinstance(index, slice): - return self._get(self.col[index]) - else: - return [self._get(member) for member in self.col[index]] - - def __setitem__(self, index, value): - if not isinstance(index, slice): - self._set(self.col[index], value) - else: - if index.stop is None: - stop = len(self) - elif index.stop < 0: - stop = len(self) + index.stop - else: - stop = index.stop - step = index.step or 1 - - start = index.start or 0 - rng = list(range(index.start or 0, stop, step)) - if step == 1: - for i in rng: - del self[start] - i = start - for item in value: - self.insert(i, item) - i += 1 - else: - if len(value) != len(rng): - raise ValueError( - "attempt to assign sequence of size %s to " - "extended slice of size %s" % (len(value), - len(rng))) - for i, item in zip(rng, value): - self._set(self.col[i], item) - - def __delitem__(self, index): - del self.col[index] - - def __contains__(self, value): - for member in self.col: - # testlib.pragma exempt:__eq__ - if self._get(member) == value: - return True - return False - - def __getslice__(self, start, end): - return [self._get(member) for member in self.col[start:end]] - - def __setslice__(self, start, end, values): - members = [self._create(v) for v in values] - self.col[start:end] = members - - def __delslice__(self, start, 
end): - del self.col[start:end] - - def __iter__(self): - """Iterate over proxied values. - - For the actual domain objects, iterate over .col instead or - just use the underlying collection directly from its property - on the parent. - """ - - for member in self.col: - yield self._get(member) - raise StopIteration - - def append(self, value): - item = self._create(value) - self.col.append(item) - - def count(self, value): - return sum([1 for _ in - util.itertools_filter(lambda v: v == value, iter(self))]) - - def extend(self, values): - for v in values: - self.append(v) - - def insert(self, index, value): - self.col[index:index] = [self._create(value)] - - def pop(self, index=-1): - return self.getter(self.col.pop(index)) - - def remove(self, value): - for i, val in enumerate(self): - if val == value: - del self.col[i] - return - raise ValueError("value not in list") - - def reverse(self): - """Not supported, use reversed(mylist)""" - - raise NotImplementedError - - def sort(self): - """Not supported, use sorted(mylist)""" - - raise NotImplementedError - - def clear(self): - del self.col[0:len(self.col)] - - def __eq__(self, other): - return list(self) == other - - def __ne__(self, other): - return list(self) != other - - def __lt__(self, other): - return list(self) < other - - def __le__(self, other): - return list(self) <= other - - def __gt__(self, other): - return list(self) > other - - def __ge__(self, other): - return list(self) >= other - - def __cmp__(self, other): - return cmp(list(self), other) - - def __add__(self, iterable): - try: - other = list(iterable) - except TypeError: - return NotImplemented - return list(self) + other - - def __radd__(self, iterable): - try: - other = list(iterable) - except TypeError: - return NotImplemented - return other + list(self) - - def __mul__(self, n): - if not isinstance(n, int): - return NotImplemented - return list(self) * n - __rmul__ = __mul__ - - def __iadd__(self, iterable): - self.extend(iterable) - return self - - def __imul__(self, n): - # unlike a regular list *=, proxied __imul__ will generate unique - # backing objects for each copy. *= on proxied lists is a bit of - # a stretch anyhow, and this interpretation of the __imul__ contract - # is more plausibly useful than copying the backing objects. 
- if not isinstance(n, int): - return NotImplemented - if n == 0: - self.clear() - elif n > 1: - self.extend(list(self) * (n - 1)) - return self - - def copy(self): - return list(self) - - def __repr__(self): - return repr(list(self)) - - def __hash__(self): - raise TypeError("%s objects are unhashable" % type(self).__name__) - - for func_name, func in list(locals().items()): - if (util.callable(func) and func.__name__ == func_name and - not func.__doc__ and hasattr(list, func_name)): - func.__doc__ = getattr(list, func_name).__doc__ - del func_name, func - - -_NotProvided = util.symbol('_NotProvided') - - -class _AssociationDict(_AssociationCollection): - """Generic, converting, dict-to-dict proxy.""" - - def _create(self, key, value): - return self.creator(key, value) - - def _get(self, object): - return self.getter(object) - - def _set(self, object, key, value): - return self.setter(object, key, value) - - def __getitem__(self, key): - return self._get(self.col[key]) - - def __setitem__(self, key, value): - if key in self.col: - self._set(self.col[key], key, value) - else: - self.col[key] = self._create(key, value) - - def __delitem__(self, key): - del self.col[key] - - def __contains__(self, key): - # testlib.pragma exempt:__hash__ - return key in self.col - - def has_key(self, key): - # testlib.pragma exempt:__hash__ - return key in self.col - - def __iter__(self): - return iter(self.col.keys()) - - def clear(self): - self.col.clear() - - def __eq__(self, other): - return dict(self) == other - - def __ne__(self, other): - return dict(self) != other - - def __lt__(self, other): - return dict(self) < other - - def __le__(self, other): - return dict(self) <= other - - def __gt__(self, other): - return dict(self) > other - - def __ge__(self, other): - return dict(self) >= other - - def __cmp__(self, other): - return cmp(dict(self), other) - - def __repr__(self): - return repr(dict(self.items())) - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def setdefault(self, key, default=None): - if key not in self.col: - self.col[key] = self._create(key, default) - return default - else: - return self[key] - - def keys(self): - return self.col.keys() - - if util.py2k: - def iteritems(self): - return ((key, self._get(self.col[key])) for key in self.col) - - def itervalues(self): - return (self._get(self.col[key]) for key in self.col) - - def iterkeys(self): - return self.col.iterkeys() - - def values(self): - return [self._get(member) for member in self.col.values()] - - def items(self): - return [(k, self._get(self.col[k])) for k in self] - else: - def items(self): - return ((key, self._get(self.col[key])) for key in self.col) - - def values(self): - return (self._get(self.col[key]) for key in self.col) - - def pop(self, key, default=_NotProvided): - if default is _NotProvided: - member = self.col.pop(key) - else: - member = self.col.pop(key, default) - return self._get(member) - - def popitem(self): - item = self.col.popitem() - return (item[0], self._get(item[1])) - - def update(self, *a, **kw): - if len(a) > 1: - raise TypeError('update expected at most 1 arguments, got %i' % - len(a)) - elif len(a) == 1: - seq_or_map = a[0] - # discern dict from sequence - took the advice from - # http://www.voidspace.org.uk/python/articles/duck_typing.shtml - # still not perfect :( - if hasattr(seq_or_map, 'keys'): - for item in seq_or_map: - self[item] = seq_or_map[item] - else: - try: - for k, v in seq_or_map: - self[k] = v - except ValueError: - raise 
ValueError( - "dictionary update sequence " - "requires 2-element tuples") - - for key, value in kw: - self[key] = value - - def copy(self): - return dict(self.items()) - - def __hash__(self): - raise TypeError("%s objects are unhashable" % type(self).__name__) - - for func_name, func in list(locals().items()): - if (util.callable(func) and func.__name__ == func_name and - not func.__doc__ and hasattr(dict, func_name)): - func.__doc__ = getattr(dict, func_name).__doc__ - del func_name, func - - -class _AssociationSet(_AssociationCollection): - """Generic, converting, set-to-set proxy.""" - - def _create(self, value): - return self.creator(value) - - def _get(self, object): - return self.getter(object) - - def _set(self, object, value): - return self.setter(object, value) - - def __len__(self): - return len(self.col) - - def __bool__(self): - if self.col: - return True - else: - return False - - __nonzero__ = __bool__ - - def __contains__(self, value): - for member in self.col: - # testlib.pragma exempt:__eq__ - if self._get(member) == value: - return True - return False - - def __iter__(self): - """Iterate over proxied values. - - For the actual domain objects, iterate over .col instead or just use - the underlying collection directly from its property on the parent. - - """ - for member in self.col: - yield self._get(member) - raise StopIteration - - def add(self, value): - if value not in self: - self.col.add(self._create(value)) - - # for discard and remove, choosing a more expensive check strategy rather - # than call self.creator() - def discard(self, value): - for member in self.col: - if self._get(member) == value: - self.col.discard(member) - break - - def remove(self, value): - for member in self.col: - if self._get(member) == value: - self.col.discard(member) - return - raise KeyError(value) - - def pop(self): - if not self.col: - raise KeyError('pop from an empty set') - member = self.col.pop() - return self._get(member) - - def update(self, other): - for value in other: - self.add(value) - - def __ior__(self, other): - if not collections._set_binops_check_strict(self, other): - return NotImplemented - for value in other: - self.add(value) - return self - - def _set(self): - return set(iter(self)) - - def union(self, other): - return set(self).union(other) - - __or__ = union - - def difference(self, other): - return set(self).difference(other) - - __sub__ = difference - - def difference_update(self, other): - for value in other: - self.discard(value) - - def __isub__(self, other): - if not collections._set_binops_check_strict(self, other): - return NotImplemented - for value in other: - self.discard(value) - return self - - def intersection(self, other): - return set(self).intersection(other) - - __and__ = intersection - - def intersection_update(self, other): - want, have = self.intersection(other), set(self) - - remove, add = have - want, want - have - - for value in remove: - self.remove(value) - for value in add: - self.add(value) - - def __iand__(self, other): - if not collections._set_binops_check_strict(self, other): - return NotImplemented - want, have = self.intersection(other), set(self) - - remove, add = have - want, want - have - - for value in remove: - self.remove(value) - for value in add: - self.add(value) - return self - - def symmetric_difference(self, other): - return set(self).symmetric_difference(other) - - __xor__ = symmetric_difference - - def symmetric_difference_update(self, other): - want, have = self.symmetric_difference(other), set(self) - - remove, 
add = have - want, want - have - - for value in remove: - self.remove(value) - for value in add: - self.add(value) - - def __ixor__(self, other): - if not collections._set_binops_check_strict(self, other): - return NotImplemented - want, have = self.symmetric_difference(other), set(self) - - remove, add = have - want, want - have - - for value in remove: - self.remove(value) - for value in add: - self.add(value) - return self - - def issubset(self, other): - return set(self).issubset(other) - - def issuperset(self, other): - return set(self).issuperset(other) - - def clear(self): - self.col.clear() - - def copy(self): - return set(self) - - def __eq__(self, other): - return set(self) == other - - def __ne__(self, other): - return set(self) != other - - def __lt__(self, other): - return set(self) < other - - def __le__(self, other): - return set(self) <= other - - def __gt__(self, other): - return set(self) > other - - def __ge__(self, other): - return set(self) >= other - - def __repr__(self): - return repr(set(self)) - - def __hash__(self): - raise TypeError("%s objects are unhashable" % type(self).__name__) - - for func_name, func in list(locals().items()): - if (util.callable(func) and func.__name__ == func_name and - not func.__doc__ and hasattr(set, func_name)): - func.__doc__ = getattr(set, func_name).__doc__ - del func_name, func diff --git a/python/sqlalchemy/ext/automap.py b/python/sqlalchemy/ext/automap.py deleted file mode 100644 index 330992e5..00000000 --- a/python/sqlalchemy/ext/automap.py +++ /dev/null @@ -1,1038 +0,0 @@ -# ext/automap.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Define an extension to the :mod:`sqlalchemy.ext.declarative` system -which automatically generates mapped classes and relationships from a database -schema, typically though not necessarily one which is reflected. - -.. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`. - -It is hoped that the :class:`.AutomapBase` system provides a quick -and modernized solution to the problem that the very famous -`SQLSoup `_ -also tries to solve, that of generating a quick and rudimentary object -model from an existing database on the fly. By addressing the issue strictly -at the mapper configuration level, and integrating fully with existing -Declarative class techniques, :class:`.AutomapBase` seeks to provide -a well-integrated approach to the issue of expediently auto-generating ad-hoc -mappings. - - -Basic Use -========= - -The simplest usage is to reflect an existing database into a new model. -We create a new :class:`.AutomapBase` class in a similar manner as to how -we create a declarative base class, using :func:`.automap_base`. -We then call :meth:`.AutomapBase.prepare` on the resulting base class, -asking it to reflect the schema and produce mappings:: - - from sqlalchemy.ext.automap import automap_base - from sqlalchemy.orm import Session - from sqlalchemy import create_engine - - Base = automap_base() - - # engine, suppose it has two tables 'user' and 'address' set up - engine = create_engine("sqlite:///mydatabase.db") - - # reflect the tables - Base.prepare(engine, reflect=True) - - # mapped classes are now created with names by default - # matching that of the table name. 
- User = Base.classes.user - Address = Base.classes.address - - session = Session(engine) - - # rudimentary relationships are produced - session.add(Address(email_address="foo@bar.com", user=User(name="foo"))) - session.commit() - - # collection-based relationships are by default named - # "_collection" - print (u1.address_collection) - -Above, calling :meth:`.AutomapBase.prepare` while passing along the -:paramref:`.AutomapBase.prepare.reflect` parameter indicates that the -:meth:`.MetaData.reflect` method will be called on this declarative base -classes' :class:`.MetaData` collection; then, each **viable** -:class:`.Table` within the :class:`.MetaData` will get a new mapped class -generated automatically. The :class:`.ForeignKeyConstraint` objects which -link the various tables together will be used to produce new, bidirectional -:func:`.relationship` objects between classes. The classes and relationships -follow along a default naming scheme that we can customize. At this point, -our basic mapping consisting of related ``User`` and ``Address`` classes is -ready to use in the traditional way. - -.. note:: By **viable**, we mean that for a table to be mapped, it must - specify a primary key. Additionally, if the table is detected as being - a pure association table between two other tables, it will not be directly - mapped and will instead be configured as a many-to-many table between - the mappings for the two referring tables. - -Generating Mappings from an Existing MetaData -============================================= - -We can pass a pre-declared :class:`.MetaData` object to :func:`.automap_base`. -This object can be constructed in any way, including programmatically, from -a serialized file, or from itself being reflected using -:meth:`.MetaData.reflect`. Below we illustrate a combination of reflection and -explicit table declaration:: - - from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey - engine = create_engine("sqlite:///mydatabase.db") - - # produce our own MetaData object - metadata = MetaData() - - # we can reflect it ourselves from a database, using options - # such as 'only' to limit what tables we look at... - metadata.reflect(engine, only=['user', 'address']) - - # ... or just define our own Table objects with it (or combine both) - Table('user_order', metadata, - Column('id', Integer, primary_key=True), - Column('user_id', ForeignKey('user.id')) - ) - - # we can then produce a set of mappings from this MetaData. - Base = automap_base(metadata=metadata) - - # calling prepare() just sets up mapped classes and relationships. - Base.prepare() - - # mapped classes are ready - User, Address, Order = Base.classes.user, Base.classes.address,\ - Base.classes.user_order - -Specifying Classes Explcitly -============================ - -The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined -explicitly, in a way similar to that of the :class:`.DeferredReflection` class. -Classes that extend from :class:`.AutomapBase` act like regular declarative -classes, but are not immediately mapped after their construction, and are -instead mapped when we call :meth:`.AutomapBase.prepare`. The -:meth:`.AutomapBase.prepare` method will make use of the classes we've -established based on the table name we use. 
If our schema contains tables -``user`` and ``address``, we can define one or both of the classes to be used:: - - from sqlalchemy.ext.automap import automap_base - from sqlalchemy import create_engine - - # automap base - Base = automap_base() - - # pre-declare User for the 'user' table - class User(Base): - __tablename__ = 'user' - - # override schema elements like Columns - user_name = Column('name', String) - - # override relationships too, if desired. - # we must use the same name that automap would use for the - # relationship, and also must refer to the class name that automap will - # generate for "address" - address_collection = relationship("address", collection_class=set) - - # reflect - engine = create_engine("sqlite:///mydatabase.db") - Base.prepare(engine, reflect=True) - - # we still have Address generated from the tablename "address", - # but User is the same as Base.classes.User now - - Address = Base.classes.address - - u1 = session.query(User).first() - print (u1.address_collection) - - # the backref is still there: - a1 = session.query(Address).first() - print (a1.user) - -Above, one of the more intricate details is that we illustrated overriding -one of the :func:`.relationship` objects that automap would have created. -To do this, we needed to make sure the names match up with what automap -would normally generate, in that the relationship name would be -``User.address_collection`` and the name of the class referred to, from -automap's perspective, is called ``address``, even though we are referring to -it as ``Address`` within our usage of this class. - -Overriding Naming Schemes -========================= - -:mod:`.sqlalchemy.ext.automap` is tasked with producing mapped classes and -relationship names based on a schema, which means it has decision points in how -these names are determined. These three decision points are provided using -functions which can be passed to the :meth:`.AutomapBase.prepare` method, and -are known as :func:`.classname_for_table`, -:func:`.name_for_scalar_relationship`, -and :func:`.name_for_collection_relationship`. Any or all of these -functions are provided as in the example below, where we use a "camel case" -scheme for class names and a "pluralizer" for collection names using the -`Inflect `_ package:: - - import re - import inflect - - def camelize_classname(base, tablename, table): - "Produce a 'camelized' class name, e.g. " - "'words_and_underscores' -> 'WordsAndUnderscores'" - - return str(tablename[0].upper() + \\ - re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:])) - - _pluralizer = inflect.engine() - def pluralize_collection(base, local_cls, referred_cls, constraint): - "Produce an 'uncamelized', 'pluralized' class name, e.g. 
" - "'SomeTerm' -> 'some_terms'" - - referred_name = referred_cls.__name__ - uncamelized = re.sub(r'[A-Z]', - lambda m: "_%s" % m.group(0).lower(), - referred_name)[1:] - pluralized = _pluralizer.plural(uncamelized) - return pluralized - - from sqlalchemy.ext.automap import automap_base - - Base = automap_base() - - engine = create_engine("sqlite:///mydatabase.db") - - Base.prepare(engine, reflect=True, - classname_for_table=camelize_classname, - name_for_collection_relationship=pluralize_collection - ) - -From the above mapping, we would now have classes ``User`` and ``Address``, -where the collection from ``User`` to ``Address`` is called -``User.addresses``:: - - User, Address = Base.classes.User, Base.classes.Address - - u1 = User(addresses=[Address(email="foo@bar.com")]) - -Relationship Detection -====================== - -The vast majority of what automap accomplishes is the generation of -:func:`.relationship` structures based on foreign keys. The mechanism -by which this works for many-to-one and one-to-many relationships is as -follows: - -1. A given :class:`.Table`, known to be mapped to a particular class, - is examined for :class:`.ForeignKeyConstraint` objects. - -2. From each :class:`.ForeignKeyConstraint`, the remote :class:`.Table` - object present is matched up to the class to which it is to be mapped, - if any, else it is skipped. - -3. As the :class:`.ForeignKeyConstraint` we are examining corresponds to a - reference from the immediate mapped class, the relationship will be set up - as a many-to-one referring to the referred class; a corresponding - one-to-many backref will be created on the referred class referring - to this class. - -4. If any of the columns that are part of the :class:`.ForeignKeyConstraint` - are not nullable (e.g. ``nullable=False``), a - :paramref:`~.relationship.cascade` keyword argument - of ``all, delete-orphan`` will be added to the keyword arguments to - be passed to the relationship or backref. If the - :class:`.ForeignKeyConstraint` reports that - :paramref:`.ForeignKeyConstraint.ondelete` - is set to ``CASCADE`` for a not null or ``SET NULL`` for a nullable - set of columns, the option :paramref:`~.relationship.passive_deletes` - flag is set to ``True`` in the set of relationship keyword arguments. - Note that not all backends support reflection of ON DELETE. - - .. versionadded:: 1.0.0 - automap will detect non-nullable foreign key - constraints when producing a one-to-many relationship and establish - a default cascade of ``all, delete-orphan`` if so; additionally, - if the constraint specifies :paramref:`.ForeignKeyConstraint.ondelete` - of ``CASCADE`` for non-nullable or ``SET NULL`` for nullable columns, - the ``passive_deletes=True`` option is also added. - -5. The names of the relationships are determined using the - :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` and - :paramref:`.AutomapBase.prepare.name_for_collection_relationship` - callable functions. It is important to note that the default relationship - naming derives the name from the **the actual class name**. If you've - given a particular class an explicit name by declaring it, or specified an - alternate class naming scheme, that's the name from which the relationship - name will be derived. - -6. The classes are inspected for an existing mapped property matching these - names. 
If one is detected on one side, but none on the other side, - :class:`.AutomapBase` attempts to create a relationship on the missing side, - then uses the :paramref:`.relationship.back_populates` parameter in order to - point the new relationship to the other side. - -7. In the usual case where no relationship is on either side, - :meth:`.AutomapBase.prepare` produces a :func:`.relationship` on the - "many-to-one" side and matches it to the other using the - :paramref:`.relationship.backref` parameter. - -8. Production of the :func:`.relationship` and optionally the :func:`.backref` - is handed off to the :paramref:`.AutomapBase.prepare.generate_relationship` - function, which can be supplied by the end-user in order to augment - the arguments passed to :func:`.relationship` or :func:`.backref` or to - make use of custom implementations of these functions. - -Custom Relationship Arguments ------------------------------ - -The :paramref:`.AutomapBase.prepare.generate_relationship` hook can be used -to add parameters to relationships. For most cases, we can make use of the -existing :func:`.automap.generate_relationship` function to return -the object, after augmenting the given keyword dictionary with our own -arguments. - -Below is an illustration of how to send -:paramref:`.relationship.cascade` and -:paramref:`.relationship.passive_deletes` -options along to all one-to-many relationships:: - - from sqlalchemy.ext.automap import generate_relationship - - def _gen_relationship(base, direction, return_fn, - attrname, local_cls, referred_cls, **kw): - if direction is interfaces.ONETOMANY: - kw['cascade'] = 'all, delete-orphan' - kw['passive_deletes'] = True - # make use of the built-in function to actually return - # the result. - return generate_relationship(base, direction, return_fn, - attrname, local_cls, referred_cls, **kw) - - from sqlalchemy.ext.automap import automap_base - from sqlalchemy import create_engine - - # automap base - Base = automap_base() - - engine = create_engine("sqlite:///mydatabase.db") - Base.prepare(engine, reflect=True, - generate_relationship=_gen_relationship) - -Many-to-Many relationships --------------------------- - -:mod:`.sqlalchemy.ext.automap` will generate many-to-many relationships, e.g. -those which contain a ``secondary`` argument. The process for producing these -is as follows: - -1. A given :class:`.Table` is examined for :class:`.ForeignKeyConstraint` - objects, before any mapped class has been assigned to it. - -2. If the table contains two and exactly two :class:`.ForeignKeyConstraint` - objects, and all columns within this table are members of these two - :class:`.ForeignKeyConstraint` objects, the table is assumed to be a - "secondary" table, and will **not be mapped directly**. - -3. The two (or one, for self-referential) external tables to which the - :class:`.Table` refers to are matched to the classes to which they will be - mapped, if any. - -4. If mapped classes for both sides are located, a many-to-many bi-directional - :func:`.relationship` / :func:`.backref` pair is created between the two - classes. - -5. The override logic for many-to-many works the same as that of one-to-many/ - many-to-one; the :func:`.generate_relationship` function is called upon - to generate the strucures and existing attributes will be maintained. - -Relationships with Inheritance ------------------------------- - -:mod:`.sqlalchemy.ext.automap` will not generate any relationships between -two classes that are in an inheritance relationship. 
That is, with two -classes given as follows:: - - class Employee(Base): - __tablename__ = 'employee' - id = Column(Integer, primary_key=True) - type = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'employee', 'polymorphic_on': type - } - - class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) - __mapper_args__ = { - 'polymorphic_identity':'engineer', - } - -The foreign key from ``Engineer`` to ``Employee`` is used not for a -relationship, but to establish joined inheritance between the two classes. - -Note that this means automap will not generate *any* relationships -for foreign keys that link from a subclass to a superclass. If a mapping -has actual relationships from subclass to superclass as well, those -need to be explicit. Below, as we have two separate foreign keys -from ``Engineer`` to ``Employee``, we need to set up both the relationship -we want as well as the ``inherit_condition``, as these are not things -SQLAlchemy can guess:: - - class Employee(Base): - __tablename__ = 'employee' - id = Column(Integer, primary_key=True) - type = Column(String(50)) - - __mapper_args__ = { - 'polymorphic_identity':'employee', 'polymorphic_on':type - } - - class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) - favorite_employee_id = Column(Integer, ForeignKey('employee.id')) - - favorite_employee = relationship(Employee, - foreign_keys=favorite_employee_id) - - __mapper_args__ = { - 'polymorphic_identity':'engineer', - 'inherit_condition': id == Employee.id - } - -Handling Simple Naming Conflicts --------------------------------- - -In the case of naming conflicts during mapping, override any of -:func:`.classname_for_table`, :func:`.name_for_scalar_relationship`, -and :func:`.name_for_collection_relationship` as needed. For example, if -automap is attempting to name a many-to-one relationship the same as an -existing column, an alternate convention can be conditionally selected. Given -a schema: - -.. sourcecode:: sql - - CREATE TABLE table_a ( - id INTEGER PRIMARY KEY - ); - - CREATE TABLE table_b ( - id INTEGER PRIMARY KEY, - table_a INTEGER, - FOREIGN KEY(table_a) REFERENCES table_a(id) - ); - -The above schema will first automap the ``table_a`` table as a class named -``table_a``; it will then automap a relationship onto the class for ``table_b`` -with the same name as this related class, e.g. ``table_a``. This -relationship name conflicts with the mapping column ``table_b.table_a``, -and will emit an error on mapping. - -We can resolve this conflict by using an underscore as follows:: - - def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): - name = referred_cls.__name__.lower() - local_table = local_cls.__table__ - if name in local_table.columns: - newname = name + "_" - warnings.warn( - "Already detected name %s present. using %s" % - (name, newname)) - return newname - return name - - - Base.prepare(engine, reflect=True, - name_for_scalar_relationship=name_for_scalar_relationship) - -Alternatively, we can change the name on the column side. 
The columns -that are mapped can be modified using the technique described at -:ref:`mapper_column_distinct_names`, by assigning the column explicitly -to a new name:: - - Base = automap_base() - - class TableB(Base): - __tablename__ = 'table_b' - _table_a = Column('table_a', ForeignKey('table_a.id')) - - Base.prepare(engine, reflect=True) - - -Using Automap with Explicit Declarations -======================================== - -As noted previously, automap has no dependency on reflection, and can make -use of any collection of :class:`.Table` objects within a :class:`.MetaData` -collection. From this, it follows that automap can also be used -generate missing relationships given an otherwise complete model that fully -defines table metadata:: - - from sqlalchemy.ext.automap import automap_base - from sqlalchemy import Column, Integer, String, ForeignKey - - Base = automap_base() - - class User(Base): - __tablename__ = 'user' - - id = Column(Integer, primary_key=True) - name = Column(String) - - class Address(Base): - __tablename__ = 'address' - - id = Column(Integer, primary_key=True) - email = Column(String) - user_id = Column(ForeignKey('user.id')) - - # produce relationships - Base.prepare() - - # mapping is complete, with "address_collection" and - # "user" relationships - a1 = Address(email='u1') - a2 = Address(email='u2') - u1 = User(address_collection=[a1, a2]) - assert a1.user is u1 - -Above, given mostly complete ``User`` and ``Address`` mappings, the -:class:`.ForeignKey` which we defined on ``Address.user_id`` allowed a -bidirectional relationship pair ``Address.user`` and -``User.address_collection`` to be generated on the mapped classes. - -Note that when subclassing :class:`.AutomapBase`, -the :meth:`.AutomapBase.prepare` method is required; if not called, the classes -we've declared are in an un-mapped state. - - -""" -from .declarative import declarative_base as _declarative_base -from .declarative.base import _DeferredMapperConfig -from ..sql import and_ -from ..schema import ForeignKeyConstraint -from ..orm import relationship, backref, interfaces -from .. import util - - -def classname_for_table(base, tablename, table): - """Return the class name that should be used, given the name - of a table. - - The default implementation is:: - - return str(tablename) - - Alternate implementations can be specified using the - :paramref:`.AutomapBase.prepare.classname_for_table` - parameter. - - :param base: the :class:`.AutomapBase` class doing the prepare. - - :param tablename: string name of the :class:`.Table`. - - :param table: the :class:`.Table` object itself. - - :return: a string class name. - - .. note:: - - In Python 2, the string used for the class name **must** be a - non-Unicode object, e.g. a ``str()`` object. The ``.name`` attribute - of :class:`.Table` is typically a Python unicode subclass, so the - ``str()`` function should be applied to this name, after accounting for - any non-ASCII characters. - - """ - return str(tablename) - - -def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): - """Return the attribute name that should be used to refer from one - class to another, for a scalar object reference. - - The default implementation is:: - - return referred_cls.__name__.lower() - - Alternate implementations can be specified using the - :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` - parameter. - - :param base: the :class:`.AutomapBase` class doing the prepare. - - :param local_cls: the class to be mapped on the local side. 
- - :param referred_cls: the class to be mapped on the referring side. - - :param constraint: the :class:`.ForeignKeyConstraint` that is being - inspected to produce this relationship. - - """ - return referred_cls.__name__.lower() - - -def name_for_collection_relationship( - base, local_cls, referred_cls, constraint): - """Return the attribute name that should be used to refer from one - class to another, for a collection reference. - - The default implementation is:: - - return referred_cls.__name__.lower() + "_collection" - - Alternate implementations - can be specified using the - :paramref:`.AutomapBase.prepare.name_for_collection_relationship` - parameter. - - :param base: the :class:`.AutomapBase` class doing the prepare. - - :param local_cls: the class to be mapped on the local side. - - :param referred_cls: the class to be mapped on the referring side. - - :param constraint: the :class:`.ForeignKeyConstraint` that is being - inspected to produce this relationship. - - """ - return referred_cls.__name__.lower() + "_collection" - - -def generate_relationship( - base, direction, return_fn, attrname, local_cls, referred_cls, **kw): - """Generate a :func:`.relationship` or :func:`.backref` on behalf of two - mapped classes. - - An alternate implementation of this function can be specified using the - :paramref:`.AutomapBase.prepare.generate_relationship` parameter. - - The default implementation of this function is as follows:: - - if return_fn is backref: - return return_fn(attrname, **kw) - elif return_fn is relationship: - return return_fn(referred_cls, **kw) - else: - raise TypeError("Unknown relationship function: %s" % return_fn) - - :param base: the :class:`.AutomapBase` class doing the prepare. - - :param direction: indicate the "direction" of the relationship; this will - be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOMANY`. - - :param return_fn: the function that is used by default to create the - relationship. This will be either :func:`.relationship` or - :func:`.backref`. The :func:`.backref` function's result will be used to - produce a new :func:`.relationship` in a second step, so it is critical - that user-defined implementations correctly differentiate between the two - functions, if a custom relationship function is being used. - - :attrname: the attribute name to which this relationship is being assigned. - If the value of :paramref:`.generate_relationship.return_fn` is the - :func:`.backref` function, then this name is the name that is being - assigned to the backref. - - :param local_cls: the "local" class to which this relationship or backref - will be locally present. - - :param referred_cls: the "referred" class to which the relationship or - backref refers to. - - :param \**kw: all additional keyword arguments are passed along to the - function. - - :return: a :func:`.relationship` or :func:`.backref` construct, as dictated - by the :paramref:`.generate_relationship.return_fn` parameter. - - """ - if return_fn is backref: - return return_fn(attrname, **kw) - elif return_fn is relationship: - return return_fn(referred_cls, **kw) - else: - raise TypeError("Unknown relationship function: %s" % return_fn) - - -class AutomapBase(object): - """Base class for an "automap" schema. - - The :class:`.AutomapBase` class can be compared to the "declarative base" - class that is produced by the :func:`.declarative.declarative_base` - function. In practice, the :class:`.AutomapBase` class is always used - as a mixin along with an actual declarative base. 
-
-    A new subclassable :class:`.AutomapBase` is typically instantiated
-    using the :func:`.automap_base` function.
-
-    .. seealso::
-
-        :ref:`automap_toplevel`
-
-    """
-    __abstract__ = True
-
-    classes = None
-    """An instance of :class:`.util.Properties` containing classes.
-
-    This object behaves much like the ``.c`` collection on a table.  Classes
-    are present under the name they were given, e.g.::
-
-        Base = automap_base()
-        Base.prepare(engine=some_engine, reflect=True)
-
-        User, Address = Base.classes.User, Base.classes.Address
-
-    """
-
-    @classmethod
-    def prepare(
-            cls,
-            engine=None,
-            reflect=False,
-            classname_for_table=classname_for_table,
-            collection_class=list,
-            name_for_scalar_relationship=name_for_scalar_relationship,
-            name_for_collection_relationship=name_for_collection_relationship,
-            generate_relationship=generate_relationship):
-        """Extract mapped classes and relationships from the
-        :class:`.MetaData` and perform mappings.
-
-        :param engine: an :class:`.Engine` or :class:`.Connection` with which
-         to perform schema reflection, if specified.
-         If the :paramref:`.AutomapBase.prepare.reflect` argument is False,
-         this object is not used.
-
-        :param reflect: if True, the :meth:`.MetaData.reflect` method is called
-         on the :class:`.MetaData` associated with this :class:`.AutomapBase`.
-         The :class:`.Engine` passed via
-         :paramref:`.AutomapBase.prepare.engine` will be used to perform the
-         reflection if present; otherwise, the :class:`.MetaData` must already
-         be bound to an engine, or the operation will fail.
-
-        :param classname_for_table: callable function which will be used to
-         produce new class names, given a table name.  Defaults to
-         :func:`.classname_for_table`.
-
-        :param name_for_scalar_relationship: callable function which will be
-         used to produce relationship names for scalar relationships.  Defaults
-         to :func:`.name_for_scalar_relationship`.
-
-        :param name_for_collection_relationship: callable function which will
-         be used to produce relationship names for collection-oriented
-         relationships.  Defaults to :func:`.name_for_collection_relationship`.
-
-        :param generate_relationship: callable function which will be used to
-         actually generate :func:`.relationship` and :func:`.backref`
-         constructs.  Defaults to :func:`.generate_relationship`.
-
-        :param collection_class: the Python collection class that will be used
-         when a new :func:`.relationship` object is created that represents a
-         collection.  Defaults to ``list``.
-
-        """
-        if reflect:
-            cls.metadata.reflect(
-                engine,
-                extend_existing=True,
-                autoload_replace=False
-            )
-
-        table_to_map_config = dict(
-            (m.local_table, m)
-            for m in _DeferredMapperConfig.
- classes_for_base(cls, sort=False) - ) - - many_to_many = [] - - for table in cls.metadata.tables.values(): - lcl_m2m, rem_m2m, m2m_const = _is_many_to_many(cls, table) - if lcl_m2m is not None: - many_to_many.append((lcl_m2m, rem_m2m, m2m_const, table)) - elif not table.primary_key: - continue - elif table not in table_to_map_config: - mapped_cls = type( - classname_for_table(cls, table.name, table), - (cls, ), - {"__table__": table} - ) - map_config = _DeferredMapperConfig.config_for_cls(mapped_cls) - cls.classes[map_config.cls.__name__] = mapped_cls - table_to_map_config[table] = map_config - - for map_config in table_to_map_config.values(): - _relationships_for_fks(cls, - map_config, - table_to_map_config, - collection_class, - name_for_scalar_relationship, - name_for_collection_relationship, - generate_relationship) - - for lcl_m2m, rem_m2m, m2m_const, table in many_to_many: - _m2m_relationship(cls, lcl_m2m, rem_m2m, m2m_const, table, - table_to_map_config, - collection_class, - name_for_scalar_relationship, - name_for_collection_relationship, - generate_relationship) - - for map_config in _DeferredMapperConfig.classes_for_base(cls): - map_config.map() - - _sa_decl_prepare = True - """Indicate that the mapping of classes should be deferred. - - The presence of this attribute name indicates to declarative - that the call to mapper() should not occur immediately; instead, - information about the table and attributes to be mapped are gathered - into an internal structure called _DeferredMapperConfig. These - objects can be collected later using classes_for_base(), additional - mapping decisions can be made, and then the map() method will actually - apply the mapping. - - The only real reason this deferral of the whole - thing is needed is to support primary key columns that aren't reflected - yet when the class is declared; everything else can theoretically be - added to the mapper later. However, the _DeferredMapperConfig is a - nice interface in any case which exists at that not usually exposed point - at which declarative has the class and the Table but hasn't called - mapper() yet. - - """ - - -def automap_base(declarative_base=None, **kw): - """Produce a declarative automap base. - - This function produces a new base class that is a product of the - :class:`.AutomapBase` class as well a declarative base produced by - :func:`.declarative.declarative_base`. - - All parameters other than ``declarative_base`` are keyword arguments - that are passed directly to the :func:`.declarative.declarative_base` - function. - - :param declarative_base: an existing class produced by - :func:`.declarative.declarative_base`. When this is passed, the function - no longer invokes :func:`.declarative.declarative_base` itself, and all - other keyword arguments are ignored. - - :param \**kw: keyword arguments are passed along to - :func:`.declarative.declarative_base`. 
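To illustrate the ``declarative_base`` parameter described above, a short
sketch (assuming an existing ``engine``) that layers automap on top of an
already-established declarative base, so that hand-declared and automapped
classes share one :class:`.MetaData` and class registry::

    from sqlalchemy.ext.automap import automap_base
    from sqlalchemy.ext.declarative import declarative_base

    DeclBase = declarative_base()

    # the returned Base mixes AutomapBase into the existing base;
    # explicit classes are declared against Base as usual, then:
    Base = automap_base(DeclBase)
    Base.prepare(engine, reflect=True)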
- - """ - if declarative_base is None: - Base = _declarative_base(**kw) - else: - Base = declarative_base - - return type( - Base.__name__, - (AutomapBase, Base,), - {"__abstract__": True, "classes": util.Properties({})} - ) - - -def _is_many_to_many(automap_base, table): - fk_constraints = [const for const in table.constraints - if isinstance(const, ForeignKeyConstraint)] - if len(fk_constraints) != 2: - return None, None, None - - cols = sum( - [[fk.parent for fk in fk_constraint.elements] - for fk_constraint in fk_constraints], []) - - if set(cols) != set(table.c): - return None, None, None - - return ( - fk_constraints[0].elements[0].column.table, - fk_constraints[1].elements[0].column.table, - fk_constraints - ) - - -def _relationships_for_fks(automap_base, map_config, table_to_map_config, - collection_class, - name_for_scalar_relationship, - name_for_collection_relationship, - generate_relationship): - local_table = map_config.local_table - local_cls = map_config.cls - - if local_table is None: - return - for constraint in local_table.constraints: - if isinstance(constraint, ForeignKeyConstraint): - fks = constraint.elements - referred_table = fks[0].column.table - referred_cfg = table_to_map_config.get(referred_table, None) - if referred_cfg is None: - continue - referred_cls = referred_cfg.cls - - if local_cls is not referred_cls and issubclass( - local_cls, referred_cls): - continue - - relationship_name = name_for_scalar_relationship( - automap_base, - local_cls, - referred_cls, constraint) - backref_name = name_for_collection_relationship( - automap_base, - referred_cls, - local_cls, - constraint - ) - - o2m_kws = {} - nullable = False not in set([fk.parent.nullable for fk in fks]) - if not nullable: - o2m_kws['cascade'] = "all, delete-orphan" - - if constraint.ondelete and \ - constraint.ondelete.lower() == "cascade": - o2m_kws['passive_deletes'] = True - else: - if constraint.ondelete and \ - constraint.ondelete.lower() == "set null": - o2m_kws['passive_deletes'] = True - - create_backref = backref_name not in referred_cfg.properties - - if relationship_name not in map_config.properties: - if create_backref: - backref_obj = generate_relationship( - automap_base, - interfaces.ONETOMANY, backref, - backref_name, referred_cls, local_cls, - collection_class=collection_class, - **o2m_kws) - else: - backref_obj = None - rel = generate_relationship(automap_base, - interfaces.MANYTOONE, - relationship, - relationship_name, - local_cls, referred_cls, - foreign_keys=[ - fk.parent - for fk in constraint.elements], - backref=backref_obj, - remote_side=[ - fk.column - for fk in constraint.elements] - ) - if rel is not None: - map_config.properties[relationship_name] = rel - if not create_backref: - referred_cfg.properties[ - backref_name].back_populates = relationship_name - elif create_backref: - rel = generate_relationship(automap_base, - interfaces.ONETOMANY, - relationship, - backref_name, - referred_cls, local_cls, - foreign_keys=[ - fk.parent - for fk in constraint.elements], - back_populates=relationship_name, - collection_class=collection_class, - **o2m_kws) - if rel is not None: - referred_cfg.properties[backref_name] = rel - map_config.properties[ - relationship_name].back_populates = backref_name - - -def _m2m_relationship(automap_base, lcl_m2m, rem_m2m, m2m_const, table, - table_to_map_config, - collection_class, - name_for_scalar_relationship, - name_for_collection_relationship, - generate_relationship): - - map_config = table_to_map_config.get(lcl_m2m, None) - referred_cfg = 
table_to_map_config.get(rem_m2m, None) - if map_config is None or referred_cfg is None: - return - - local_cls = map_config.cls - referred_cls = referred_cfg.cls - - relationship_name = name_for_collection_relationship( - automap_base, - local_cls, - referred_cls, m2m_const[0]) - backref_name = name_for_collection_relationship( - automap_base, - referred_cls, - local_cls, - m2m_const[1] - ) - - create_backref = backref_name not in referred_cfg.properties - - if relationship_name not in map_config.properties: - if create_backref: - backref_obj = generate_relationship( - automap_base, - interfaces.MANYTOMANY, - backref, - backref_name, - referred_cls, local_cls, - collection_class=collection_class - ) - else: - backref_obj = None - rel = generate_relationship(automap_base, - interfaces.MANYTOMANY, - relationship, - relationship_name, - local_cls, referred_cls, - secondary=table, - primaryjoin=and_( - fk.column == fk.parent - for fk in m2m_const[0].elements), - secondaryjoin=and_( - fk.column == fk.parent - for fk in m2m_const[1].elements), - backref=backref_obj, - collection_class=collection_class - ) - if rel is not None: - map_config.properties[relationship_name] = rel - - if not create_backref: - referred_cfg.properties[ - backref_name].back_populates = relationship_name - elif create_backref: - rel = generate_relationship(automap_base, - interfaces.MANYTOMANY, - relationship, - backref_name, - referred_cls, local_cls, - secondary=table, - primaryjoin=and_( - fk.column == fk.parent - for fk in m2m_const[1].elements), - secondaryjoin=and_( - fk.column == fk.parent - for fk in m2m_const[0].elements), - back_populates=relationship_name, - collection_class=collection_class) - if rel is not None: - referred_cfg.properties[backref_name] = rel - map_config.properties[ - relationship_name].back_populates = backref_name diff --git a/python/sqlalchemy/ext/baked.py b/python/sqlalchemy/ext/baked.py deleted file mode 100644 index d8c8843f..00000000 --- a/python/sqlalchemy/ext/baked.py +++ /dev/null @@ -1,516 +0,0 @@ -# sqlalchemy/ext/baked.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""Baked query extension. - -Provides a creational pattern for the :class:`.query.Query` object which -allows the fully constructed object, Core select statement, and string -compiled result to be fully cached. - - -""" - -from ..orm.query import Query -from ..orm import strategies, attributes, properties, \ - strategy_options, util as orm_util, interfaces -from .. import log as sqla_log -from ..sql import util as sql_util -from ..orm import exc as orm_exc -from .. import exc as sa_exc -from .. 
import util - -import copy -import logging - -log = logging.getLogger(__name__) - - -class BakedQuery(object): - """A builder object for :class:`.query.Query` objects.""" - - __slots__ = 'steps', '_bakery', '_cache_key', '_spoiled' - - def __init__(self, bakery, initial_fn, args=()): - self._cache_key = () - self._update_cache_key(initial_fn, args) - self.steps = [initial_fn] - self._spoiled = False - self._bakery = bakery - - @classmethod - def bakery(cls, size=200): - """Construct a new bakery.""" - - _bakery = util.LRUCache(size) - - def call(initial_fn, *args): - return cls(_bakery, initial_fn, args) - - return call - - def _clone(self): - b1 = BakedQuery.__new__(BakedQuery) - b1._cache_key = self._cache_key - b1.steps = list(self.steps) - b1._bakery = self._bakery - b1._spoiled = self._spoiled - return b1 - - def _update_cache_key(self, fn, args=()): - self._cache_key += (fn.__code__,) + args - - def __iadd__(self, other): - if isinstance(other, tuple): - self.add_criteria(*other) - else: - self.add_criteria(other) - return self - - def __add__(self, other): - if isinstance(other, tuple): - return self.with_criteria(*other) - else: - return self.with_criteria(other) - - def add_criteria(self, fn, *args): - """Add a criteria function to this :class:`.BakedQuery`. - - This is equivalent to using the ``+=`` operator to - modify a :class:`.BakedQuery` in-place. - - """ - self._update_cache_key(fn, args) - self.steps.append(fn) - return self - - def with_criteria(self, fn, *args): - """Add a criteria function to a :class:`.BakedQuery` cloned from this one. - - This is equivalent to using the ``+`` operator to - produce a new :class:`.BakedQuery` with modifications. - - """ - return self._clone().add_criteria(fn, *args) - - def for_session(self, session): - """Return a :class:`.Result` object for this :class:`.BakedQuery`. - - This is equivalent to calling the :class:`.BakedQuery` as a - Python callable, e.g. ``result = my_baked_query(session)``. - - """ - return Result(self, session) - - def __call__(self, session): - return self.for_session(session) - - def spoil(self, full=False): - """Cancel any query caching that will occur on this BakedQuery object. - - The BakedQuery can continue to be used normally, however additional - creational functions will not be cached; they will be called - on every invocation. - - This is to support the case where a particular step in constructing - a baked query disqualifies the query from being cacheable, such - as a variant that relies upon some uncacheable value. - - :param full: if False, only functions added to this - :class:`.BakedQuery` object subsequent to the spoil step will be - non-cached; the state of the :class:`.BakedQuery` up until - this point will be pulled from the cache. If True, then the - entire :class:`.Query` object is built from scratch each - time, with all creational functions being called on each - invocation. 
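A short sketch of where :meth:`.spoil` fits, assuming a mapped ``User`` class
and a :class:`.Session`; ``current_filter`` stands in for a hypothetical
criterion that changes on every call and therefore must not be cached::

    from sqlalchemy.ext import baked

    bakery = baked.bakery()

    bq = bakery(lambda session: session.query(User))
    bq.spoil()  # criteria added from this point on are re-run every call
    bq += lambda q: q.filter(current_filter)  # hypothetical, uncacheable
    rows = bq(session).all()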
- - """ - if not full: - _spoil_point = self._clone() - _spoil_point._cache_key += ('_query_only', ) - self.steps = [_spoil_point._retrieve_baked_query] - self._spoiled = True - return self - - def _retrieve_baked_query(self, session): - query = self._bakery.get(self._cache_key, None) - if query is None: - query = self._as_query(session) - self._bakery[self._cache_key] = query.with_session(None) - return query.with_session(session) - - def _bake(self, session): - query = self._as_query(session) - - context = query._compile_context() - self._bake_subquery_loaders(session, context) - context.session = None - context.query = query = context.query.with_session(None) - query._execution_options = query._execution_options.union( - {"compiled_cache": self._bakery} - ) - # we'll be holding onto the query for some of its state, - # so delete some compilation-use-only attributes that can take up - # space - for attr in ( - '_correlate', '_from_obj', '_mapper_adapter_map', - '_joinpath', '_joinpoint'): - query.__dict__.pop(attr, None) - self._bakery[self._cache_key] = context - return context - - def _as_query(self, session): - query = self.steps[0](session) - - for step in self.steps[1:]: - query = step(query) - return query - - def _bake_subquery_loaders(self, session, context): - """convert subquery eager loaders in the cache into baked queries. - - For subquery eager loading to work, all we need here is that the - Query point to the correct session when it is run. However, since - we are "baking" anyway, we may as well also turn the query into - a "baked" query so that we save on performance too. - - """ - context.attributes['baked_queries'] = baked_queries = [] - for k, v in list(context.attributes.items()): - if isinstance(v, Query): - if 'subquery' in k: - bk = BakedQuery(self._bakery, lambda *args: v) - bk._cache_key = self._cache_key + k - bk._bake(session) - baked_queries.append((k, bk._cache_key, v)) - del context.attributes[k] - - def _unbake_subquery_loaders(self, session, context, params): - """Retrieve subquery eager loaders stored by _bake_subquery_loaders - and turn them back into Result objects that will iterate just - like a Query object. - - """ - for k, cache_key, query in context.attributes["baked_queries"]: - bk = BakedQuery(self._bakery, lambda sess: query.with_session(sess)) - bk._cache_key = cache_key - context.attributes[k] = bk.for_session(session).params(**params) - - -class Result(object): - """Invokes a :class:`.BakedQuery` against a :class:`.Session`. - - The :class:`.Result` object is where the actual :class:`.query.Query` - object gets created, or retrieved from the cache, - against a target :class:`.Session`, and is then invoked for results. 
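To make the API above concrete, a minimal sketch of the intended usage
pattern, assuming a mapped ``User`` class, a :class:`.Session`, and
``bakery = baked.bakery()`` as before; each criteria function runs once per
cache key, after which the fully constructed query and its compiled statement
are reused::

    from sqlalchemy import bindparam

    def lookup_user(session, username):
        bq = bakery(lambda session: session.query(User))
        # each criteria function contributes its __code__ to the cache key
        bq += lambda q: q.filter(User.name == bindparam('username'))
        return bq(session).params(username=username).all()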
- - """ - __slots__ = 'bq', 'session', '_params' - - def __init__(self, bq, session): - self.bq = bq - self.session = session - self._params = {} - - def params(self, *args, **kw): - """Specify parameters to be replaced into the string SQL statement.""" - - if len(args) == 1: - kw.update(args[0]) - elif len(args) > 0: - raise sa_exc.ArgumentError( - "params() takes zero or one positional argument, " - "which is a dictionary.") - self._params.update(kw) - return self - - def _as_query(self): - return self.bq._as_query(self.session).params(self._params) - - def __str__(self): - return str(self._as_query()) - - def __iter__(self): - bq = self.bq - if bq._spoiled: - return iter(self._as_query()) - - baked_context = bq._bakery.get(bq._cache_key, None) - if baked_context is None: - baked_context = bq._bake(self.session) - - context = copy.copy(baked_context) - context.session = self.session - context.attributes = context.attributes.copy() - - bq._unbake_subquery_loaders(self.session, context, self._params) - - context.statement.use_labels = True - if context.autoflush and not context.populate_existing: - self.session._autoflush() - return context.query.params(self._params).\ - with_session(self.session)._execute_and_instances(context) - - def first(self): - """Return the first row. - - Equivalent to :meth:`.Query.first`. - - """ - bq = self.bq.with_criteria(lambda q: q.slice(0, 1)) - ret = list(bq.for_session(self.session).params(self._params)) - if len(ret) > 0: - return ret[0] - else: - return None - - def one(self): - """Return exactly one result or raise an exception. - - Equivalent to :meth:`.Query.one`. - - """ - ret = list(self) - - l = len(ret) - if l == 1: - return ret[0] - elif l == 0: - raise orm_exc.NoResultFound("No row was found for one()") - else: - raise orm_exc.MultipleResultsFound( - "Multiple rows were found for one()") - - def one_or_none(self): - """Return one or zero results, or raise an exception for multiple - rows. - - Equivalent to :meth:`.Query.one_or_none`. - - .. versionadded:: 1.0.9 - - """ - ret = list(self) - - l = len(ret) - if l == 1: - return ret[0] - elif l == 0: - return None - else: - raise orm_exc.MultipleResultsFound( - "Multiple rows were found for one_or_none()") - - def all(self): - """Return all rows. - - Equivalent to :meth:`.Query.all`. - - """ - return list(self) - - def get(self, ident): - """Retrieve an object based on identity. - - Equivalent to :meth:`.Query.get`. 
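Continuing the sketches above, the :class:`.Result` methods mirror their
:class:`.Query` counterparts; ``'ed'`` is a placeholder parameter value::

    result = bq(session).params(username='ed')
    user = result.one_or_none()  # the row, None, or MultipleResultsFound
    first = result.first()       # applies slice(0, 1), like Query.first()
    users = result.all()         # equivalent to Query.all()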
- - """ - - query = self.bq.steps[0](self.session) - return query._get_impl(ident, self._load_on_ident) - - def _load_on_ident(self, query, key): - """Load the given identity key from the database.""" - - ident = key[1] - - mapper = query._mapper_zero() - - _get_clause, _get_params = mapper._get_clause - - def setup(query): - _lcl_get_clause = _get_clause - q = query._clone() - q._get_condition() - q._order_by = None - - # None present in ident - turn those comparisons - # into "IS NULL" - if None in ident: - nones = set([ - _get_params[col].key for col, value in - zip(mapper.primary_key, ident) if value is None - ]) - _lcl_get_clause = sql_util.adapt_criterion_to_null( - _lcl_get_clause, nones) - - _lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False) - q._criterion = _lcl_get_clause - return q - - # cache the query against a key that includes - # which positions in the primary key are NULL - # (remember, we can map to an OUTER JOIN) - bq = self.bq - - bq = bq.with_criteria(setup, tuple(elem is None for elem in ident)) - - params = dict([ - (_get_params[primary_key].key, id_val) - for id_val, primary_key in zip(ident, mapper.primary_key) - ]) - - result = list(bq.for_session(self.session).params(**params)) - l = len(result) - if l > 1: - raise orm_exc.MultipleResultsFound() - elif l: - return result[0] - else: - return None - - -def bake_lazy_loaders(): - """Enable the use of baked queries for all lazyloaders systemwide. - - This operation should be safe for all lazy loaders, and will reduce - Python overhead for these operations. - - """ - strategies.LazyLoader._strategy_keys[:] = [] - BakedLazyLoader._strategy_keys[:] = [] - - properties.RelationshipProperty.strategy_for( - lazy="select")(BakedLazyLoader) - properties.RelationshipProperty.strategy_for( - lazy=True)(BakedLazyLoader) - properties.RelationshipProperty.strategy_for( - lazy="baked_select")(BakedLazyLoader) - - -def unbake_lazy_loaders(): - """Disable the use of baked queries for all lazyloaders systemwide. - - This operation reverts the changes produced by :func:`.bake_lazy_loaders`. 
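Per the docstrings above and below, the systemwide switch is a single call,
typically made once at application startup; a sketch::

    from sqlalchemy.ext import baked

    baked.bake_lazy_loaders()    # all relationship() lazy loads now use baked queries
    # ... application runs ...
    baked.unbake_lazy_loaders()  # restore the plain LazyLoader strategy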
- - """ - strategies.LazyLoader._strategy_keys[:] = [] - BakedLazyLoader._strategy_keys[:] = [] - - properties.RelationshipProperty.strategy_for( - lazy="select")(strategies.LazyLoader) - properties.RelationshipProperty.strategy_for( - lazy=True)(strategies.LazyLoader) - properties.RelationshipProperty.strategy_for( - lazy="baked_select")(BakedLazyLoader) - assert strategies.LazyLoader._strategy_keys - - -@sqla_log.class_logger -@properties.RelationshipProperty.strategy_for(lazy="baked_select") -class BakedLazyLoader(strategies.LazyLoader): - - def _emit_lazyload(self, session, state, ident_key, passive): - q = BakedQuery( - self.mapper._compiled_cache, - lambda session: session.query(self.mapper)) - q.add_criteria( - lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False), - self.parent_property) - - if not self.parent_property.bake_queries: - q.spoil(full=True) - - if self.parent_property.secondary is not None: - q.add_criteria( - lambda q: - q.select_from(self.mapper, self.parent_property.secondary)) - - pending = not state.key - - # don't autoflush on pending - if pending or passive & attributes.NO_AUTOFLUSH: - q.add_criteria(lambda q: q.autoflush(False)) - - if state.load_path: - q.spoil() - q.add_criteria( - lambda q: - q._with_current_path(state.load_path[self.parent_property])) - - if state.load_options: - q.spoil() - q.add_criteria( - lambda q: q._conditional_options(*state.load_options)) - - if self.use_get: - return q(session)._load_on_ident( - session.query(self.mapper), ident_key) - - if self.parent_property.order_by: - q.add_criteria( - lambda q: - q.order_by(*util.to_list(self.parent_property.order_by))) - - for rev in self.parent_property._reverse_property: - # reverse props that are MANYTOONE are loading *this* - # object from get(), so don't need to eager out to those. - if rev.direction is interfaces.MANYTOONE and \ - rev._use_get and \ - not isinstance(rev.strategy, strategies.LazyLoader): - q.add_criteria( - lambda q: - q.options( - strategy_options.Load( - rev.parent).baked_lazyload(rev.key))) - - lazy_clause, params = self._generate_lazy_clause(state, passive) - - if pending: - if orm_util._none_set.intersection(params.values()): - return None - - q.add_criteria(lambda q: q.filter(lazy_clause)) - result = q(session).params(**params).all() - if self.uselist: - return result - else: - l = len(result) - if l: - if l > 1: - util.warn( - "Multiple rows returned with " - "uselist=False for lazily-loaded attribute '%s' " - % self.parent_property) - - return result[0] - else: - return None - - -@strategy_options.loader_option() -def baked_lazyload(loadopt, attr): - """Indicate that the given attribute should be loaded using "lazy" - loading with a "baked" query used in the load. 
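Rather than the systemwide switch, the strategy can also be requested per
relationship with ``lazy='baked_select'``, or per query with the loader option
defined here; a sketch assuming mapped ``User`` and ``Address`` classes and an
existing ``session``::

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        addresses = relationship("Address", lazy="baked_select")

    # or, per query:
    from sqlalchemy.ext.baked import baked_lazyload
    session.query(User).options(baked_lazyload(User.addresses)).all()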
- - """ - return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"}) - - -@baked_lazyload._add_unbound_fn -def baked_lazyload(*keys): - return strategy_options._UnboundLoad._from_keys( - strategy_options._UnboundLoad.baked_lazyload, keys, False, {}) - - -@baked_lazyload._add_unbound_all_fn -def baked_lazyload_all(*keys): - return strategy_options._UnboundLoad._from_keys( - strategy_options._UnboundLoad.baked_lazyload, keys, True, {}) - -baked_lazyload = baked_lazyload._unbound_fn -baked_lazyload_all = baked_lazyload_all._unbound_all_fn - -bakery = BakedQuery.bakery diff --git a/python/sqlalchemy/ext/compiler.py b/python/sqlalchemy/ext/compiler.py deleted file mode 100644 index 9717e41c..00000000 --- a/python/sqlalchemy/ext/compiler.py +++ /dev/null @@ -1,451 +0,0 @@ -# ext/compiler.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides an API for creation of custom ClauseElements and compilers. - -Synopsis -======== - -Usage involves the creation of one or more -:class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or -more callables defining its compilation:: - - from sqlalchemy.ext.compiler import compiles - from sqlalchemy.sql.expression import ColumnClause - - class MyColumn(ColumnClause): - pass - - @compiles(MyColumn) - def compile_mycolumn(element, compiler, **kw): - return "[%s]" % element.name - -Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`, -the base expression element for named column objects. The ``compiles`` -decorator registers itself with the ``MyColumn`` class so that it is invoked -when the object is compiled to a string:: - - from sqlalchemy import select - - s = select([MyColumn('x'), MyColumn('y')]) - print str(s) - -Produces:: - - SELECT [x], [y] - -Dialect-specific compilation rules -================================== - -Compilers can also be made dialect-specific. The appropriate compiler will be -invoked for the dialect in use:: - - from sqlalchemy.schema import DDLElement - - class AlterColumn(DDLElement): - - def __init__(self, column, cmd): - self.column = column - self.cmd = cmd - - @compiles(AlterColumn) - def visit_alter_column(element, compiler, **kw): - return "ALTER COLUMN %s ..." % element.column.name - - @compiles(AlterColumn, 'postgresql') - def visit_alter_column(element, compiler, **kw): - return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name, - element.column.name) - -The second ``visit_alter_table`` will be invoked when any ``postgresql`` -dialect is used. - -Compiling sub-elements of a custom expression construct -======================================================= - -The ``compiler`` argument is the -:class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object -can be inspected for any information about the in-progress compilation, -including ``compiler.dialect``, ``compiler.statement`` etc. 
The -:class:`~sqlalchemy.sql.compiler.SQLCompiler` and -:class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()`` -method which can be used for compilation of embedded attributes:: - - from sqlalchemy.sql.expression import Executable, ClauseElement - - class InsertFromSelect(Executable, ClauseElement): - def __init__(self, table, select): - self.table = table - self.select = select - - @compiles(InsertFromSelect) - def visit_insert_from_select(element, compiler, **kw): - return "INSERT INTO %s (%s)" % ( - compiler.process(element.table, asfrom=True), - compiler.process(element.select) - ) - - insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5)) - print insert - -Produces:: - - "INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z - FROM mytable WHERE mytable.x > :x_1)" - -.. note:: - - The above ``InsertFromSelect`` construct is only an example, this actual - functionality is already available using the - :meth:`.Insert.from_select` method. - -.. note:: - - The above ``InsertFromSelect`` construct probably wants to have "autocommit" - enabled. See :ref:`enabling_compiled_autocommit` for this step. - -Cross Compiling between SQL and DDL compilers ---------------------------------------------- - -SQL and DDL constructs are each compiled using different base compilers - -``SQLCompiler`` and ``DDLCompiler``. A common need is to access the -compilation rules of SQL expressions from within a DDL expression. The -``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as -below where we generate a CHECK constraint that embeds a SQL expression:: - - @compiles(MyConstraint) - def compile_my_constraint(constraint, ddlcompiler, **kw): - return "CONSTRAINT %s CHECK (%s)" % ( - constraint.name, - ddlcompiler.sql_compiler.process(constraint.expression) - ) - -.. _enabling_compiled_autocommit: - -Enabling Autocommit on a Construct -================================== - -Recall from the section :ref:`autocommit` that the :class:`.Engine`, when -asked to execute a construct in the absence of a user-defined transaction, -detects if the given construct represents DML or DDL, that is, a data -modification or data definition statement, which requires (or may require, -in the case of DDL) that the transaction generated by the DBAPI be committed -(recall that DBAPI always has a transaction going on regardless of what -SQLAlchemy does). Checking for this is actually accomplished by checking for -the "autocommit" execution option on the construct. When building a -construct like an INSERT derivation, a new DDL type, or perhaps a stored -procedure that alters data, the "autocommit" option needs to be set in order -for the statement to function with "connectionless" execution -(as described in :ref:`dbengine_implicit`). 
- -Currently a quick way to do this is to subclass :class:`.Executable`, then -add the "autocommit" flag to the ``_execution_options`` dictionary (note this -is a "frozen" dictionary which supplies a generative ``union()`` method):: - - from sqlalchemy.sql.expression import Executable, ClauseElement - - class MyInsertThing(Executable, ClauseElement): - _execution_options = \\ - Executable._execution_options.union({'autocommit': True}) - -More succinctly, if the construct is truly similar to an INSERT, UPDATE, or -DELETE, :class:`.UpdateBase` can be used, which already is a subclass -of :class:`.Executable`, :class:`.ClauseElement` and includes the -``autocommit`` flag:: - - from sqlalchemy.sql.expression import UpdateBase - - class MyInsertThing(UpdateBase): - def __init__(self, ...): - ... - - - - -DDL elements that subclass :class:`.DDLElement` already have the -"autocommit" flag turned on. - - - - -Changing the default compilation of existing constructs -======================================================= - -The compiler extension applies just as well to the existing constructs. When -overriding the compilation of a built in SQL construct, the @compiles -decorator is invoked upon the appropriate class (be sure to use the class, -i.e. ``Insert`` or ``Select``, instead of the creation function such -as ``insert()`` or ``select()``). - -Within the new compilation function, to get at the "original" compilation -routine, use the appropriate visit_XXX method - this -because compiler.process() will call upon the overriding routine and cause -an endless loop. Such as, to add "prefix" to all insert statements:: - - from sqlalchemy.sql.expression import Insert - - @compiles(Insert) - def prefix_inserts(insert, compiler, **kw): - return compiler.visit_insert(insert.prefix_with("some prefix"), **kw) - -The above compiler will prefix all INSERT statements with "some prefix" when -compiled. - -.. _type_compilation_extension: - -Changing Compilation of Types -============================= - -``compiler`` works for types, too, such as below where we implement the -MS-SQL specific 'max' keyword for ``String``/``VARCHAR``:: - - @compiles(String, 'mssql') - @compiles(VARCHAR, 'mssql') - def compile_varchar(element, compiler, **kw): - if element.length == 'max': - return "VARCHAR('max')" - else: - return compiler.visit_VARCHAR(element, **kw) - - foo = Table('foo', metadata, - Column('data', VARCHAR('max')) - ) - -Subclassing Guidelines -====================== - -A big part of using the compiler extension is subclassing SQLAlchemy -expression constructs. To make this easier, the expression and -schema packages feature a set of "bases" intended for common tasks. -A synopsis is as follows: - -* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root - expression class. Any SQL expression can be derived from this base, and is - probably the best choice for longer constructs such as specialized INSERT - statements. - -* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all - "column-like" elements. Anything that you'd place in the "columns" clause of - a SELECT statement (as well as order by and group by) can derive from this - - the object will automatically have Python "comparison" behavior. - - :class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a - ``type`` member which is expression's return type. 
This can be established
-  at the instance level in the constructor, or at the class level if it's
-  generally constant::
-
-    class timestamp(ColumnElement):
-        type = TIMESTAMP()
-
-* :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a
-  ``ColumnElement`` and a "from clause" like object, and represents a SQL
-  function or stored procedure type of call. Since most databases support
-  statements along the line of "SELECT <some function> FROM <some table>"
-  ``FunctionElement`` adds in the ability to be used in the FROM clause of a
-  ``select()`` construct::
-
-    from sqlalchemy.sql.expression import FunctionElement
-
-    class coalesce(FunctionElement):
-        name = 'coalesce'
-
-    @compiles(coalesce)
-    def compile(element, compiler, **kw):
-        return "coalesce(%s)" % compiler.process(element.clauses)
-
-    @compiles(coalesce, 'oracle')
-    def compile(element, compiler, **kw):
-        if len(element.clauses) > 2:
-            raise TypeError("coalesce only supports two arguments on Oracle")
-        return "nvl(%s)" % compiler.process(element.clauses)
-
-* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions,
-  like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement``
-  subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``.
-  ``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the
-  ``execute_at()`` method, allowing the construct to be invoked during CREATE
-  TABLE and DROP TABLE sequences.
-
-* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which
-  should be used with any expression class that represents a "standalone"
-  SQL statement that can be passed directly to an ``execute()`` method.  It
-  is already implicit within ``DDLElement`` and ``FunctionElement``.
-
-Further Examples
-================
-
-"UTC timestamp" function
--------------------------
-
-A function that works like "CURRENT_TIMESTAMP" except applies the
-appropriate conversions so that the time is in UTC time.  Timestamps are best
-stored in relational databases as UTC, without time zones.  UTC so that your
-database doesn't think time has gone backwards in the hour when daylight
-savings ends, without timezones because timezones are like character
-encodings - they're best applied only at the endpoints of an application
-(i.e. convert to UTC upon user input, re-apply desired timezone upon display).
-
-For Postgresql and Microsoft SQL Server::
-
-    from sqlalchemy.sql import expression
-    from sqlalchemy.ext.compiler import compiles
-    from sqlalchemy.types import DateTime
-
-    class utcnow(expression.FunctionElement):
-        type = DateTime()
-
-    @compiles(utcnow, 'postgresql')
-    def pg_utcnow(element, compiler, **kw):
-        return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
-
-    @compiles(utcnow, 'mssql')
-    def ms_utcnow(element, compiler, **kw):
-        return "GETUTCDATE()"
-
-Example usage::
-
-    from sqlalchemy import (
-        Table, Column, Integer, String, DateTime, MetaData
-    )
-    metadata = MetaData()
-    event = Table("event", metadata,
-        Column("id", Integer, primary_key=True),
-        Column("description", String(50), nullable=False),
-        Column("timestamp", DateTime, server_default=utcnow())
-    )
-
-"GREATEST" function
--------------------
-
-The "GREATEST" function is given any number of arguments and returns the one
-that is of the highest value - it's equivalent to Python's ``max``
-function.
A SQL standard version versus a CASE based version which only -accommodates two arguments:: - - from sqlalchemy.sql import expression - from sqlalchemy.ext.compiler import compiles - from sqlalchemy.types import Numeric - - class greatest(expression.FunctionElement): - type = Numeric() - name = 'greatest' - - @compiles(greatest) - def default_greatest(element, compiler, **kw): - return compiler.visit_function(element) - - @compiles(greatest, 'sqlite') - @compiles(greatest, 'mssql') - @compiles(greatest, 'oracle') - def case_greatest(element, compiler, **kw): - arg1, arg2 = list(element.clauses) - return "CASE WHEN %s > %s THEN %s ELSE %s END" % ( - compiler.process(arg1), - compiler.process(arg2), - compiler.process(arg1), - compiler.process(arg2), - ) - -Example usage:: - - Session.query(Account).\\ - filter( - greatest( - Account.checking_balance, - Account.savings_balance) > 10000 - ) - -"false" expression ------------------- - -Render a "false" constant expression, rendering as "0" on platforms that -don't have a "false" constant:: - - from sqlalchemy.sql import expression - from sqlalchemy.ext.compiler import compiles - - class sql_false(expression.ColumnElement): - pass - - @compiles(sql_false) - def default_false(element, compiler, **kw): - return "false" - - @compiles(sql_false, 'mssql') - @compiles(sql_false, 'mysql') - @compiles(sql_false, 'oracle') - def int_false(element, compiler, **kw): - return "0" - -Example usage:: - - from sqlalchemy import select, union_all - - exp = union_all( - select([users.c.name, sql_false().label("enrolled")]), - select([customers.c.name, customers.c.enrolled]) - ) - -""" -from .. import exc -from ..sql import visitors - - -def compiles(class_, *specs): - """Register a function as a compiler for a - given :class:`.ClauseElement` type.""" - - def decorate(fn): - existing = class_.__dict__.get('_compiler_dispatcher', None) - existing_dispatch = class_.__dict__.get('_compiler_dispatch') - if not existing: - existing = _dispatcher() - - if existing_dispatch: - existing.specs['default'] = existing_dispatch - - # TODO: why is the lambda needed ? - setattr(class_, '_compiler_dispatch', - lambda *arg, **kw: existing(*arg, **kw)) - setattr(class_, '_compiler_dispatcher', existing) - - if specs: - for s in specs: - existing.specs[s] = fn - - else: - existing.specs['default'] = fn - return fn - return decorate - - -def deregister(class_): - """Remove all custom compilers associated with a given - :class:`.ClauseElement` type.""" - - if hasattr(class_, '_compiler_dispatcher'): - # regenerate default _compiler_dispatch - visitors._generate_dispatch(class_) - # remove custom directive - del class_._compiler_dispatcher - - -class _dispatcher(object): - def __init__(self): - self.specs = {} - - def __call__(self, element, compiler, **kw): - # TODO: yes, this could also switch off of DBAPI in use. - fn = self.specs.get(compiler.dialect.name, None) - if not fn: - try: - fn = self.specs['default'] - except KeyError: - raise exc.CompileError( - "%s construct has no default " - "compilation handler." 
% type(element)) - return fn(element, compiler, **kw) diff --git a/python/sqlalchemy/ext/declarative/__init__.py b/python/sqlalchemy/ext/declarative/__init__.py deleted file mode 100644 index f703000b..00000000 --- a/python/sqlalchemy/ext/declarative/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# ext/declarative/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .api import declarative_base, synonym_for, comparable_using, \ - instrument_declarative, ConcreteBase, AbstractConcreteBase, \ - DeclarativeMeta, DeferredReflection, has_inherited_table,\ - declared_attr, as_declarative - - -__all__ = ['declarative_base', 'synonym_for', 'has_inherited_table', - 'comparable_using', 'instrument_declarative', 'declared_attr', - 'as_declarative', - 'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta', - 'DeferredReflection'] diff --git a/python/sqlalchemy/ext/declarative/api.py b/python/sqlalchemy/ext/declarative/api.py deleted file mode 100644 index dfc47ce9..00000000 --- a/python/sqlalchemy/ext/declarative/api.py +++ /dev/null @@ -1,671 +0,0 @@ -# ext/declarative/api.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""Public API functions and helpers for declarative.""" - - -from ...schema import Table, MetaData, Column -from ...orm import synonym as _orm_synonym, \ - comparable_property,\ - interfaces, properties, attributes -from ...orm.util import polymorphic_union -from ...orm.base import _mapper_or_none -from ...util import OrderedDict, hybridmethod, hybridproperty -from ... import util -from ... import exc -import weakref - -from .base import _as_declarative, \ - _declarative_constructor,\ - _DeferredMapperConfig, _add_attribute -from .clsregistry import _class_resolver - - -def instrument_declarative(cls, registry, metadata): - """Given a class, configure the class declaratively, - using the given registry, which can be any dictionary, and - MetaData object. - - """ - if '_decl_class_registry' in cls.__dict__: - raise exc.InvalidRequestError( - "Class %r already has been " - "instrumented declaratively" % cls) - cls._decl_class_registry = registry - cls.metadata = metadata - _as_declarative(cls, cls.__name__, cls.__dict__) - - -def has_inherited_table(cls): - """Given a class, return True if any of the classes it inherits from has a - mapped table, otherwise return False. - """ - for class_ in cls.__mro__[1:]: - if getattr(class_, '__table__', None) is not None: - return True - return False - - -class DeclarativeMeta(type): - def __init__(cls, classname, bases, dict_): - if '_decl_class_registry' not in cls.__dict__: - _as_declarative(cls, classname, cls.__dict__) - type.__init__(cls, classname, bases, dict_) - - def __setattr__(cls, key, value): - _add_attribute(cls, key, value) - - -def synonym_for(name, map_column=False): - """Decorator, make a Python @property a query synonym for a column. - - A decorator version of :func:`~sqlalchemy.orm.synonym`. 
The function being - decorated is the 'descriptor', otherwise passes its arguments through to - synonym():: - - @synonym_for('col') - @property - def prop(self): - return 'special sauce' - - The regular ``synonym()`` is also usable directly in a declarative setting - and may be convenient for read/write properties:: - - prop = synonym('col', descriptor=property(_read_prop, _write_prop)) - - """ - def decorate(fn): - return _orm_synonym(name, map_column=map_column, descriptor=fn) - return decorate - - -def comparable_using(comparator_factory): - """Decorator, allow a Python @property to be used in query criteria. - - This is a decorator front end to - :func:`~sqlalchemy.orm.comparable_property` that passes - through the comparator_factory and the function being decorated:: - - @comparable_using(MyComparatorType) - @property - def prop(self): - return 'special sauce' - - The regular ``comparable_property()`` is also usable directly in a - declarative setting and may be convenient for read/write properties:: - - prop = comparable_property(MyComparatorType) - - """ - def decorate(fn): - return comparable_property(comparator_factory, fn) - return decorate - - -class declared_attr(interfaces._MappedAttribute, property): - """Mark a class-level method as representing the definition of - a mapped property or special declarative member name. - - @declared_attr turns the attribute into a scalar-like - property that can be invoked from the uninstantiated class. - Declarative treats attributes specifically marked with - @declared_attr as returning a construct that is specific - to mapping or declarative table configuration. The name - of the attribute is that of what the non-dynamic version - of the attribute would be. - - @declared_attr is more often than not applicable to mixins, - to define relationships that are to be applied to different - implementors of the class:: - - class ProvidesUser(object): - "A mixin that adds a 'user' relationship to classes." - - @declared_attr - def user(self): - return relationship("User") - - It also can be applied to mapped classes, such as to provide - a "polymorphic" scheme for inheritance:: - - class Employee(Base): - id = Column(Integer, primary_key=True) - type = Column(String(50), nullable=False) - - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - - @declared_attr - def __mapper_args__(cls): - if cls.__name__ == 'Employee': - return { - "polymorphic_on":cls.type, - "polymorphic_identity":"Employee" - } - else: - return {"polymorphic_identity":cls.__name__} - - .. versionchanged:: 0.8 :class:`.declared_attr` can be used with - non-ORM or extension attributes, such as user-defined attributes - or :func:`.association_proxy` objects, which will be assigned - to the class at class construction time. 
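To ground the description above, a minimal sketch of the mixin pattern; the
declarative ``Base`` and a mapped ``Address`` class are assumed to exist::

    from sqlalchemy import Column, Integer, ForeignKey
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.declarative import declared_attr

    class HasAddress(object):
        "A mixin adding an 'address' relationship to implementing classes."

        @declared_attr
        def address_id(cls):
            return Column(Integer, ForeignKey('address.id'))

        @declared_attr
        def address(cls):
            return relationship("Address")

    class Customer(HasAddress, Base):
        __tablename__ = 'customer'
        id = Column(Integer, primary_key=True)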
- - - """ - - def __init__(self, fget, cascading=False): - super(declared_attr, self).__init__(fget) - self.__doc__ = fget.__doc__ - self._cascading = cascading - - def __get__(desc, self, cls): - reg = cls.__dict__.get('_sa_declared_attr_reg', None) - if reg is None: - manager = attributes.manager_of_class(cls) - if manager is None: - util.warn( - "Unmanaged access of declarative attribute %s from " - "non-mapped class %s" % - (desc.fget.__name__, cls.__name__)) - return desc.fget(cls) - - if reg is None: - return desc.fget(cls) - elif desc in reg: - return reg[desc] - else: - reg[desc] = obj = desc.fget(cls) - return obj - - @hybridmethod - def _stateful(cls, **kw): - return _stateful_declared_attr(**kw) - - @hybridproperty - def cascading(cls): - """Mark a :class:`.declared_attr` as cascading. - - This is a special-use modifier which indicates that a column - or MapperProperty-based declared attribute should be configured - distinctly per mapped subclass, within a mapped-inheritance scenario. - - Below, both MyClass as well as MySubClass will have a distinct - ``id`` Column object established:: - - class HasSomeAttribute(object): - @declared_attr.cascading - def some_id(cls): - if has_inherited_table(cls): - return Column( - ForeignKey('myclass.id'), primary_key=True) - else: - return Column(Integer, primary_key=True) - - return Column('id', Integer, primary_key=True) - - class MyClass(HasSomeAttribute, Base): - "" - # ... - - class MySubClass(MyClass): - "" - # ... - - The behavior of the above configuration is that ``MySubClass`` - will refer to both its own ``id`` column as well as that of - ``MyClass`` underneath the attribute named ``some_id``. - - .. seealso:: - - :ref:`declarative_inheritance` - - :ref:`mixin_inheritance_columns` - - - """ - return cls._stateful(cascading=True) - - -class _stateful_declared_attr(declared_attr): - def __init__(self, **kw): - self.kw = kw - - def _stateful(self, **kw): - new_kw = self.kw.copy() - new_kw.update(kw) - return _stateful_declared_attr(**new_kw) - - def __call__(self, fn): - return declared_attr(fn, **self.kw) - - -def declarative_base(bind=None, metadata=None, mapper=None, cls=object, - name='Base', constructor=_declarative_constructor, - class_registry=None, - metaclass=DeclarativeMeta): - """Construct a base class for declarative class definitions. - - The new base class will be given a metaclass that produces - appropriate :class:`~sqlalchemy.schema.Table` objects and makes - the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the - information provided declaratively in the class and any subclasses - of the class. - - :param bind: An optional - :class:`~sqlalchemy.engine.Connectable`, will be assigned - the ``bind`` attribute on the :class:`~sqlalchemy.schema.MetaData` - instance. - - :param metadata: - An optional :class:`~sqlalchemy.schema.MetaData` instance. All - :class:`~sqlalchemy.schema.Table` objects implicitly declared by - subclasses of the base will share this MetaData. A MetaData instance - will be created if none is provided. The - :class:`~sqlalchemy.schema.MetaData` instance will be available via the - `metadata` attribute of the generated declarative base class. - - :param mapper: - An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will - be used to map subclasses to their Tables. - - :param cls: - Defaults to :class:`object`. A type to use as the base for the generated - declarative base class. May be a class or tuple of classes. - - :param name: - Defaults to ``Base``. 
The display name for the generated - class. Customizing this is not required, but can improve clarity in - tracebacks and debugging. - - :param constructor: - Defaults to - :func:`~sqlalchemy.ext.declarative._declarative_constructor`, an - __init__ implementation that assigns \**kwargs for declared - fields and relationships to an instance. If ``None`` is supplied, - no __init__ will be provided and construction will fall back to - cls.__init__ by way of the normal Python semantics. - - :param class_registry: optional dictionary that will serve as the - registry of class names-> mapped classes when string names - are used to identify classes inside of :func:`.relationship` - and others. Allows two or more declarative base classes - to share the same registry of class names for simplified - inter-base relationships. - - :param metaclass: - Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ - compatible callable to use as the meta type of the generated - declarative base class. - - .. seealso:: - - :func:`.as_declarative` - - """ - lcl_metadata = metadata or MetaData() - if bind: - lcl_metadata.bind = bind - - if class_registry is None: - class_registry = weakref.WeakValueDictionary() - - bases = not isinstance(cls, tuple) and (cls,) or cls - class_dict = dict(_decl_class_registry=class_registry, - metadata=lcl_metadata) - - if constructor: - class_dict['__init__'] = constructor - if mapper: - class_dict['__mapper_cls__'] = mapper - - return metaclass(name, bases, class_dict) - - -def as_declarative(**kw): - """ - Class decorator for :func:`.declarative_base`. - - Provides a syntactical shortcut to the ``cls`` argument - sent to :func:`.declarative_base`, allowing the base class - to be converted in-place to a "declarative" base:: - - from sqlalchemy.ext.declarative import as_declarative - - @as_declarative() - class Base(object): - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - id = Column(Integer, primary_key=True) - - class MyMappedClass(Base): - # ... - - All keyword arguments passed to :func:`.as_declarative` are passed - along to :func:`.declarative_base`. - - .. versionadded:: 0.8.3 - - .. seealso:: - - :func:`.declarative_base` - - """ - def decorate(cls): - kw['cls'] = cls - kw['name'] = cls.__name__ - return declarative_base(**kw) - - return decorate - - -class ConcreteBase(object): - """A helper class for 'concrete' declarative mappings. - - :class:`.ConcreteBase` will use the :func:`.polymorphic_union` - function automatically, against all tables mapped as a subclass - to this class. The function is called via the - ``__declare_last__()`` function, which is essentially - a hook for the :meth:`.after_configured` event. - - :class:`.ConcreteBase` produces a mapped - table for the class itself. Compare to :class:`.AbstractConcreteBase`, - which does not. 
- - Example:: - - from sqlalchemy.ext.declarative import ConcreteBase - - class Employee(ConcreteBase, Base): - __tablename__ = 'employee' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'employee', - 'concrete':True} - - class Manager(Employee): - __tablename__ = 'manager' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - manager_data = Column(String(40)) - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'concrete':True} - - """ - - @classmethod - def _create_polymorphic_union(cls, mappers): - return polymorphic_union(OrderedDict( - (mp.polymorphic_identity, mp.local_table) - for mp in mappers - ), 'type', 'pjoin') - - @classmethod - def __declare_first__(cls): - m = cls.__mapper__ - if m.with_polymorphic: - return - - mappers = list(m.self_and_descendants) - pjoin = cls._create_polymorphic_union(mappers) - m._set_with_polymorphic(("*", pjoin)) - m._set_polymorphic_on(pjoin.c.type) - - -class AbstractConcreteBase(ConcreteBase): - """A helper class for 'concrete' declarative mappings. - - :class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union` - function automatically, against all tables mapped as a subclass - to this class. The function is called via the - ``__declare_last__()`` function, which is essentially - a hook for the :meth:`.after_configured` event. - - :class:`.AbstractConcreteBase` does produce a mapped class - for the base class, however it is not persisted to any table; it - is instead mapped directly to the "polymorphic" selectable directly - and is only used for selecting. Compare to :class:`.ConcreteBase`, - which does create a persisted table for the base class. - - Example:: - - from sqlalchemy.ext.declarative import AbstractConcreteBase - - class Employee(AbstractConcreteBase, Base): - pass - - class Manager(Employee): - __tablename__ = 'manager' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - manager_data = Column(String(40)) - - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'concrete':True} - - The abstract base class is handled by declarative in a special way; - at class configuration time, it behaves like a declarative mixin - or an ``__abstract__`` base class. Once classes are configured - and mappings are produced, it then gets mapped itself, but - after all of its decscendants. This is a very unique system of mapping - not found in any other SQLAlchemy system. - - Using this approach, we can specify columns and properties - that will take place on mapped subclasses, in the way that - we normally do as in :ref:`declarative_mixins`:: - - class Company(Base): - __tablename__ = 'company' - id = Column(Integer, primary_key=True) - - class Employee(AbstractConcreteBase, Base): - employee_id = Column(Integer, primary_key=True) - - @declared_attr - def company_id(cls): - return Column(ForeignKey('company.id')) - - @declared_attr - def company(cls): - return relationship("Company") - - class Manager(Employee): - __tablename__ = 'manager' - - name = Column(String(50)) - manager_data = Column(String(40)) - - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'concrete':True} - - When we make use of our mappings however, both ``Manager`` and - ``Employee`` will have an independently usable ``.company`` attribute:: - - session.query(Employee).filter(Employee.company.has(id=5)) - - .. 
versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase` - have been reworked to support relationships established directly - on the abstract base, without any special configurational steps. - - - """ - - __no_table__ = True - - @classmethod - def __declare_first__(cls): - cls._sa_decl_prepare_nocascade() - - @classmethod - def _sa_decl_prepare_nocascade(cls): - if getattr(cls, '__mapper__', None): - return - - to_map = _DeferredMapperConfig.config_for_cls(cls) - - # can't rely on 'self_and_descendants' here - # since technically an immediate subclass - # might not be mapped, but a subclass - # may be. - mappers = [] - stack = list(cls.__subclasses__()) - while stack: - klass = stack.pop() - stack.extend(klass.__subclasses__()) - mn = _mapper_or_none(klass) - if mn is not None: - mappers.append(mn) - pjoin = cls._create_polymorphic_union(mappers) - - # For columns that were declared on the class, these - # are normally ignored with the "__no_table__" mapping, - # unless they have a different attribute key vs. col name - # and are in the properties argument. - # In that case, ensure we update the properties entry - # to the correct column from the pjoin target table. - declared_cols = set(to_map.declared_columns) - for k, v in list(to_map.properties.items()): - if v in declared_cols: - to_map.properties[k] = pjoin.c[v.key] - - to_map.local_table = pjoin - - m_args = to_map.mapper_args_fn or dict - - def mapper_args(): - args = m_args() - args['polymorphic_on'] = pjoin.c.type - return args - to_map.mapper_args_fn = mapper_args - - m = to_map.map() - - for scls in cls.__subclasses__(): - sm = _mapper_or_none(scls) - if sm and sm.concrete and cls in scls.__bases__: - sm._set_concrete_base(m) - - -class DeferredReflection(object): - """A helper class for construction of mappings based on - a deferred reflection step. - - Normally, declarative can be used with reflection by - setting a :class:`.Table` object using autoload=True - as the ``__table__`` attribute on a declarative class. - The caveat is that the :class:`.Table` must be fully - reflected, or at the very least have a primary key column, - at the point at which a normal declarative mapping is - constructed, meaning the :class:`.Engine` must be available - at class declaration time. - - The :class:`.DeferredReflection` mixin moves the construction - of mappers to be at a later point, after a specific - method is called which first reflects all :class:`.Table` - objects created so far. Classes can define it as such:: - - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.ext.declarative import DeferredReflection - Base = declarative_base() - - class MyClass(DeferredReflection, Base): - __tablename__ = 'mytable' - - Above, ``MyClass`` is not yet mapped. After a series of - classes have been defined in the above fashion, all tables - can be reflected and mappings created using - :meth:`.prepare`:: - - engine = create_engine("someengine://...") - DeferredReflection.prepare(engine) - - The :class:`.DeferredReflection` mixin can be applied to individual - classes, used as the base for the declarative base itself, - or used in a custom abstract class. Using an abstract base - allows that only a subset of classes to be prepared for a - particular prepare step, which is necessary for applications - that use more than one engine. 
For example, if an application - has two engines, you might use two bases, and prepare each - separately, e.g.:: - - class ReflectedOne(DeferredReflection, Base): - __abstract__ = True - - class ReflectedTwo(DeferredReflection, Base): - __abstract__ = True - - class MyClass(ReflectedOne): - __tablename__ = 'mytable' - - class MyOtherClass(ReflectedOne): - __tablename__ = 'myothertable' - - class YetAnotherClass(ReflectedTwo): - __tablename__ = 'yetanothertable' - - # ... etc. - - Above, the class hierarchies for ``ReflectedOne`` and - ``ReflectedTwo`` can be configured separately:: - - ReflectedOne.prepare(engine_one) - ReflectedTwo.prepare(engine_two) - - .. versionadded:: 0.8 - - """ - @classmethod - def prepare(cls, engine): - """Reflect all :class:`.Table` objects for all current - :class:`.DeferredReflection` subclasses""" - - to_map = _DeferredMapperConfig.classes_for_base(cls) - for thingy in to_map: - cls._sa_decl_prepare(thingy.local_table, engine) - thingy.map() - mapper = thingy.cls.__mapper__ - metadata = mapper.class_.metadata - for rel in mapper._props.values(): - if isinstance(rel, properties.RelationshipProperty) and \ - rel.secondary is not None: - if isinstance(rel.secondary, Table): - cls._reflect_table(rel.secondary, engine) - elif isinstance(rel.secondary, _class_resolver): - rel.secondary._resolvers += ( - cls._sa_deferred_table_resolver(engine, metadata), - ) - - @classmethod - def _sa_deferred_table_resolver(cls, engine, metadata): - def _resolve(key): - t1 = Table(key, metadata) - cls._reflect_table(t1, engine) - return t1 - return _resolve - - @classmethod - def _sa_decl_prepare(cls, local_table, engine): - # autoload Table, which is already - # present in the metadata. This - # will fill in db-loaded columns - # into the existing Table object. - if local_table is not None: - cls._reflect_table(local_table, engine) - - @classmethod - def _reflect_table(cls, table, engine): - Table(table.name, - table.metadata, - extend_existing=True, - autoload_replace=False, - autoload=True, - autoload_with=engine, - schema=table.schema) diff --git a/python/sqlalchemy/ext/declarative/base.py b/python/sqlalchemy/ext/declarative/base.py deleted file mode 100644 index 57305748..00000000 --- a/python/sqlalchemy/ext/declarative/base.py +++ /dev/null @@ -1,657 +0,0 @@ -# ext/declarative/base.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""Internal implementation for declarative.""" - -from ...schema import Table, Column -from ...orm import mapper, class_mapper, synonym -from ...orm.interfaces import MapperProperty -from ...orm.properties import ColumnProperty, CompositeProperty -from ...orm.attributes import QueryableAttribute -from ...orm.base import _is_mapped_class -from ... import util, exc -from ...util import topological -from ...sql import expression -from ... import event -from . 
import clsregistry -import collections -import weakref -from sqlalchemy.orm import instrumentation - -declared_attr = declarative_props = None - - -def _declared_mapping_info(cls): - # deferred mapping - if _DeferredMapperConfig.has_cls(cls): - return _DeferredMapperConfig.config_for_cls(cls) - # regular mapping - elif _is_mapped_class(cls): - return class_mapper(cls, configure=False) - else: - return None - - -def _resolve_for_abstract(cls): - if cls is object: - return None - - if _get_immediate_cls_attr(cls, '__abstract__', strict=True): - for sup in cls.__bases__: - sup = _resolve_for_abstract(sup) - if sup is not None: - return sup - else: - return None - else: - return cls - - -def _get_immediate_cls_attr(cls, attrname, strict=False): - """return an attribute of the class that is either present directly - on the class, e.g. not on a superclass, or is from a superclass but - this superclass is a mixin, that is, not a descendant of - the declarative base. - - This is used to detect attributes that indicate something about - a mapped class independently from any mapped classes that it may - inherit from. - - """ - if not issubclass(cls, object): - return None - - for base in cls.__mro__: - _is_declarative_inherits = hasattr(base, '_decl_class_registry') - if attrname in base.__dict__ and ( - base is cls or - ((base in cls.__bases__ if strict else True) - and not _is_declarative_inherits) - ): - return getattr(base, attrname) - else: - return None - - -def _as_declarative(cls, classname, dict_): - global declared_attr, declarative_props - if declared_attr is None: - from .api import declared_attr - declarative_props = (declared_attr, util.classproperty) - - if _get_immediate_cls_attr(cls, '__abstract__', strict=True): - return - - _MapperConfig.setup_mapping(cls, classname, dict_) - - -class _MapperConfig(object): - - @classmethod - def setup_mapping(cls, cls_, classname, dict_): - defer_map = _get_immediate_cls_attr( - cls_, '_sa_decl_prepare_nocascade', strict=True) or \ - hasattr(cls_, '_sa_decl_prepare') - - if defer_map: - cfg_cls = _DeferredMapperConfig - else: - cfg_cls = _MapperConfig - cfg_cls(cls_, classname, dict_) - - def __init__(self, cls_, classname, dict_): - - self.cls = cls_ - - # dict_ will be a dictproxy, which we can't write to, and we need to! - self.dict_ = dict(dict_) - self.classname = classname - self.mapped_table = None - self.properties = util.OrderedDict() - self.declared_columns = set() - self.column_copies = {} - self._setup_declared_events() - - # temporary registry. While early 1.0 versions - # set up the ClassManager here, by API contract - # we can't do that until there's a mapper. 
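-        # (the temporary registry is removed again in map(), below,
-        # once the Mapper exists and can own the declared-attr state)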
- self.cls._sa_declared_attr_reg = {} - - self._scan_attributes() - - clsregistry.add_class(self.classname, self.cls) - - self._extract_mappable_attributes() - - self._extract_declared_columns() - - self._setup_table() - - self._setup_inheritance() - - self._early_mapping() - - def _early_mapping(self): - self.map() - - def _setup_declared_events(self): - if _get_immediate_cls_attr(self.cls, '__declare_last__'): - @event.listens_for(mapper, "after_configured") - def after_configured(): - self.cls.__declare_last__() - - if _get_immediate_cls_attr(self.cls, '__declare_first__'): - @event.listens_for(mapper, "before_configured") - def before_configured(): - self.cls.__declare_first__() - - def _scan_attributes(self): - cls = self.cls - dict_ = self.dict_ - column_copies = self.column_copies - mapper_args_fn = None - table_args = inherited_table_args = None - tablename = None - - for base in cls.__mro__: - class_mapped = base is not cls and \ - _declared_mapping_info(base) is not None and \ - not _get_immediate_cls_attr( - base, '_sa_decl_prepare_nocascade', strict=True) - - if not class_mapped and base is not cls: - self._produce_column_copies(base) - - for name, obj in vars(base).items(): - if name == '__mapper_args__': - if not mapper_args_fn and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - # don't even invoke __mapper_args__ until - # after we've determined everything about the - # mapped table. - # make a copy of it so a class-level dictionary - # is not overwritten when we update column-based - # arguments. - mapper_args_fn = lambda: dict(cls.__mapper_args__) - elif name == '__tablename__': - if not tablename and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - tablename = cls.__tablename__ - elif name == '__table_args__': - if not table_args and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - table_args = cls.__table_args__ - if not isinstance( - table_args, (tuple, dict, type(None))): - raise exc.ArgumentError( - "__table_args__ value must be a tuple, " - "dict, or None") - if base is not cls: - inherited_table_args = True - elif class_mapped: - if isinstance(obj, declarative_props): - util.warn("Regular (i.e. not __special__) " - "attribute '%s.%s' uses @declared_attr, " - "but owning class %s is mapped - " - "not applying to subclass %s." - % (base.__name__, name, base, cls)) - continue - elif base is not cls: - # we're a mixin, abstract base, or something that is - # acting like that for now. - if isinstance(obj, Column): - # already copied columns to the mapped class. - continue - elif isinstance(obj, MapperProperty): - raise exc.InvalidRequestError( - "Mapper properties (i.e. deferred," - "column_property(), relationship(), etc.) 
must " - "be declared as @declared_attr callables " - "on declarative mixin classes.") - elif isinstance(obj, declarative_props): - oldclassprop = isinstance(obj, util.classproperty) - if not oldclassprop and obj._cascading: - dict_[name] = column_copies[obj] = \ - ret = obj.__get__(obj, cls) - setattr(cls, name, ret) - else: - if oldclassprop: - util.warn_deprecated( - "Use of sqlalchemy.util.classproperty on " - "declarative classes is deprecated.") - dict_[name] = column_copies[obj] = \ - ret = getattr(cls, name) - if isinstance(ret, (Column, MapperProperty)) and \ - ret.doc is None: - ret.doc = obj.__doc__ - - if inherited_table_args and not tablename: - table_args = None - - self.table_args = table_args - self.tablename = tablename - self.mapper_args_fn = mapper_args_fn - - def _produce_column_copies(self, base): - cls = self.cls - dict_ = self.dict_ - column_copies = self.column_copies - # copy mixin columns to the mapped class - for name, obj in vars(base).items(): - if isinstance(obj, Column): - if getattr(cls, name) is not obj: - # if column has been overridden - # (like by the InstrumentedAttribute of the - # superclass), skip - continue - elif obj.foreign_keys: - raise exc.InvalidRequestError( - "Columns with foreign keys to other columns " - "must be declared as @declared_attr callables " - "on declarative mixin classes. ") - elif name not in dict_ and not ( - '__table__' in dict_ and - (obj.name or name) in dict_['__table__'].c - ): - column_copies[obj] = copy_ = obj.copy() - copy_._creation_order = obj._creation_order - setattr(cls, name, copy_) - dict_[name] = copy_ - - def _extract_mappable_attributes(self): - cls = self.cls - dict_ = self.dict_ - - our_stuff = self.properties - - for k in list(dict_): - - if k in ('__table__', '__tablename__', '__mapper_args__'): - continue - - value = dict_[k] - if isinstance(value, declarative_props): - value = getattr(cls, k) - - elif isinstance(value, QueryableAttribute) and \ - value.class_ is not cls and \ - value.key != k: - # detect a QueryableAttribute that's already mapped being - # assigned elsewhere in userland, turn into a synonym() - value = synonym(value.key) - setattr(cls, k, value) - - if (isinstance(value, tuple) and len(value) == 1 and - isinstance(value[0], (Column, MapperProperty))): - util.warn("Ignoring declarative-like tuple value of attribute " - "%s: possibly a copy-and-paste error with a comma " - "left at the end of the line?" % k) - continue - elif not isinstance(value, (Column, MapperProperty)): - # using @declared_attr for some object that - # isn't Column/MapperProperty; remove from the dict_ - # and place the evaluated value onto the class. - if not k.startswith('__'): - dict_.pop(k) - setattr(cls, k, value) - continue - # we expect to see the name 'metadata' in some valid cases; - # however at this point we see it's assigned to something trying - # to be mapped, so raise for that. - elif k == 'metadata': - raise exc.InvalidRequestError( - "Attribute name 'metadata' is reserved " - "for the MetaData instance when using a " - "declarative base class." 
- ) - prop = clsregistry._deferred_relationship(cls, value) - our_stuff[k] = prop - - def _extract_declared_columns(self): - our_stuff = self.properties - - # set up attributes in the order they were created - our_stuff.sort(key=lambda key: our_stuff[key]._creation_order) - - # extract columns from the class dict - declared_columns = self.declared_columns - name_to_prop_key = collections.defaultdict(set) - for key, c in list(our_stuff.items()): - if isinstance(c, (ColumnProperty, CompositeProperty)): - for col in c.columns: - if isinstance(col, Column) and \ - col.table is None: - _undefer_column_name(key, col) - if not isinstance(c, CompositeProperty): - name_to_prop_key[col.name].add(key) - declared_columns.add(col) - elif isinstance(c, Column): - _undefer_column_name(key, c) - name_to_prop_key[c.name].add(key) - declared_columns.add(c) - # if the column is the same name as the key, - # remove it from the explicit properties dict. - # the normal rules for assigning column-based properties - # will take over, including precedence of columns - # in multi-column ColumnProperties. - if key == c.key: - del our_stuff[key] - - for name, keys in name_to_prop_key.items(): - if len(keys) > 1: - util.warn( - "On class %r, Column object %r named " - "directly multiple times, " - "only one will be used: %s" % - (self.classname, name, (", ".join(sorted(keys)))) - ) - - def _setup_table(self): - cls = self.cls - tablename = self.tablename - table_args = self.table_args - dict_ = self.dict_ - declared_columns = self.declared_columns - - declared_columns = self.declared_columns = sorted( - declared_columns, key=lambda c: c._creation_order) - table = None - - if hasattr(cls, '__table_cls__'): - table_cls = util.unbound_method_to_callable(cls.__table_cls__) - else: - table_cls = Table - - if '__table__' not in dict_: - if tablename is not None: - - args, table_kw = (), {} - if table_args: - if isinstance(table_args, dict): - table_kw = table_args - elif isinstance(table_args, tuple): - if isinstance(table_args[-1], dict): - args, table_kw = table_args[0:-1], table_args[-1] - else: - args = table_args - - autoload = dict_.get('__autoload__') - if autoload: - table_kw['autoload'] = True - - cls.__table__ = table = table_cls( - tablename, cls.metadata, - *(tuple(declared_columns) + tuple(args)), - **table_kw) - else: - table = cls.__table__ - if declared_columns: - for c in declared_columns: - if not table.c.contains_column(c): - raise exc.ArgumentError( - "Can't add additional column %r when " - "specifying __table__" % c.key - ) - self.local_table = table - - def _setup_inheritance(self): - table = self.local_table - cls = self.cls - table_args = self.table_args - declared_columns = self.declared_columns - for c in cls.__bases__: - c = _resolve_for_abstract(c) - if c is None: - continue - if _declared_mapping_info(c) is not None and \ - not _get_immediate_cls_attr( - c, '_sa_decl_prepare_nocascade', strict=True): - self.inherits = c - break - else: - self.inherits = None - - if table is None and self.inherits is None and \ - not _get_immediate_cls_attr(cls, '__no_table__'): - - raise exc.InvalidRequestError( - "Class %r does not have a __table__ or __tablename__ " - "specified and does not inherit from an existing " - "table-mapped class." % cls - ) - elif self.inherits: - inherited_mapper = _declared_mapping_info(self.inherits) - inherited_table = inherited_mapper.local_table - inherited_mapped_table = inherited_mapper.mapped_table - - if table is None: - # single table inheritance. 
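-                # (table is None while an inherited mapper is present, so
-                # this class will be mapped against the parent's table)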
- # ensure no table args - if table_args: - raise exc.ArgumentError( - "Can't place __table_args__ on an inherited class " - "with no table." - ) - # add any columns declared here to the inherited table. - for c in declared_columns: - if c.primary_key: - raise exc.ArgumentError( - "Can't place primary key columns on an inherited " - "class with no table." - ) - if c.name in inherited_table.c: - if inherited_table.c[c.name] is c: - continue - raise exc.ArgumentError( - "Column '%s' on class %s conflicts with " - "existing column '%s'" % - (c, cls, inherited_table.c[c.name]) - ) - inherited_table.append_column(c) - if inherited_mapped_table is not None and \ - inherited_mapped_table is not inherited_table: - inherited_mapped_table._refresh_for_new_column(c) - - def _prepare_mapper_arguments(self): - properties = self.properties - if self.mapper_args_fn: - mapper_args = self.mapper_args_fn() - else: - mapper_args = {} - - # make sure that column copies are used rather - # than the original columns from any mixins - for k in ('version_id_col', 'polymorphic_on',): - if k in mapper_args: - v = mapper_args[k] - mapper_args[k] = self.column_copies.get(v, v) - - assert 'inherits' not in mapper_args, \ - "Can't specify 'inherits' explicitly with declarative mappings" - - if self.inherits: - mapper_args['inherits'] = self.inherits - - if self.inherits and not mapper_args.get('concrete', False): - # single or joined inheritance - # exclude any cols on the inherited table which are - # not mapped on the parent class, to avoid - # mapping columns specific to sibling/nephew classes - inherited_mapper = _declared_mapping_info(self.inherits) - inherited_table = inherited_mapper.local_table - - if 'exclude_properties' not in mapper_args: - mapper_args['exclude_properties'] = exclude_properties = \ - set([c.key for c in inherited_table.c - if c not in inherited_mapper._columntoproperty]) - exclude_properties.difference_update( - [c.key for c in self.declared_columns]) - - # look through columns in the current mapper that - # are keyed to a propname different than the colname - # (if names were the same, we'd have popped it out above, - # in which case the mapper makes this combination). - # See if the superclass has a similar column property. - # If so, join them together. - for k, col in list(properties.items()): - if not isinstance(col, expression.ColumnElement): - continue - if k in inherited_mapper._props: - p = inherited_mapper._props[k] - if isinstance(p, ColumnProperty): - # note here we place the subclass column - # first. See [ticket:1892] for background. 
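-                        # with the subclass column listed first, it takes
-                        # precedence when both columns share one property key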
- properties[k] = [col] + p.columns - result_mapper_args = mapper_args.copy() - result_mapper_args['properties'] = properties - self.mapper_args = result_mapper_args - - def map(self): - self._prepare_mapper_arguments() - if hasattr(self.cls, '__mapper_cls__'): - mapper_cls = util.unbound_method_to_callable( - self.cls.__mapper_cls__) - else: - mapper_cls = mapper - - self.cls.__mapper__ = mp_ = mapper_cls( - self.cls, - self.local_table, - **self.mapper_args - ) - del self.cls._sa_declared_attr_reg - return mp_ - - -class _DeferredMapperConfig(_MapperConfig): - _configs = util.OrderedDict() - - def _early_mapping(self): - pass - - @property - def cls(self): - return self._cls() - - @cls.setter - def cls(self, class_): - self._cls = weakref.ref(class_, self._remove_config_cls) - self._configs[self._cls] = self - - @classmethod - def _remove_config_cls(cls, ref): - cls._configs.pop(ref, None) - - @classmethod - def has_cls(cls, class_): - # 2.6 fails on weakref if class_ is an old style class - return isinstance(class_, type) and \ - weakref.ref(class_) in cls._configs - - @classmethod - def config_for_cls(cls, class_): - return cls._configs[weakref.ref(class_)] - - @classmethod - def classes_for_base(cls, base_cls, sort=True): - classes_for_base = [m for m in cls._configs.values() - if issubclass(m.cls, base_cls)] - if not sort: - return classes_for_base - - all_m_by_cls = dict( - (m.cls, m) - for m in classes_for_base - ) - - tuples = [] - for m_cls in all_m_by_cls: - tuples.extend( - (all_m_by_cls[base_cls], all_m_by_cls[m_cls]) - for base_cls in m_cls.__bases__ - if base_cls in all_m_by_cls - ) - return list( - topological.sort( - tuples, - classes_for_base - ) - ) - - def map(self): - self._configs.pop(self._cls, None) - return super(_DeferredMapperConfig, self).map() - - -def _add_attribute(cls, key, value): - """add an attribute to an existing declarative class. - - This runs through the logic to determine MapperProperty, - adds it to the Mapper, adds a column to the mapped Table, etc. - - """ - - if '__mapper__' in cls.__dict__: - if isinstance(value, Column): - _undefer_column_name(key, value) - cls.__table__.append_column(value) - cls.__mapper__.add_property(key, value) - elif isinstance(value, ColumnProperty): - for col in value.columns: - if isinstance(col, Column) and col.table is None: - _undefer_column_name(key, col) - cls.__table__.append_column(col) - cls.__mapper__.add_property(key, value) - elif isinstance(value, MapperProperty): - cls.__mapper__.add_property( - key, - clsregistry._deferred_relationship(cls, value) - ) - elif isinstance(value, QueryableAttribute) and value.key != key: - # detect a QueryableAttribute that's already mapped being - # assigned elsewhere in userland, turn into a synonym() - value = synonym(value.key) - cls.__mapper__.add_property( - key, - clsregistry._deferred_relationship(cls, value) - ) - else: - type.__setattr__(cls, key, value) - else: - type.__setattr__(cls, key, value) - - -def _declarative_constructor(self, **kwargs): - """A simple constructor that allows initialization from kwargs. - - Sets attributes on the constructed instance using the names and - values in ``kwargs``. - - Only keys that are present as - attributes of the instance's class are allowed. These could be, - for example, any mapped columns or relationships. 
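-
-    As an illustrative sketch only (``User`` standing in for any mapped
-    class with a ``name`` attribute): ``User(name='ed')`` sets ``name``,
-    while an unknown key such as ``User(nmae='ed')`` raises ``TypeError``.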
- """ - cls_ = type(self) - for k in kwargs: - if not hasattr(cls_, k): - raise TypeError( - "%r is an invalid keyword argument for %s" % - (k, cls_.__name__)) - setattr(self, k, kwargs[k]) -_declarative_constructor.__name__ = '__init__' - - -def _undefer_column_name(key, column): - if column.key is None: - column.key = key - if column.name is None: - column.name = key diff --git a/python/sqlalchemy/ext/declarative/clsregistry.py b/python/sqlalchemy/ext/declarative/clsregistry.py deleted file mode 100644 index c3887d6c..00000000 --- a/python/sqlalchemy/ext/declarative/clsregistry.py +++ /dev/null @@ -1,327 +0,0 @@ -# ext/declarative/clsregistry.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""Routines to handle the string class registry used by declarative. - -This system allows specification of classes and expressions used in -:func:`.relationship` using strings. - -""" -from ...orm.properties import ColumnProperty, RelationshipProperty, \ - SynonymProperty -from ...schema import _get_table_key -from ...orm import class_mapper, interfaces -from ... import util -from ... import inspection -from ... import exc -import weakref - -# strong references to registries which we place in -# the _decl_class_registry, which is usually weak referencing. -# the internal registries here link to classes with weakrefs and remove -# themselves when all references to contained classes are removed. -_registries = set() - - -def add_class(classname, cls): - """Add a class to the _decl_class_registry associated with the - given declarative class. - - """ - if classname in cls._decl_class_registry: - # class already exists. - existing = cls._decl_class_registry[classname] - if not isinstance(existing, _MultipleClassMarker): - existing = \ - cls._decl_class_registry[classname] = \ - _MultipleClassMarker([cls, existing]) - else: - cls._decl_class_registry[classname] = cls - - try: - root_module = cls._decl_class_registry['_sa_module_registry'] - except KeyError: - cls._decl_class_registry['_sa_module_registry'] = \ - root_module = _ModuleMarker('_sa_module_registry', None) - - tokens = cls.__module__.split(".") - - # build up a tree like this: - # modulename: myapp.snacks.nuts - # - # myapp->snack->nuts->(classes) - # snack->nuts->(classes) - # nuts->(classes) - # - # this allows partial token paths to be used. - while tokens: - token = tokens.pop(0) - module = root_module.get_module(token) - for token in tokens: - module = module.get_module(token) - module.add_class(classname, cls) - - -class _MultipleClassMarker(object): - """refers to multiple classes of the same name - within _decl_class_registry. - - """ - - __slots__ = 'on_remove', 'contents', '__weakref__' - - def __init__(self, classes, on_remove=None): - self.on_remove = on_remove - self.contents = set([ - weakref.ref(item, self._remove_item) for item in classes]) - _registries.add(self) - - def __iter__(self): - return (ref() for ref in self.contents) - - def attempt_get(self, path, key): - if len(self.contents) > 1: - raise exc.InvalidRequestError( - "Multiple classes found for path \"%s\" " - "in the registry of this declarative " - "base. Please use a fully module-qualified path." 
% - (".".join(path + [key])) - ) - else: - ref = list(self.contents)[0] - cls = ref() - if cls is None: - raise NameError(key) - return cls - - def _remove_item(self, ref): - self.contents.remove(ref) - if not self.contents: - _registries.discard(self) - if self.on_remove: - self.on_remove() - - def add_item(self, item): - # protect against class registration race condition against - # asynchronous garbage collection calling _remove_item, - # [ticket:3208] - modules = set([ - cls.__module__ for cls in - [ref() for ref in self.contents] if cls is not None]) - if item.__module__ in modules: - util.warn( - "This declarative base already contains a class with the " - "same class name and module name as %s.%s, and will " - "be replaced in the string-lookup table." % ( - item.__module__, - item.__name__ - ) - ) - self.contents.add(weakref.ref(item, self._remove_item)) - - -class _ModuleMarker(object): - """"refers to a module name within - _decl_class_registry. - - """ - - __slots__ = 'parent', 'name', 'contents', 'mod_ns', 'path', '__weakref__' - - def __init__(self, name, parent): - self.parent = parent - self.name = name - self.contents = {} - self.mod_ns = _ModNS(self) - if self.parent: - self.path = self.parent.path + [self.name] - else: - self.path = [] - _registries.add(self) - - def __contains__(self, name): - return name in self.contents - - def __getitem__(self, name): - return self.contents[name] - - def _remove_item(self, name): - self.contents.pop(name, None) - if not self.contents and self.parent is not None: - self.parent._remove_item(self.name) - _registries.discard(self) - - def resolve_attr(self, key): - return getattr(self.mod_ns, key) - - def get_module(self, name): - if name not in self.contents: - marker = _ModuleMarker(name, self) - self.contents[name] = marker - else: - marker = self.contents[name] - return marker - - def add_class(self, name, cls): - if name in self.contents: - existing = self.contents[name] - existing.add_item(cls) - else: - existing = self.contents[name] = \ - _MultipleClassMarker([cls], - on_remove=lambda: self._remove_item(name)) - - -class _ModNS(object): - __slots__ = '__parent', - - def __init__(self, parent): - self.__parent = parent - - def __getattr__(self, key): - try: - value = self.__parent.contents[key] - except KeyError: - pass - else: - if value is not None: - if isinstance(value, _ModuleMarker): - return value.mod_ns - else: - assert isinstance(value, _MultipleClassMarker) - return value.attempt_get(self.__parent.path, key) - raise AttributeError("Module %r has no mapped classes " - "registered under the name %r" % ( - self.__parent.name, key)) - - -class _GetColumns(object): - __slots__ = 'cls', - - def __init__(self, cls): - self.cls = cls - - def __getattr__(self, key): - mp = class_mapper(self.cls, configure=False) - if mp: - if key not in mp.all_orm_descriptors: - raise exc.InvalidRequestError( - "Class %r does not have a mapped column named %r" - % (self.cls, key)) - - desc = mp.all_orm_descriptors[key] - if desc.extension_type is interfaces.NOT_EXTENSION: - prop = desc.property - if isinstance(prop, SynonymProperty): - key = prop.name - elif not isinstance(prop, ColumnProperty): - raise exc.InvalidRequestError( - "Property %r is not an instance of" - " ColumnProperty (i.e. does not correspond" - " directly to a Column)." 
% key) - return getattr(self.cls, key) - -inspection._inspects(_GetColumns)( - lambda target: inspection.inspect(target.cls)) - - -class _GetTable(object): - __slots__ = 'key', 'metadata' - - def __init__(self, key, metadata): - self.key = key - self.metadata = metadata - - def __getattr__(self, key): - return self.metadata.tables[ - _get_table_key(key, self.key) - ] - - -def _determine_container(key, value): - if isinstance(value, _MultipleClassMarker): - value = value.attempt_get([], key) - return _GetColumns(value) - - -class _class_resolver(object): - def __init__(self, cls, prop, fallback, arg): - self.cls = cls - self.prop = prop - self.arg = self._declarative_arg = arg - self.fallback = fallback - self._dict = util.PopulateDict(self._access_cls) - self._resolvers = () - - def _access_cls(self, key): - cls = self.cls - if key in cls._decl_class_registry: - return _determine_container(key, cls._decl_class_registry[key]) - elif key in cls.metadata.tables: - return cls.metadata.tables[key] - elif key in cls.metadata._schemas: - return _GetTable(key, cls.metadata) - elif '_sa_module_registry' in cls._decl_class_registry and \ - key in cls._decl_class_registry['_sa_module_registry']: - registry = cls._decl_class_registry['_sa_module_registry'] - return registry.resolve_attr(key) - elif self._resolvers: - for resolv in self._resolvers: - value = resolv(key) - if value is not None: - return value - - return self.fallback[key] - - def __call__(self): - try: - x = eval(self.arg, globals(), self._dict) - - if isinstance(x, _GetColumns): - return x.cls - else: - return x - except NameError as n: - raise exc.InvalidRequestError( - "When initializing mapper %s, expression %r failed to " - "locate a name (%r). If this is a class name, consider " - "adding this relationship() to the %r class after " - "both dependent classes have been defined." % - (self.prop.parent, self.arg, n.args[0], self.cls) - ) - - -def _resolver(cls, prop): - import sqlalchemy - from sqlalchemy.orm import foreign, remote - - fallback = sqlalchemy.__dict__.copy() - fallback.update({'foreign': foreign, 'remote': remote}) - - def resolve_arg(arg): - return _class_resolver(cls, prop, fallback, arg) - return resolve_arg - - -def _deferred_relationship(cls, prop): - - if isinstance(prop, RelationshipProperty): - resolve_arg = _resolver(cls, prop) - - for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin', - 'secondary', '_user_defined_foreign_keys', 'remote_side'): - v = getattr(prop, attr) - if isinstance(v, util.string_types): - setattr(prop, attr, resolve_arg(v)) - - if prop.backref and isinstance(prop.backref, tuple): - key, kwargs = prop.backref - for attr in ('primaryjoin', 'secondaryjoin', 'secondary', - 'foreign_keys', 'remote_side', 'order_by'): - if attr in kwargs and isinstance(kwargs[attr], str): - kwargs[attr] = resolve_arg(kwargs[attr]) - - return prop diff --git a/python/sqlalchemy/ext/horizontal_shard.py b/python/sqlalchemy/ext/horizontal_shard.py deleted file mode 100644 index c9fb0b04..00000000 --- a/python/sqlalchemy/ext/horizontal_shard.py +++ /dev/null @@ -1,131 +0,0 @@ -# ext/horizontal_shard.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Horizontal sharding support. - -Defines a rudimental 'horizontal sharding' system which allows a Session to -distribute queries and persistence operations across multiple databases. 
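-
-As a brief, hypothetical sketch (the ``shard1``/``shard2`` names and the
-in-memory SQLite engines are illustrative only), the three "chooser"
-callables accepted by :class:`.ShardedSession` below might look like::
-
-    from sqlalchemy import create_engine
-
-    shards = {'shard1': create_engine('sqlite://'),
-              'shard2': create_engine('sqlite://')}
-
-    def shard_chooser(mapper, instance, clause=None):
-        # decide where an instance is persisted; any deterministic
-        # scheme (attribute-based, round-robin, etc.) will do
-        return 'shard1'
-
-    def id_chooser(query, ident):
-        # a given primary key may live on any shard
-        return ['shard1', 'shard2']
-
-    def query_chooser(query):
-        # with no criterion to narrow by, search all shards
-        return ['shard1', 'shard2']
-
-    session = ShardedSession(
-        shard_chooser=shard_chooser,
-        id_chooser=id_chooser,
-        query_chooser=query_chooser,
-        shards=shards)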
- -For a usage example, see the :ref:`examples_sharding` example included in -the source distribution. - -""" - -from .. import util -from ..orm.session import Session -from ..orm.query import Query - -__all__ = ['ShardedSession', 'ShardedQuery'] - - -class ShardedQuery(Query): - def __init__(self, *args, **kwargs): - super(ShardedQuery, self).__init__(*args, **kwargs) - self.id_chooser = self.session.id_chooser - self.query_chooser = self.session.query_chooser - self._shard_id = None - - def set_shard(self, shard_id): - """return a new query, limited to a single shard ID. - - all subsequent operations with the returned query will - be against the single shard regardless of other state. - """ - - q = self._clone() - q._shard_id = shard_id - return q - - def _execute_and_instances(self, context): - def iter_for_shard(shard_id): - context.attributes['shard_id'] = shard_id - result = self._connection_from_session( - mapper=self._mapper_zero(), - shard_id=shard_id).execute( - context.statement, - self._params) - return self.instances(result, context) - - if self._shard_id is not None: - return iter_for_shard(self._shard_id) - else: - partial = [] - for shard_id in self.query_chooser(self): - partial.extend(iter_for_shard(shard_id)) - - # if some kind of in memory 'sorting' - # were done, this is where it would happen - return iter(partial) - - def get(self, ident, **kwargs): - if self._shard_id is not None: - return super(ShardedQuery, self).get(ident) - else: - ident = util.to_list(ident) - for shard_id in self.id_chooser(self, ident): - o = self.set_shard(shard_id).get(ident, **kwargs) - if o is not None: - return o - else: - return None - - -class ShardedSession(Session): - def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None, - query_cls=ShardedQuery, **kwargs): - """Construct a ShardedSession. - - :param shard_chooser: A callable which, passed a Mapper, a mapped - instance, and possibly a SQL clause, returns a shard ID. This id - may be based off of the attributes present within the object, or on - some round-robin scheme. If the scheme is based on a selection, it - should set whatever state on the instance to mark it in the future as - participating in that shard. - - :param id_chooser: A callable, passed a query and a tuple of identity - values, which should return a list of shard ids where the ID might - reside. The databases will be queried in the order of this listing. - - :param query_chooser: For a given Query, returns the list of shard_ids - where the query should be issued. Results from all shards returned - will be combined together into a single listing. - - :param shards: A dictionary of string shard names - to :class:`~sqlalchemy.engine.Engine` objects. 
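-
-        (Passing ``shards`` up front is optional; as a sketch, shards may
-        equally be attached after construction, one at a time, via
-        :meth:`.bind_shard`.)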
- - """ - super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs) - self.shard_chooser = shard_chooser - self.id_chooser = id_chooser - self.query_chooser = query_chooser - self.__binds = {} - self.connection_callable = self.connection - if shards is not None: - for k in shards: - self.bind_shard(k, shards[k]) - - def connection(self, mapper=None, instance=None, shard_id=None, **kwargs): - if shard_id is None: - shard_id = self.shard_chooser(mapper, instance) - - if self.transaction is not None: - return self.transaction.connection(mapper, shard_id=shard_id) - else: - return self.get_bind( - mapper, - shard_id=shard_id, - instance=instance - ).contextual_connect(**kwargs) - - def get_bind(self, mapper, shard_id=None, - instance=None, clause=None, **kw): - if shard_id is None: - shard_id = self.shard_chooser(mapper, instance, clause=clause) - return self.__binds[shard_id] - - def bind_shard(self, shard_id, bind): - self.__binds[shard_id] = bind diff --git a/python/sqlalchemy/ext/hybrid.py b/python/sqlalchemy/ext/hybrid.py deleted file mode 100644 index 0073494b..00000000 --- a/python/sqlalchemy/ext/hybrid.py +++ /dev/null @@ -1,810 +0,0 @@ -# ext/hybrid.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Define attributes on ORM-mapped classes that have "hybrid" behavior. - -"hybrid" means the attribute has distinct behaviors defined at the -class level and at the instance level. - -The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of -method decorator, is around 50 lines of code and has almost no -dependencies on the rest of SQLAlchemy. It can, in theory, work with -any descriptor-based expression system. - -Consider a mapping ``Interval``, representing integer ``start`` and ``end`` -values. We can define higher level functions on mapped classes that produce -SQL expressions at the class level, and Python expression evaluation at the -instance level. Below, each function decorated with :class:`.hybrid_method` or -:class:`.hybrid_property` may receive ``self`` as an instance of the class, or -as the class itself:: - - from sqlalchemy import Column, Integer - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import Session, aliased - from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method - - Base = declarative_base() - - class Interval(Base): - __tablename__ = 'interval' - - id = Column(Integer, primary_key=True) - start = Column(Integer, nullable=False) - end = Column(Integer, nullable=False) - - def __init__(self, start, end): - self.start = start - self.end = end - - @hybrid_property - def length(self): - return self.end - self.start - - @hybrid_method - def contains(self, point): - return (self.start <= point) & (point <= self.end) - - @hybrid_method - def intersects(self, other): - return self.contains(other.start) | self.contains(other.end) - -Above, the ``length`` property returns the difference between the -``end`` and ``start`` attributes. 
With an instance of ``Interval``, -this subtraction occurs in Python, using normal Python descriptor -mechanics:: - - >>> i1 = Interval(5, 10) - >>> i1.length - 5 - -When dealing with the ``Interval`` class itself, the :class:`.hybrid_property` -descriptor evaluates the function body given the ``Interval`` class as -the argument, which when evaluated with SQLAlchemy expression mechanics -returns a new SQL expression:: - - >>> print Interval.length - interval."end" - interval.start - - >>> print Session().query(Interval).filter(Interval.length > 10) - SELECT interval.id AS interval_id, interval.start AS interval_start, - interval."end" AS interval_end - FROM interval - WHERE interval."end" - interval.start > :param_1 - -ORM methods such as :meth:`~.Query.filter_by` generally use ``getattr()`` to -locate attributes, so can also be used with hybrid attributes:: - - >>> print Session().query(Interval).filter_by(length=5) - SELECT interval.id AS interval_id, interval.start AS interval_start, - interval."end" AS interval_end - FROM interval - WHERE interval."end" - interval.start = :param_1 - -The ``Interval`` class example also illustrates two methods, -``contains()`` and ``intersects()``, decorated with -:class:`.hybrid_method`. This decorator applies the same idea to -methods that :class:`.hybrid_property` applies to attributes. The -methods return boolean values, and take advantage of the Python ``|`` -and ``&`` bitwise operators to produce equivalent instance-level and -SQL expression-level boolean behavior:: - - >>> i1.contains(6) - True - >>> i1.contains(15) - False - >>> i1.intersects(Interval(7, 18)) - True - >>> i1.intersects(Interval(25, 29)) - False - - >>> print Session().query(Interval).filter(Interval.contains(15)) - SELECT interval.id AS interval_id, interval.start AS interval_start, - interval."end" AS interval_end - FROM interval - WHERE interval.start <= :start_1 AND interval."end" > :end_1 - - >>> ia = aliased(Interval) - >>> print Session().query(Interval, ia).filter(Interval.intersects(ia)) - SELECT interval.id AS interval_id, interval.start AS interval_start, - interval."end" AS interval_end, interval_1.id AS interval_1_id, - interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end - FROM interval, interval AS interval_1 - WHERE interval.start <= interval_1.start - AND interval."end" > interval_1.start - OR interval.start <= interval_1."end" - AND interval."end" > interval_1."end" - -Defining Expression Behavior Distinct from Attribute Behavior --------------------------------------------------------------- - -Our usage of the ``&`` and ``|`` bitwise operators above was -fortunate, considering our functions operated on two boolean values to -return a new one. In many cases, the construction of an in-Python -function and a SQLAlchemy SQL expression have enough differences that -two separate Python expressions should be defined. The -:mod:`~sqlalchemy.ext.hybrid` decorators define the -:meth:`.hybrid_property.expression` modifier for this purpose. As an -example we'll define the radius of the interval, which requires the -usage of the absolute value function:: - - from sqlalchemy import func - - class Interval(object): - # ... 
-
-        @hybrid_property
-        def radius(self):
-            return abs(self.length) / 2
-
-        @radius.expression
-        def radius(cls):
-            return func.abs(cls.length) / 2
-
-Above the Python function ``abs()`` is used for instance-level
-operations, and the SQL function ``ABS()`` is used via the :data:`.func`
-object for class-level expressions::
-
-    >>> i1.radius
-    2
-
-    >>> print Session().query(Interval).filter(Interval.radius > 5)
-    SELECT interval.id AS interval_id, interval.start AS interval_start,
-        interval."end" AS interval_end
-    FROM interval
-    WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1
-
-Defining Setters
-----------------
-
-Hybrid properties can also define setter methods.  If we wanted
-``length`` above, when set, to modify the endpoint value::
-
-    class Interval(object):
-        # ...
-
-        @hybrid_property
-        def length(self):
-            return self.end - self.start
-
-        @length.setter
-        def length(self, value):
-            self.end = self.start + value
-
-The ``length(self, value)`` method is now called upon set::
-
-    >>> i1 = Interval(5, 10)
-    >>> i1.length
-    5
-    >>> i1.length = 12
-    >>> i1.end
-    17
-
-Working with Relationships
---------------------------
-
-There's no essential difference when creating hybrids that work with
-related objects as opposed to column-based data.  The need for distinct
-expressions tends to be greater.  The two variants we'll illustrate
-are the "join-dependent" hybrid, and the "correlated subquery" hybrid.
-
-Join-Dependent Relationship Hybrid
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Consider the following declarative
-mapping which relates a ``User`` to a ``SavingsAccount``::
-
-    from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
-    from sqlalchemy.orm import relationship
-    from sqlalchemy.ext.declarative import declarative_base
-    from sqlalchemy.ext.hybrid import hybrid_property
-
-    Base = declarative_base()
-
-    class SavingsAccount(Base):
-        __tablename__ = 'account'
-        id = Column(Integer, primary_key=True)
-        user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
-        balance = Column(Numeric(15, 5))
-
-    class User(Base):
-        __tablename__ = 'user'
-        id = Column(Integer, primary_key=True)
-        name = Column(String(100), nullable=False)
-
-        accounts = relationship("SavingsAccount", backref="owner")
-
-        @hybrid_property
-        def balance(self):
-            if self.accounts:
-                return self.accounts[0].balance
-            else:
-                return None
-
-        @balance.setter
-        def balance(self, value):
-            if not self.accounts:
-                account = SavingsAccount(owner=self)
-            else:
-                account = self.accounts[0]
-            account.balance = value
-
-        @balance.expression
-        def balance(cls):
-            return SavingsAccount.balance
-
-The above hybrid property ``balance`` works with the first
-``SavingsAccount`` entry in the list of accounts for this user.  The
-in-Python getter/setter methods can treat ``accounts`` as a Python
-list available on ``self``.
-
-However, at the expression level, it's expected that the ``User`` class will
-be used in an appropriate context such that an appropriate join to
-``SavingsAccount`` will be present::
-
-    >>> print Session().query(User, User.balance).\\
-    ...
join(User.accounts).filter(User.balance > 5000) - SELECT "user".id AS user_id, "user".name AS user_name, - account.balance AS account_balance - FROM "user" JOIN account ON "user".id = account.user_id - WHERE account.balance > :balance_1 - -Note however, that while the instance level accessors need to worry -about whether ``self.accounts`` is even present, this issue expresses -itself differently at the SQL expression level, where we basically -would use an outer join:: - - >>> from sqlalchemy import or_ - >>> print (Session().query(User, User.balance).outerjoin(User.accounts). - ... filter(or_(User.balance < 5000, User.balance == None))) - SELECT "user".id AS user_id, "user".name AS user_name, - account.balance AS account_balance - FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id - WHERE account.balance < :balance_1 OR account.balance IS NULL - -Correlated Subquery Relationship Hybrid -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -We can, of course, forego being dependent on the enclosing query's usage -of joins in favor of the correlated subquery, which can portably be packed -into a single column expression. A correlated subquery is more portable, but -often performs more poorly at the SQL level. Using the same technique -illustrated at :ref:`mapper_column_property_sql_expressions`, -we can adjust our ``SavingsAccount`` example to aggregate the balances for -*all* accounts, and use a correlated subquery for the column expression:: - - from sqlalchemy import Column, Integer, ForeignKey, Numeric, String - from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.ext.hybrid import hybrid_property - from sqlalchemy import select, func - - Base = declarative_base() - - class SavingsAccount(Base): - __tablename__ = 'account' - id = Column(Integer, primary_key=True) - user_id = Column(Integer, ForeignKey('user.id'), nullable=False) - balance = Column(Numeric(15, 5)) - - class User(Base): - __tablename__ = 'user' - id = Column(Integer, primary_key=True) - name = Column(String(100), nullable=False) - - accounts = relationship("SavingsAccount", backref="owner") - - @hybrid_property - def balance(self): - return sum(acc.balance for acc in self.accounts) - - @balance.expression - def balance(cls): - return select([func.sum(SavingsAccount.balance)]).\\ - where(SavingsAccount.user_id==cls.id).\\ - label('total_balance') - -The above recipe will give us the ``balance`` column which renders -a correlated SELECT:: - - >>> print s.query(User).filter(User.balance > 400) - SELECT "user".id AS user_id, "user".name AS user_name - FROM "user" - WHERE (SELECT sum(account.balance) AS sum_1 - FROM account - WHERE account.user_id = "user".id) > :param_1 - -.. _hybrid_custom_comparators: - -Building Custom Comparators ---------------------------- - -The hybrid property also includes a helper that allows construction of -custom comparators. A comparator object allows one to customize the -behavior of each SQLAlchemy expression operator individually. They -are useful when creating custom types that have some highly -idiosyncratic behavior on the SQL side. 
- -The example class below allows case-insensitive comparisons on the attribute -named ``word_insensitive``:: - - from sqlalchemy.ext.hybrid import Comparator, hybrid_property - from sqlalchemy import func, Column, Integer, String - from sqlalchemy.orm import Session - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - - class CaseInsensitiveComparator(Comparator): - def __eq__(self, other): - return func.lower(self.__clause_element__()) == func.lower(other) - - class SearchWord(Base): - __tablename__ = 'searchword' - id = Column(Integer, primary_key=True) - word = Column(String(255), nullable=False) - - @hybrid_property - def word_insensitive(self): - return self.word.lower() - - @word_insensitive.comparator - def word_insensitive(cls): - return CaseInsensitiveComparator(cls.word) - -Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()`` -SQL function to both sides:: - - >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks") - SELECT searchword.id AS searchword_id, searchword.word AS searchword_word - FROM searchword - WHERE lower(searchword.word) = lower(:lower_1) - -The ``CaseInsensitiveComparator`` above implements part of the -:class:`.ColumnOperators` interface. A "coercion" operation like -lowercasing can be applied to all comparison operations (i.e. ``eq``, -``lt``, ``gt``, etc.) using :meth:`.Operators.operate`:: - - class CaseInsensitiveComparator(Comparator): - def operate(self, op, other): - return op(func.lower(self.__clause_element__()), func.lower(other)) - -Hybrid Value Objects --------------------- - -Note in our previous example, if we were to compare the -``word_insensitive`` attribute of a ``SearchWord`` instance to a plain -Python string, the plain Python string would not be coerced to lower -case - the ``CaseInsensitiveComparator`` we built, being returned by -``@word_insensitive.comparator``, only applies to the SQL side. - -A more comprehensive form of the custom comparator is to construct a -*Hybrid Value Object*. This technique applies the target value or -expression to a value object which is then returned by the accessor in -all cases. The value object allows control of all operations upon -the value as well as how compared values are treated, both on the SQL -expression side as well as the Python value side. Replacing the -previous ``CaseInsensitiveComparator`` class with a new -``CaseInsensitiveWord`` class:: - - class CaseInsensitiveWord(Comparator): - "Hybrid value representing a lower case representation of a word." - - def __init__(self, word): - if isinstance(word, basestring): - self.word = word.lower() - elif isinstance(word, CaseInsensitiveWord): - self.word = word.word - else: - self.word = func.lower(word) - - def operate(self, op, other): - if not isinstance(other, CaseInsensitiveWord): - other = CaseInsensitiveWord(other) - return op(self.word, other.word) - - def __clause_element__(self): - return self.word - - def __str__(self): - return self.word - - key = 'word' - "Label to apply to Query tuple results" - -Above, the ``CaseInsensitiveWord`` object represents ``self.word``, -which may be a SQL function, or may be a Python native. By -overriding ``operate()`` and ``__clause_element__()`` to work in terms -of ``self.word``, all comparison operations will work against the -"converted" form of ``word``, whether it be SQL side or Python side. 
-Our ``SearchWord`` class can now deliver the ``CaseInsensitiveWord`` -object unconditionally from a single hybrid call:: - - class SearchWord(Base): - __tablename__ = 'searchword' - id = Column(Integer, primary_key=True) - word = Column(String(255), nullable=False) - - @hybrid_property - def word_insensitive(self): - return CaseInsensitiveWord(self.word) - -The ``word_insensitive`` attribute now has case-insensitive comparison -behavior universally, including SQL expression vs. Python expression -(note the Python value is converted to lower case on the Python side -here):: - - >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks") - SELECT searchword.id AS searchword_id, searchword.word AS searchword_word - FROM searchword - WHERE lower(searchword.word) = :lower_1 - -SQL expression versus SQL expression:: - - >>> sw1 = aliased(SearchWord) - >>> sw2 = aliased(SearchWord) - >>> print Session().query( - ... sw1.word_insensitive, - ... sw2.word_insensitive).\\ - ... filter( - ... sw1.word_insensitive > sw2.word_insensitive - ... ) - SELECT lower(searchword_1.word) AS lower_1, - lower(searchword_2.word) AS lower_2 - FROM searchword AS searchword_1, searchword AS searchword_2 - WHERE lower(searchword_1.word) > lower(searchword_2.word) - -Python only expression:: - - >>> ws1 = SearchWord(word="SomeWord") - >>> ws1.word_insensitive == "sOmEwOrD" - True - >>> ws1.word_insensitive == "XOmEwOrX" - False - >>> print ws1.word_insensitive - someword - -The Hybrid Value pattern is very useful for any kind of value that may -have multiple representations, such as timestamps, time deltas, units -of measurement, currencies and encrypted passwords. - -.. seealso:: - - `Hybrids and Value Agnostic Types - `_ - - on the techspot.zzzeek.org blog - - `Value Agnostic Types, Part II - `_ - - on the techspot.zzzeek.org blog - -.. _hybrid_transformers: - -Building Transformers ----------------------- - -A *transformer* is an object which can receive a :class:`.Query` -object and return a new one. The :class:`.Query` object includes a -method :meth:`.with_transformation` that returns a new :class:`.Query` -transformed by the given function. - -We can combine this with the :class:`.Comparator` class to produce one type -of recipe which can both set up the FROM clause of a query as well as assign -filtering criterion. - -Consider a mapped class ``Node``, which assembles using adjacency list -into a hierarchical tree pattern:: - - from sqlalchemy import Column, Integer, ForeignKey - from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import declarative_base - Base = declarative_base() - - class Node(Base): - __tablename__ = 'node' - id =Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) - parent = relationship("Node", remote_side=id) - -Suppose we wanted to add an accessor ``grandparent``. This would -return the ``parent`` of ``Node.parent``. When we have an instance of -``Node``, this is simple:: - - from sqlalchemy.ext.hybrid import hybrid_property - - class Node(Base): - # ... - - @hybrid_property - def grandparent(self): - return self.parent.parent - -For the expression, things are not so clear. We'd need to construct -a :class:`.Query` where we :meth:`~.Query.join` twice along -``Node.parent`` to get to the ``grandparent``. 
We can instead return -a transforming callable that we'll combine with the -:class:`.Comparator` class to receive any :class:`.Query` object, and -return a new one that's joined to the ``Node.parent`` attribute and -filtered based on the given criterion:: - - from sqlalchemy.ext.hybrid import Comparator - - class GrandparentTransformer(Comparator): - def operate(self, op, other): - def transform(q): - cls = self.__clause_element__() - parent_alias = aliased(cls) - return q.join(parent_alias, cls.parent).\\ - filter(op(parent_alias.parent, other)) - return transform - - Base = declarative_base() - - class Node(Base): - __tablename__ = 'node' - id =Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) - parent = relationship("Node", remote_side=id) - - @hybrid_property - def grandparent(self): - return self.parent.parent - - @grandparent.comparator - def grandparent(cls): - return GrandparentTransformer(cls) - -The ``GrandparentTransformer`` overrides the core -:meth:`.Operators.operate` method at the base of the -:class:`.Comparator` hierarchy to return a query-transforming -callable, which then runs the given comparison operation in a -particular context. Such as, in the example above, the ``operate`` -method is called, given the :attr:`.Operators.eq` callable as well as -the right side of the comparison ``Node(id=5)``. A function -``transform`` is then returned which will transform a :class:`.Query` -first to join to ``Node.parent``, then to compare ``parent_alias`` -using :attr:`.Operators.eq` against the left and right sides, passing -into :class:`.Query.filter`: - -.. sourcecode:: pycon+sql - - >>> from sqlalchemy.orm import Session - >>> session = Session() - {sql}>>> session.query(Node).\\ - ... with_transformation(Node.grandparent==Node(id=5)).\\ - ... all() - SELECT node.id AS node_id, node.parent_id AS node_parent_id - FROM node JOIN node AS node_1 ON node_1.id = node.parent_id - WHERE :param_1 = node_1.parent_id - {stop} - -We can modify the pattern to be more verbose but flexible by separating -the "join" step from the "filter" step. The tricky part here is ensuring -that successive instances of ``GrandparentTransformer`` use the same -:class:`.AliasedClass` object against ``Node``. Below we use a simple -memoizing approach that associates a ``GrandparentTransformer`` -with each class:: - - class Node(Base): - - # ... - - @grandparent.comparator - def grandparent(cls): - # memoize a GrandparentTransformer - # per class - if '_gp' not in cls.__dict__: - cls._gp = GrandparentTransformer(cls) - return cls._gp - - class GrandparentTransformer(Comparator): - - def __init__(self, cls): - self.parent_alias = aliased(cls) - - @property - def join(self): - def go(q): - return q.join(self.parent_alias, Node.parent) - return go - - def operate(self, op, other): - return op(self.parent_alias.parent, other) - -.. sourcecode:: pycon+sql - - {sql}>>> session.query(Node).\\ - ... with_transformation(Node.grandparent.join).\\ - ... filter(Node.grandparent==Node(id=5)) - SELECT node.id AS node_id, node.parent_id AS node_parent_id - FROM node JOIN node AS node_1 ON node_1.id = node.parent_id - WHERE :param_1 = node_1.parent_id - {stop} - -The "transformer" pattern is an experimental pattern that starts -to make usage of some functional programming paradigms. -While it's only recommended for advanced and/or patient developers, -there's probably a whole lot of amazing things it can be used for. - -""" -from .. 
import util
-from ..orm import attributes, interfaces
-
-HYBRID_METHOD = util.symbol('HYBRID_METHOD')
-"""Symbol indicating an :class:`InspectionAttr` that's
-   of type :class:`.hybrid_method`.
-
-   Is assigned to the :attr:`.InspectionAttr.extension_type`
-   attribute.
-
-   .. seealso::
-
-    :attr:`.Mapper.all_orm_attributes`
-
-"""
-
-HYBRID_PROPERTY = util.symbol('HYBRID_PROPERTY')
-"""Symbol indicating an :class:`InspectionAttr` that's
-   of type :class:`.hybrid_property`.
-
-   Is assigned to the :attr:`.InspectionAttr.extension_type`
-   attribute.
-
-   .. seealso::
-
-    :attr:`.Mapper.all_orm_attributes`
-
-"""
-
-
-class hybrid_method(interfaces.InspectionAttrInfo):
-    """A decorator which allows definition of a Python object method with both
-    instance-level and class-level behavior.
-
-    """
-
-    is_attribute = True
-    extension_type = HYBRID_METHOD
-
-    def __init__(self, func, expr=None):
-        """Create a new :class:`.hybrid_method`.
-
-        Usage is typically via decorator::
-
-            from sqlalchemy.ext.hybrid import hybrid_method
-
-            class SomeClass(object):
-                @hybrid_method
-                def value(self, x, y):
-                    return self._value + x + y
-
-                @value.expression
-                def value(self, x, y):
-                    return func.some_function(self._value, x, y)
-
-        """
-        self.func = func
-        self.expr = expr or func
-
-    def __get__(self, instance, owner):
-        if instance is None:
-            return self.expr.__get__(owner, owner.__class__)
-        else:
-            return self.func.__get__(instance, owner)
-
-    def expression(self, expr):
-        """Provide a modifying decorator that defines a
-        SQL-expression producing method."""
-
-        self.expr = expr
-        return self
-
-
-class hybrid_property(interfaces.InspectionAttrInfo):
-    """A decorator which allows definition of a Python descriptor with both
-    instance-level and class-level behavior.
-
-    """
-
-    is_attribute = True
-    extension_type = HYBRID_PROPERTY
-
-    def __init__(self, fget, fset=None, fdel=None, expr=None):
-        """Create a new :class:`.hybrid_property`.
-
-        Usage is typically via decorator::
-
-            from sqlalchemy.ext.hybrid import hybrid_property
-
-            class SomeClass(object):
-                @hybrid_property
-                def value(self):
-                    return self._value
-
-                @value.setter
-                def value(self, value):
-                    self._value = value
-
-        """
-        self.fget = fget
-        self.fset = fset
-        self.fdel = fdel
-        self.expr = expr or fget
-        util.update_wrapper(self, fget)
-
-    def __get__(self, instance, owner):
-        if instance is None:
-            return self.expr(owner)
-        else:
-            return self.fget(instance)
-
-    def __set__(self, instance, value):
-        if self.fset is None:
-            raise AttributeError("can't set attribute")
-        self.fset(instance, value)
-
-    def __delete__(self, instance):
-        if self.fdel is None:
-            raise AttributeError("can't delete attribute")
-        self.fdel(instance)
-
-    def setter(self, fset):
-        """Provide a modifying decorator that defines a value-setter method."""
-
-        self.fset = fset
-        return self
-
-    def deleter(self, fdel):
-        """Provide a modifying decorator that defines a
-        value-deletion method."""
-
-        self.fdel = fdel
-        return self
-
-    def expression(self, expr):
-        """Provide a modifying decorator that defines a SQL-expression
-        producing method."""
-
-        self.expr = expr
-        return self
-
-    def comparator(self, comparator):
-        """Provide a modifying decorator that defines a custom
-        comparator producing method.
-
-        The return value of the decorated method should be an instance of
-        :class:`~.hybrid.Comparator`.
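-
-        A minimal sketch, reusing the ``word_insensitive`` pattern from
-        the module documentation above (``CaseInsensitiveComparator``
-        is defined there)::
-
-            @word_insensitive.comparator
-            def word_insensitive(cls):
-                return CaseInsensitiveComparator(cls.word)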
- - """ - - proxy_attr = attributes.\ - create_proxied_attribute(self) - - def expr(owner): - return proxy_attr(owner, self.__name__, self, comparator(owner)) - self.expr = expr - return self - - -class Comparator(interfaces.PropComparator): - """A helper class that allows easy construction of custom - :class:`~.orm.interfaces.PropComparator` - classes for usage with hybrids.""" - - property = None - - def __init__(self, expression): - self.expression = expression - - def __clause_element__(self): - expr = self.expression - while hasattr(expr, '__clause_element__'): - expr = expr.__clause_element__() - return expr - - def adapt_to_entity(self, adapt_to_entity): - # interesting.... - return self diff --git a/python/sqlalchemy/ext/instrumentation.py b/python/sqlalchemy/ext/instrumentation.py deleted file mode 100644 index 30a0ab7d..00000000 --- a/python/sqlalchemy/ext/instrumentation.py +++ /dev/null @@ -1,414 +0,0 @@ -"""Extensible class instrumentation. - -The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate -systems of class instrumentation within the ORM. Class instrumentation -refers to how the ORM places attributes on the class which maintain -data and track changes to that data, as well as event hooks installed -on the class. - -.. note:: - The extension package is provided for the benefit of integration - with other object management packages, which already perform - their own instrumentation. It is not intended for general use. - -For examples of how the instrumentation extension is used, -see the example :ref:`examples_instrumentation`. - -.. versionchanged:: 0.8 - The :mod:`sqlalchemy.orm.instrumentation` was split out so - that all functionality having to do with non-standard - instrumentation was moved out to :mod:`sqlalchemy.ext.instrumentation`. - When imported, the module installs itself within - :mod:`sqlalchemy.orm.instrumentation` so that it - takes effect, including recognition of - ``__sa_instrumentation_manager__`` on mapped classes, as - well :data:`.instrumentation_finders` - being used to determine class instrumentation resolution. - -""" -from ..orm import instrumentation as orm_instrumentation -from ..orm.instrumentation import ( - ClassManager, InstrumentationFactory, _default_state_getter, - _default_dict_getter, _default_manager_getter -) -from ..orm import attributes, collections, base as orm_base -from .. import util -from ..orm import exc as orm_exc -import weakref - -INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__' -"""Attribute, elects custom instrumentation when present on a mapped class. - -Allows a class to specify a slightly or wildly different technique for -tracking changes made to mapped attributes and collections. - -Only one instrumentation implementation is allowed in a given object -inheritance hierarchy. - -The value of this attribute must be a callable and will be passed a class -object. The callable must return one of: - - - An instance of an InstrumentationManager or subclass - - An object implementing all or some of InstrumentationManager (TODO) - - A dictionary of callables, implementing all or some of the above (TODO) - - An instance of a ClassManager or subclass - -This attribute is consulted by SQLAlchemy instrumentation -resolution, once the :mod:`sqlalchemy.ext.instrumentation` module -has been imported. If custom finders are installed in the global -instrumentation_finders list, they may or may not choose to honor this -attribute. 
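-
-For example, a class can elect a custom manager simply by assigning this
-attribute (an illustrative sketch; ``MyInstrumentationManager`` is assumed
-to be an :class:`.InstrumentationManager` subclass)::
-
-    class MyClass(object):
-        __sa_instrumentation_manager__ = MyInstrumentationManager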
- -""" - - -def find_native_user_instrumentation_hook(cls): - """Find user-specified instrumentation management for a class.""" - return getattr(cls, INSTRUMENTATION_MANAGER, None) - -instrumentation_finders = [find_native_user_instrumentation_hook] -"""An extensible sequence of callables which return instrumentation -implementations - -When a class is registered, each callable will be passed a class object. -If None is returned, the -next finder in the sequence is consulted. Otherwise the return must be an -instrumentation factory that follows the same guidelines as -sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER. - -By default, the only finder is find_native_user_instrumentation_hook, which -searches for INSTRUMENTATION_MANAGER. If all finders return None, standard -ClassManager instrumentation is used. - -""" - - -class ExtendedInstrumentationRegistry(InstrumentationFactory): - """Extends :class:`.InstrumentationFactory` with additional - bookkeeping, to accommodate multiple types of - class managers. - - """ - _manager_finders = weakref.WeakKeyDictionary() - _state_finders = weakref.WeakKeyDictionary() - _dict_finders = weakref.WeakKeyDictionary() - _extended = False - - def _locate_extended_factory(self, class_): - for finder in instrumentation_finders: - factory = finder(class_) - if factory is not None: - manager = self._extended_class_manager(class_, factory) - return manager, factory - else: - return None, None - - def _check_conflicts(self, class_, factory): - existing_factories = self._collect_management_factories_for(class_).\ - difference([factory]) - if existing_factories: - raise TypeError( - "multiple instrumentation implementations specified " - "in %s inheritance hierarchy: %r" % ( - class_.__name__, list(existing_factories))) - - def _extended_class_manager(self, class_, factory): - manager = factory(class_) - if not isinstance(manager, ClassManager): - manager = _ClassInstrumentationAdapter(class_, manager) - - if factory != ClassManager and not self._extended: - # somebody invoked a custom ClassManager. - # reinstall global "getter" functions with the more - # expensive ones. - self._extended = True - _install_instrumented_lookups() - - self._manager_finders[class_] = manager.manager_getter() - self._state_finders[class_] = manager.state_getter() - self._dict_finders[class_] = manager.dict_getter() - return manager - - def _collect_management_factories_for(self, cls): - """Return a collection of factories in play or specified for a - hierarchy. - - Traverses the entire inheritance graph of a cls and returns a - collection of instrumentation factories for those classes. Factories - are extracted from active ClassManagers, if available, otherwise - instrumentation_finders is consulted. 
- - """ - hierarchy = util.class_hierarchy(cls) - factories = set() - for member in hierarchy: - manager = self.manager_of_class(member) - if manager is not None: - factories.add(manager.factory) - else: - for finder in instrumentation_finders: - factory = finder(member) - if factory is not None: - break - else: - factory = None - factories.add(factory) - factories.discard(None) - return factories - - def unregister(self, class_): - if class_ in self._manager_finders: - del self._manager_finders[class_] - del self._state_finders[class_] - del self._dict_finders[class_] - super(ExtendedInstrumentationRegistry, self).unregister(class_) - - def manager_of_class(self, cls): - if cls is None: - return None - try: - finder = self._manager_finders.get(cls, _default_manager_getter) - except TypeError: - # due to weakref lookup on invalid object - return None - else: - return finder(cls) - - def state_of(self, instance): - if instance is None: - raise AttributeError("None has no persistent state.") - return self._state_finders.get( - instance.__class__, _default_state_getter)(instance) - - def dict_of(self, instance): - if instance is None: - raise AttributeError("None has no persistent state.") - return self._dict_finders.get( - instance.__class__, _default_dict_getter)(instance) - - -orm_instrumentation._instrumentation_factory = \ - _instrumentation_factory = ExtendedInstrumentationRegistry() -orm_instrumentation.instrumentation_finders = instrumentation_finders - - -class InstrumentationManager(object): - """User-defined class instrumentation extension. - - :class:`.InstrumentationManager` can be subclassed in order - to change - how class instrumentation proceeds. This class exists for - the purposes of integration with other object management - frameworks which would like to entirely modify the - instrumentation methodology of the ORM, and is not intended - for regular usage. For interception of class instrumentation - events, see :class:`.InstrumentationEvents`. - - The API for this class should be considered as semi-stable, - and may change slightly with new releases. - - .. versionchanged:: 0.8 - :class:`.InstrumentationManager` was moved from - :mod:`sqlalchemy.orm.instrumentation` to - :mod:`sqlalchemy.ext.instrumentation`. - - """ - - # r4361 added a mandatory (cls) constructor to this interface. - # given that, perhaps class_ should be dropped from all of these - # signatures. 
- - def __init__(self, class_): - pass - - def manage(self, class_, manager): - setattr(class_, '_default_class_manager', manager) - - def dispose(self, class_, manager): - delattr(class_, '_default_class_manager') - - def manager_getter(self, class_): - def get(cls): - return cls._default_class_manager - return get - - def instrument_attribute(self, class_, key, inst): - pass - - def post_configure_attribute(self, class_, key, inst): - pass - - def install_descriptor(self, class_, key, inst): - setattr(class_, key, inst) - - def uninstall_descriptor(self, class_, key): - delattr(class_, key) - - def install_member(self, class_, key, implementation): - setattr(class_, key, implementation) - - def uninstall_member(self, class_, key): - delattr(class_, key) - - def instrument_collection_class(self, class_, key, collection_class): - return collections.prepare_instrumentation(collection_class) - - def get_instance_dict(self, class_, instance): - return instance.__dict__ - - def initialize_instance_dict(self, class_, instance): - pass - - def install_state(self, class_, instance, state): - setattr(instance, '_default_state', state) - - def remove_state(self, class_, instance): - delattr(instance, '_default_state') - - def state_getter(self, class_): - return lambda instance: getattr(instance, '_default_state') - - def dict_getter(self, class_): - return lambda inst: self.get_instance_dict(class_, inst) - - -class _ClassInstrumentationAdapter(ClassManager): - """Adapts a user-defined InstrumentationManager to a ClassManager.""" - - def __init__(self, class_, override): - self._adapted = override - self._get_state = self._adapted.state_getter(class_) - self._get_dict = self._adapted.dict_getter(class_) - - ClassManager.__init__(self, class_) - - def manage(self): - self._adapted.manage(self.class_, self) - - def dispose(self): - self._adapted.dispose(self.class_) - - def manager_getter(self): - return self._adapted.manager_getter(self.class_) - - def instrument_attribute(self, key, inst, propagated=False): - ClassManager.instrument_attribute(self, key, inst, propagated) - if not propagated: - self._adapted.instrument_attribute(self.class_, key, inst) - - def post_configure_attribute(self, key): - super(_ClassInstrumentationAdapter, self).post_configure_attribute(key) - self._adapted.post_configure_attribute(self.class_, key, self[key]) - - def install_descriptor(self, key, inst): - self._adapted.install_descriptor(self.class_, key, inst) - - def uninstall_descriptor(self, key): - self._adapted.uninstall_descriptor(self.class_, key) - - def install_member(self, key, implementation): - self._adapted.install_member(self.class_, key, implementation) - - def uninstall_member(self, key): - self._adapted.uninstall_member(self.class_, key) - - def instrument_collection_class(self, key, collection_class): - return self._adapted.instrument_collection_class( - self.class_, key, collection_class) - - def initialize_collection(self, key, state, factory): - delegate = getattr(self._adapted, 'initialize_collection', None) - if delegate: - return delegate(key, state, factory) - else: - return ClassManager.initialize_collection(self, key, - state, factory) - - def new_instance(self, state=None): - instance = self.class_.__new__(self.class_) - self.setup_instance(instance, state) - return instance - - def _new_state_if_none(self, instance): - """Install a default InstanceState if none is present. - - A private convenience method used by the __init__ decorator. 
- """ - if self.has_state(instance): - return False - else: - return self.setup_instance(instance) - - def setup_instance(self, instance, state=None): - self._adapted.initialize_instance_dict(self.class_, instance) - - if state is None: - state = self._state_constructor(instance, self) - - # the given instance is assumed to have no state - self._adapted.install_state(self.class_, instance, state) - return state - - def teardown_instance(self, instance): - self._adapted.remove_state(self.class_, instance) - - def has_state(self, instance): - try: - self._get_state(instance) - except orm_exc.NO_STATE: - return False - else: - return True - - def state_getter(self): - return self._get_state - - def dict_getter(self): - return self._get_dict - - -def _install_instrumented_lookups(): - """Replace global class/object management functions - with ExtendedInstrumentationRegistry implementations, which - allow multiple types of class managers to be present, - at the cost of performance. - - This function is called only by ExtendedInstrumentationRegistry - and unit tests specific to this behavior. - - The _reinstall_default_lookups() function can be called - after this one to re-establish the default functions. - - """ - _install_lookups( - dict( - instance_state=_instrumentation_factory.state_of, - instance_dict=_instrumentation_factory.dict_of, - manager_of_class=_instrumentation_factory.manager_of_class - ) - ) - - -def _reinstall_default_lookups(): - """Restore simplified lookups.""" - _install_lookups( - dict( - instance_state=_default_state_getter, - instance_dict=_default_dict_getter, - manager_of_class=_default_manager_getter - ) - ) - _instrumentation_factory._extended = False - - -def _install_lookups(lookups): - global instance_state, instance_dict, manager_of_class - instance_state = lookups['instance_state'] - instance_dict = lookups['instance_dict'] - manager_of_class = lookups['manager_of_class'] - orm_base.instance_state = attributes.instance_state = \ - orm_instrumentation.instance_state = instance_state - orm_base.instance_dict = attributes.instance_dict = \ - orm_instrumentation.instance_dict = instance_dict - orm_base.manager_of_class = attributes.manager_of_class = \ - orm_instrumentation.manager_of_class = manager_of_class diff --git a/python/sqlalchemy/ext/mutable.py b/python/sqlalchemy/ext/mutable.py deleted file mode 100644 index 501b18f3..00000000 --- a/python/sqlalchemy/ext/mutable.py +++ /dev/null @@ -1,679 +0,0 @@ -# ext/mutable.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provide support for tracking of in-place changes to scalar values, -which are propagated into ORM change events on owning parent objects. - -.. versionadded:: 0.7 :mod:`sqlalchemy.ext.mutable` replaces SQLAlchemy's - legacy approach to in-place mutations of scalar values; see - :ref:`07_migration_mutation_extension`. - -.. _mutable_scalars: - -Establishing Mutability on Scalar Column Values -=============================================== - -A typical example of a "mutable" structure is a Python dictionary. 
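-Without the extension, an in-place change to such a value goes undetected
-by the unit of work (a minimal sketch; ``MyDataClass`` is a hypothetical
-mapped class whose ``data`` column holds a plain dictionary)::
-
-    m1 = session.query(MyDataClass).first()
-    m1.data['value1'] = 'bar'   # in-place mutation; not flagged as dirty
-    session.commit()            # the change is silently lost
-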
-Following the example introduced in :ref:`types_toplevel`, we
-begin with a custom type that marshals Python dictionaries into
-JSON strings before being persisted::
-
-    from sqlalchemy.types import TypeDecorator, VARCHAR
-    import json
-
-    class JSONEncodedDict(TypeDecorator):
-        "Represents an immutable structure as a json-encoded string."
-
-        impl = VARCHAR
-
-        def process_bind_param(self, value, dialect):
-            if value is not None:
-                value = json.dumps(value)
-            return value
-
-        def process_result_value(self, value, dialect):
-            if value is not None:
-                value = json.loads(value)
-            return value
-
-The usage of ``json`` is only for the purposes of example. The
-:mod:`sqlalchemy.ext.mutable` extension can be used
-with any type whose target Python type may be mutable, including
-:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
-
-When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
-tracks all parents which reference it.  Below, we illustrate a simple
-version of the :class:`.MutableDict` dictionary object, which applies
-the :class:`.Mutable` mixin to a plain Python dictionary::
-
-    from sqlalchemy.ext.mutable import Mutable
-
-    class MutableDict(Mutable, dict):
-        @classmethod
-        def coerce(cls, key, value):
-            "Convert plain dictionaries to MutableDict."
-
-            if not isinstance(value, MutableDict):
-                if isinstance(value, dict):
-                    return MutableDict(value)
-
-                # this call will raise ValueError
-                return Mutable.coerce(key, value)
-            else:
-                return value
-
-        def __setitem__(self, key, value):
-            "Detect dictionary set events and emit change events."
-
-            dict.__setitem__(self, key, value)
-            self.changed()
-
-        def __delitem__(self, key):
-            "Detect dictionary del events and emit change events."
-
-            dict.__delitem__(self, key)
-            self.changed()
-
-The above dictionary class takes the approach of subclassing the Python
-built-in ``dict`` to produce a dict
-subclass which routes all mutation events through ``__setitem__``.  There are
-variants on this approach, such as subclassing ``UserDict.UserDict`` or
-``collections.MutableMapping``; the part that's important to this example is
-that the :meth:`.Mutable.changed` method is called whenever an in-place
-change to the datastructure takes place.
-
-We also redefine the :meth:`.Mutable.coerce` method which will be used to
-convert any values that are not instances of ``MutableDict``, such
-as the plain dictionaries returned by the ``json`` module, into the
-appropriate type.  Defining this method is optional; we could just as well
-have created our ``JSONEncodedDict`` such that it always returns an instance
-of ``MutableDict``, and additionally ensured that all calling code
-uses ``MutableDict`` explicitly.  When :meth:`.Mutable.coerce` is not
-overridden, any values applied to a parent object which are not instances
-of the mutable type will raise a ``ValueError``.
-
-Our new ``MutableDict`` type offers a class method
-:meth:`~.Mutable.as_mutable` which we can use within column metadata
-to associate with types. This method grabs the given type object or
-class and associates a listener that will detect all future mappings
-of this type, applying event listening instrumentation to the mapped
-attribute.
Such as, with classical table metadata:: - - from sqlalchemy import Table, Column, Integer - - my_data = Table('my_data', metadata, - Column('id', Integer, primary_key=True), - Column('data', MutableDict.as_mutable(JSONEncodedDict)) - ) - -Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict`` -(if the type object was not an instance already), which will intercept any -attributes which are mapped against this type. Below we establish a simple -mapping against the ``my_data`` table:: - - from sqlalchemy import mapper - - class MyDataClass(object): - pass - - # associates mutation listeners with MyDataClass.data - mapper(MyDataClass, my_data) - -The ``MyDataClass.data`` member will now be notified of in place changes -to its value. - -There's no difference in usage when using declarative:: - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - - class MyDataClass(Base): - __tablename__ = 'my_data' - id = Column(Integer, primary_key=True) - data = Column(MutableDict.as_mutable(JSONEncodedDict)) - -Any in-place changes to the ``MyDataClass.data`` member -will flag the attribute as "dirty" on the parent object:: - - >>> from sqlalchemy.orm import Session - - >>> sess = Session() - >>> m1 = MyDataClass(data={'value1':'foo'}) - >>> sess.add(m1) - >>> sess.commit() - - >>> m1.data['value1'] = 'bar' - >>> assert m1 in sess.dirty - True - -The ``MutableDict`` can be associated with all future instances -of ``JSONEncodedDict`` in one step, using -:meth:`~.Mutable.associate_with`. This is similar to -:meth:`~.Mutable.as_mutable` except it will intercept all occurrences -of ``MutableDict`` in all mappings unconditionally, without -the need to declare it individually:: - - MutableDict.associate_with(JSONEncodedDict) - - class MyDataClass(Base): - __tablename__ = 'my_data' - id = Column(Integer, primary_key=True) - data = Column(JSONEncodedDict) - - -Supporting Pickling --------------------- - -The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the -placement of a ``weakref.WeakKeyDictionary`` upon the value object, which -stores a mapping of parent mapped objects keyed to the attribute name under -which they are associated with this value. ``WeakKeyDictionary`` objects are -not picklable, due to the fact that they contain weakrefs and function -callbacks. In our case, this is a good thing, since if this dictionary were -picklable, it could lead to an excessively large pickle size for our value -objects that are pickled by themselves outside of the context of the parent. -The developer responsibility here is only to provide a ``__getstate__`` method -that excludes the :meth:`~MutableBase._parents` collection from the pickle -stream:: - - class MyMutableType(Mutable): - def __getstate__(self): - d = self.__dict__.copy() - d.pop('_parents', None) - return d - -With our dictionary example, we need to return the contents of the dict itself -(and also restore them on __setstate__):: - - class MutableDict(Mutable, dict): - # .... - - def __getstate__(self): - return dict(self) - - def __setstate__(self, state): - self.update(state) - -In the case that our mutable value object is pickled as it is attached to one -or more parent objects that are also part of the pickle, the :class:`.Mutable` -mixin will re-establish the :attr:`.Mutable._parents` collection on each value -object as the owning parents themselves are unpickled. - -.. 
_mutable_composites: - -Establishing Mutability on Composites -===================================== - -Composites are a special ORM feature which allow a single scalar attribute to -be assigned an object value which represents information "composed" from one -or more columns from the underlying mapped table. The usual example is that of -a geometric "point", and is introduced in :ref:`mapper_composite`. - -.. versionchanged:: 0.7 - The internals of :func:`.orm.composite` have been - greatly simplified and in-place mutation detection is no longer enabled by - default; instead, the user-defined value must detect changes on its own and - propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable` - extension provides the helper class :class:`.MutableComposite`, which is a - slight variant on the :class:`.Mutable` class. - -As is the case with :class:`.Mutable`, the user-defined composite class -subclasses :class:`.MutableComposite` as a mixin, and detects and delivers -change events to its parents via the :meth:`.MutableComposite.changed` method. -In the case of a composite class, the detection is usually via the usage of -Python descriptors (i.e. ``@property``), or alternatively via the special -Python method ``__setattr__()``. Below we expand upon the ``Point`` class -introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite` -and to also route attribute set events via ``__setattr__`` to the -:meth:`.MutableComposite.changed` method:: - - from sqlalchemy.ext.mutable import MutableComposite - - class Point(MutableComposite): - def __init__(self, x, y): - self.x = x - self.y = y - - def __setattr__(self, key, value): - "Intercept set events" - - # set the attribute - object.__setattr__(self, key, value) - - # alert all parents to the change - self.changed() - - def __composite_values__(self): - return self.x, self.y - - def __eq__(self, other): - return isinstance(other, Point) and \\ - other.x == self.x and \\ - other.y == self.y - - def __ne__(self, other): - return not self.__eq__(other) - -The :class:`.MutableComposite` class uses a Python metaclass to automatically -establish listeners for any usage of :func:`.orm.composite` that specifies our -``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class, -listeners are established which will route change events from ``Point`` -objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes:: - - from sqlalchemy.orm import composite, mapper - from sqlalchemy import Table, Column - - vertices = Table('vertices', metadata, - Column('id', Integer, primary_key=True), - Column('x1', Integer), - Column('y1', Integer), - Column('x2', Integer), - Column('y2', Integer), - ) - - class Vertex(object): - pass - - mapper(Vertex, vertices, properties={ - 'start': composite(Point, vertices.c.x1, vertices.c.y1), - 'end': composite(Point, vertices.c.x2, vertices.c.y2) - }) - -Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members -will flag the attribute as "dirty" on the parent object:: - - >>> from sqlalchemy.orm import Session - - >>> sess = Session() - >>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15)) - >>> sess.add(v1) - >>> sess.commit() - - >>> v1.end.x = 8 - >>> assert v1 in sess.dirty - True - -Coercing Mutable Composites ---------------------------- - -The :meth:`.MutableBase.coerce` method is also supported on composite types. -In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce` -method is only called for attribute set operations, not load operations. 
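-For instance, with the ``Point.coerce`` implementation shown below, a plain
-tuple assigned to a composite attribute is converted on the fly (an
-illustrative usage, reusing the ``Vertex`` mapping above)::
-
-    v1.start = (5, 6)   # coerced to Point(5, 6) on assignment
-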
-Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
-to using a :func:`.validates` validation routine for all attributes which
-make use of the custom composite type::
-
-    class Point(MutableComposite):
-        # other Point methods
-        # ...
-
-        @classmethod
-        def coerce(cls, key, value):
-            if isinstance(value, tuple):
-                value = Point(*value)
-            elif not isinstance(value, Point):
-                raise ValueError("tuple or Point expected")
-            return value
-
-.. versionadded:: 0.7.10, 0.8.0b2
-    Support for the :meth:`.MutableBase.coerce` method in conjunction with
-    objects of type :class:`.MutableComposite`.
-
-Supporting Pickling
---------------------
-
-As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
-class uses a ``weakref.WeakKeyDictionary`` available via the
-:meth:`MutableBase._parents` attribute which isn't picklable. If we need to
-pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
-to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
-Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
-the minimal form of our ``Point`` class::
-
-    class Point(MutableComposite):
-        # ...
-
-        def __getstate__(self):
-            return self.x, self.y
-
-        def __setstate__(self, state):
-            self.x, self.y = state
-
-As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
-pickling process of the parent's object-relational state so that the
-:meth:`MutableBase._parents` collection is restored to all ``Point`` objects.
-
-"""
-from ..orm.attributes import flag_modified
-from .. import event, types
-from ..orm import mapper, object_mapper, Mapper
-from ..util import memoized_property
-import weakref
-
-
-class MutableBase(object):
-    """Common base class to :class:`.Mutable`
-    and :class:`.MutableComposite`.
-
-    """
-
-    @memoized_property
-    def _parents(self):
-        """Dictionary of parent object->attribute name on the parent.
-
-        This attribute is a so-called "memoized" property.  It initializes
-        itself with a new ``weakref.WeakKeyDictionary`` the first time
-        it is accessed, returning the same object upon subsequent access.
-
-        """
-
-        return weakref.WeakKeyDictionary()
-
-    @classmethod
-    def coerce(cls, key, value):
-        """Given a value, coerce it into the target type.
-
-        Can be overridden by custom subclasses to coerce incoming
-        data into a particular type.
-
-        By default, raises ``ValueError``.
-
-        This method is called in different scenarios depending on if
-        the parent class is of type :class:`.Mutable` or of type
-        :class:`.MutableComposite`.  In the case of the former, it is called
-        for both attribute-set operations as well as during ORM loading
-        operations.  For the latter, it is only called during attribute-set
-        operations; the mechanics of the :func:`.composite` construct
-        handle coercion during load operations.
-
-        :param key: string name of the ORM-mapped attribute being set.
-        :param value: the incoming value.
-        :return: the method should return the coerced value, or raise
-         ``ValueError`` if the coercion cannot be completed.
-
-        """
-        if value is None:
-            return None
-        msg = "Attribute '%s' does not accept objects of type %s"
-        raise ValueError(msg % (key, type(value)))
-
-    @classmethod
-    def _get_listen_keys(cls, attribute):
-        """Given a descriptor attribute, return a ``set()`` of the attribute
-        keys which indicate a change in the state of this attribute.
-
-        This is normally just ``set([attribute.key])``, but can be overridden
-        to provide for additional keys.  E.g.
a :class:`.MutableComposite` - augments this set with the attribute keys associated with the columns - that comprise the composite value. - - This collection is consulted in the case of intercepting the - :meth:`.InstanceEvents.refresh` and - :meth:`.InstanceEvents.refresh_flush` events, which pass along a list - of attribute names that have been refreshed; the list is compared - against this set to determine if action needs to be taken. - - .. versionadded:: 1.0.5 - - """ - return set([attribute.key]) - - @classmethod - def _listen_on_attribute(cls, attribute, coerce, parent_cls): - """Establish this type as a mutation listener for the given - mapped descriptor. - - """ - key = attribute.key - if parent_cls is not attribute.class_: - return - - # rely on "propagate" here - parent_cls = attribute.class_ - - listen_keys = cls._get_listen_keys(attribute) - - def load(state, *args): - """Listen for objects loaded or refreshed. - - Wrap the target data member's value with - ``Mutable``. - - """ - val = state.dict.get(key, None) - if val is not None: - if coerce: - val = cls.coerce(key, val) - state.dict[key] = val - val._parents[state.obj()] = key - - def load_attrs(state, ctx, attrs): - if not attrs or listen_keys.intersection(attrs): - load(state) - - def set(target, value, oldvalue, initiator): - """Listen for set/replace events on the target - data member. - - Establish a weak reference to the parent object - on the incoming value, remove it for the one - outgoing. - - """ - if value is oldvalue: - return value - - if not isinstance(value, cls): - value = cls.coerce(key, value) - if value is not None: - value._parents[target.obj()] = key - if isinstance(oldvalue, cls): - oldvalue._parents.pop(target.obj(), None) - return value - - def pickle(state, state_dict): - val = state.dict.get(key, None) - if val is not None: - if 'ext.mutable.values' not in state_dict: - state_dict['ext.mutable.values'] = [] - state_dict['ext.mutable.values'].append(val) - - def unpickle(state, state_dict): - if 'ext.mutable.values' in state_dict: - for val in state_dict['ext.mutable.values']: - val._parents[state.obj()] = key - - event.listen(parent_cls, 'load', load, - raw=True, propagate=True) - event.listen(parent_cls, 'refresh', load_attrs, - raw=True, propagate=True) - event.listen(parent_cls, 'refresh_flush', load_attrs, - raw=True, propagate=True) - event.listen(attribute, 'set', set, - raw=True, retval=True, propagate=True) - event.listen(parent_cls, 'pickle', pickle, - raw=True, propagate=True) - event.listen(parent_cls, 'unpickle', unpickle, - raw=True, propagate=True) - - -class Mutable(MutableBase): - """Mixin that defines transparent propagation of change - events to a parent object. - - See the example in :ref:`mutable_scalars` for usage information. - - """ - - def changed(self): - """Subclasses should call this method whenever change events occur.""" - - for parent, key in self._parents.items(): - flag_modified(parent, key) - - @classmethod - def associate_with_attribute(cls, attribute): - """Establish this type as a mutation listener for the given - mapped descriptor. - - """ - cls._listen_on_attribute(attribute, True, attribute.class_) - - @classmethod - def associate_with(cls, sqltype): - """Associate this wrapper with all future mapped columns - of the given type. - - This is a convenience method that calls - ``associate_with_attribute`` automatically. - - .. warning:: - - The listeners established by this method are *global* - to all mappers, and are *not* garbage collected. 
Only use - :meth:`.associate_with` for types that are permanent to an - application, not with ad-hoc types else this will cause unbounded - growth in memory usage. - - """ - - def listen_for_type(mapper, class_): - for prop in mapper.column_attrs: - if isinstance(prop.columns[0].type, sqltype): - cls.associate_with_attribute(getattr(class_, prop.key)) - - event.listen(mapper, 'mapper_configured', listen_for_type) - - @classmethod - def as_mutable(cls, sqltype): - """Associate a SQL type with this mutable Python type. - - This establishes listeners that will detect ORM mappings against - the given type, adding mutation event trackers to those mappings. - - The type is returned, unconditionally as an instance, so that - :meth:`.as_mutable` can be used inline:: - - Table('mytable', metadata, - Column('id', Integer, primary_key=True), - Column('data', MyMutableType.as_mutable(PickleType)) - ) - - Note that the returned type is always an instance, even if a class - is given, and that only columns which are declared specifically with - that type instance receive additional instrumentation. - - To associate a particular mutable type with all occurrences of a - particular type, use the :meth:`.Mutable.associate_with` classmethod - of the particular :class:`.Mutable` subclass to establish a global - association. - - .. warning:: - - The listeners established by this method are *global* - to all mappers, and are *not* garbage collected. Only use - :meth:`.as_mutable` for types that are permanent to an application, - not with ad-hoc types else this will cause unbounded growth - in memory usage. - - """ - sqltype = types.to_instance(sqltype) - - def listen_for_type(mapper, class_): - for prop in mapper.column_attrs: - if prop.columns[0].type is sqltype: - cls.associate_with_attribute(getattr(class_, prop.key)) - - event.listen(mapper, 'mapper_configured', listen_for_type) - - return sqltype - - -class MutableComposite(MutableBase): - """Mixin that defines transparent propagation of change - events on a SQLAlchemy "composite" object to its - owning parent or parents. - - See the example in :ref:`mutable_composites` for usage information. - - """ - - @classmethod - def _get_listen_keys(cls, attribute): - return set([attribute.key]).union(attribute.property._attribute_keys) - - def changed(self): - """Subclasses should call this method whenever change events occur.""" - - for parent, key in self._parents.items(): - - prop = object_mapper(parent).get_property(key) - for value, attr_name in zip( - self.__composite_values__(), - prop._attribute_keys): - setattr(parent, attr_name, value) - - -def _setup_composite_listener(): - def _listen_for_type(mapper, class_): - for prop in mapper.iterate_properties: - if (hasattr(prop, 'composite_class') and - isinstance(prop.composite_class, type) and - issubclass(prop.composite_class, MutableComposite)): - prop.composite_class._listen_on_attribute( - getattr(class_, prop.key), False, class_) - if not event.contains(Mapper, "mapper_configured", _listen_for_type): - event.listen(Mapper, 'mapper_configured', _listen_for_type) -_setup_composite_listener() - - -class MutableDict(Mutable, dict): - """A dictionary type that implements :class:`.Mutable`. - - .. 
versionadded:: 0.8 - - """ - - def __setitem__(self, key, value): - """Detect dictionary set events and emit change events.""" - dict.__setitem__(self, key, value) - self.changed() - - def setdefault(self, key, value): - result = dict.setdefault(self, key, value) - self.changed() - return result - - def __delitem__(self, key): - """Detect dictionary del events and emit change events.""" - dict.__delitem__(self, key) - self.changed() - - def update(self, *a, **kw): - dict.update(self, *a, **kw) - self.changed() - - def clear(self): - dict.clear(self) - self.changed() - - @classmethod - def coerce(cls, key, value): - """Convert plain dictionary to instance of this class.""" - if not isinstance(value, cls): - if isinstance(value, dict): - return cls(value) - return Mutable.coerce(key, value) - else: - return value - - def __getstate__(self): - return dict(self) - - def __setstate__(self, state): - self.update(state) diff --git a/python/sqlalchemy/ext/orderinglist.py b/python/sqlalchemy/ext/orderinglist.py deleted file mode 100644 index ac31c7cf..00000000 --- a/python/sqlalchemy/ext/orderinglist.py +++ /dev/null @@ -1,380 +0,0 @@ -# ext/orderinglist.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""A custom list that manages index/position information for contained -elements. - -:author: Jason Kirtland - -``orderinglist`` is a helper for mutable ordered relationships. It will -intercept list operations performed on a :func:`.relationship`-managed -collection and -automatically synchronize changes in list position onto a target scalar -attribute. - -Example: A ``slide`` table, where each row refers to zero or more entries -in a related ``bullet`` table. The bullets within a slide are -displayed in order based on the value of the ``position`` column in the -``bullet`` table. As entries are reordered in memory, the value of the -``position`` attribute should be updated to reflect the new sort order:: - - - Base = declarative_base() - - class Slide(Base): - __tablename__ = 'slide' - - id = Column(Integer, primary_key=True) - name = Column(String) - - bullets = relationship("Bullet", order_by="Bullet.position") - - class Bullet(Base): - __tablename__ = 'bullet' - id = Column(Integer, primary_key=True) - slide_id = Column(Integer, ForeignKey('slide.id')) - position = Column(Integer) - text = Column(String) - -The standard relationship mapping will produce a list-like attribute on each -``Slide`` containing all related ``Bullet`` objects, -but coping with changes in ordering is not handled automatically. -When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position`` -attribute will remain unset until manually assigned. When the ``Bullet`` -is inserted into the middle of the list, the following ``Bullet`` objects -will also need to be renumbered. - -The :class:`.OrderingList` object automates this task, managing the -``position`` attribute on all ``Bullet`` objects in the collection. 
It is
-constructed using the :func:`.ordering_list` factory::
-
-    from sqlalchemy.ext.orderinglist import ordering_list
-
-    Base = declarative_base()
-
-    class Slide(Base):
-        __tablename__ = 'slide'
-
-        id = Column(Integer, primary_key=True)
-        name = Column(String)
-
-        bullets = relationship("Bullet", order_by="Bullet.position",
-                                collection_class=ordering_list('position'))
-
-    class Bullet(Base):
-        __tablename__ = 'bullet'
-        id = Column(Integer, primary_key=True)
-        slide_id = Column(Integer, ForeignKey('slide.id'))
-        position = Column(Integer)
-        text = Column(String)
-
-With the above mapping, the ``Bullet.position`` attribute is managed::
-
-    s = Slide()
-    s.bullets.append(Bullet())
-    s.bullets.append(Bullet())
-    s.bullets[1].position
-    >>> 1
-    s.bullets.insert(1, Bullet())
-    s.bullets[2].position
-    >>> 2
-
-The :class:`.OrderingList` construct only works with **changes** to a
-collection, and not the initial load from the database, and requires that the
-list be sorted when loaded.  Therefore, be sure to specify ``order_by`` on the
-:func:`.relationship` against the target ordering attribute, so that the
-ordering is correct when first loaded.
-
-.. warning::
-
-  :class:`.OrderingList` only provides limited functionality when a primary
-  key column or unique column is the target of the sort.  Operations
-  that are unsupported or are problematic include:
-
-    * two entries must trade values.  This is not supported directly in the
-      case of a primary key or unique constraint because it means at least
-      one row would need to be temporarily removed first, or changed to
-      a third, neutral value while the switch occurs.
-
-    * an entry must be deleted in order to make room for a new entry.
-      SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
-      single flush.  In the case of a primary key, it will trade
-      an INSERT/DELETE of the same primary key for an UPDATE statement in order
-      to lessen the impact of this limitation, however this does not take place
-      for a UNIQUE column.
-      A future feature will allow the "DELETE before INSERT" behavior to be
-      possible, alleviating this limitation, though this feature will require
-      explicit configuration at the mapper level for sets of columns that
-      are to be handled in this way.
-
-:func:`.ordering_list` takes the name of the related object's ordering
-attribute as an argument.  By default, the zero-based integer index of the
-object's position in the :func:`.ordering_list` is synchronized with the
-ordering attribute: index 0 will get position 0, index 1 position 1, etc.  To
-start numbering at 1 or some other integer, provide ``count_from=1``.
-
-
-"""
-from ..orm.collections import collection, collection_adapter
-from .. import util
-
-__all__ = ['ordering_list']
-
-
-def ordering_list(attr, count_from=None, **kw):
-    """Prepares an :class:`OrderingList` factory for use in mapper definitions.
-
-    Returns an object suitable for use as an argument to a Mapper
-    relationship's ``collection_class`` option.  e.g.::
-
-        from sqlalchemy.ext.orderinglist import ordering_list
-
-        class Slide(Base):
-            __tablename__ = 'slide'
-
-            id = Column(Integer, primary_key=True)
-            name = Column(String)
-
-            bullets = relationship("Bullet", order_by="Bullet.position",
-                                    collection_class=ordering_list('position'))
-
-    :param attr:
-      Name of the mapped attribute to use for storage and retrieval of
-      ordering information
-
-    :param count_from:
-      Set up an integer-based ordering, starting at ``count_from``.
For - example, ``ordering_list('pos', count_from=1)`` would create a 1-based - list in SQL, storing the value in the 'pos' column. Ignored if - ``ordering_func`` is supplied. - - Additional arguments are passed to the :class:`.OrderingList` constructor. - - """ - - kw = _unsugar_count_from(count_from=count_from, **kw) - return lambda: OrderingList(attr, **kw) - - -# Ordering utility functions - - -def count_from_0(index, collection): - """Numbering function: consecutive integers starting at 0.""" - - return index - - -def count_from_1(index, collection): - """Numbering function: consecutive integers starting at 1.""" - - return index + 1 - - -def count_from_n_factory(start): - """Numbering function: consecutive integers starting at arbitrary start.""" - - def f(index, collection): - return index + start - try: - f.__name__ = 'count_from_%i' % start - except TypeError: - pass - return f - - -def _unsugar_count_from(**kw): - """Builds counting functions from keyword arguments. - - Keyword argument filter, prepares a simple ``ordering_func`` from a - ``count_from`` argument, otherwise passes ``ordering_func`` on unchanged. - """ - - count_from = kw.pop('count_from', None) - if kw.get('ordering_func', None) is None and count_from is not None: - if count_from == 0: - kw['ordering_func'] = count_from_0 - elif count_from == 1: - kw['ordering_func'] = count_from_1 - else: - kw['ordering_func'] = count_from_n_factory(count_from) - return kw - - -class OrderingList(list): - """A custom list that manages position information for its children. - - The :class:`.OrderingList` object is normally set up using the - :func:`.ordering_list` factory function, used in conjunction with - the :func:`.relationship` function. - - """ - - def __init__(self, ordering_attr=None, ordering_func=None, - reorder_on_append=False): - """A custom list that manages position information for its children. - - ``OrderingList`` is a ``collection_class`` list implementation that - syncs position in a Python list with a position attribute on the - mapped objects. - - This implementation relies on the list starting in the proper order, - so be **sure** to put an ``order_by`` on your relationship. - - :param ordering_attr: - Name of the attribute that stores the object's order in the - relationship. - - :param ordering_func: Optional. A function that maps the position in - the Python list to a value to store in the - ``ordering_attr``. Values returned are usually (but need not be!) - integers. - - An ``ordering_func`` is called with two positional parameters: the - index of the element in the list, and the list itself. - - If omitted, Python list indexes are used for the attribute values. - Two basic pre-built numbering functions are provided in this module: - ``count_from_0`` and ``count_from_1``. For more exotic examples - like stepped numbering, alphabetical and Fibonacci numbering, see - the unit tests. - - :param reorder_on_append: - Default False. When appending an object with an existing (non-None) - ordering value, that value will be left untouched unless - ``reorder_on_append`` is true. This is an optimization to avoid a - variety of dangerous unexpected database writes. - - SQLAlchemy will add instances to the list via append() when your - object loads. If for some reason the result set from the database - skips a step in the ordering (say, row '1' is missing but you get - '2', '3', and '4'), reorder_on_append=True would immediately - renumber the items to '1', '2', '3'. 
If you have multiple sessions - making changes, any of whom happen to load this collection even in - passing, all of the sessions would try to "clean up" the numbering - in their commits, possibly causing all but one to fail with a - concurrent modification error. - - Recommend leaving this with the default of False, and just call - ``reorder()`` if you're doing ``append()`` operations with - previously ordered instances or when doing some housekeeping after - manual sql operations. - - """ - self.ordering_attr = ordering_attr - if ordering_func is None: - ordering_func = count_from_0 - self.ordering_func = ordering_func - self.reorder_on_append = reorder_on_append - - # More complex serialization schemes (multi column, e.g.) are possible by - # subclassing and reimplementing these two methods. - def _get_order_value(self, entity): - return getattr(entity, self.ordering_attr) - - def _set_order_value(self, entity, value): - setattr(entity, self.ordering_attr, value) - - def reorder(self): - """Synchronize ordering for the entire collection. - - Sweeps through the list and ensures that each object has accurate - ordering information set. - - """ - for index, entity in enumerate(self): - self._order_entity(index, entity, True) - - # As of 0.5, _reorder is no longer semi-private - _reorder = reorder - - def _order_entity(self, index, entity, reorder=True): - have = self._get_order_value(entity) - - # Don't disturb existing ordering if reorder is False - if have is not None and not reorder: - return - - should_be = self.ordering_func(index, self) - if have != should_be: - self._set_order_value(entity, should_be) - - def append(self, entity): - super(OrderingList, self).append(entity) - self._order_entity(len(self) - 1, entity, self.reorder_on_append) - - def _raw_append(self, entity): - """Append without any ordering behavior.""" - - super(OrderingList, self).append(entity) - _raw_append = collection.adds(1)(_raw_append) - - def insert(self, index, entity): - super(OrderingList, self).insert(index, entity) - self._reorder() - - def remove(self, entity): - super(OrderingList, self).remove(entity) - - adapter = collection_adapter(self) - if adapter and adapter._referenced_by_owner: - self._reorder() - - def pop(self, index=-1): - entity = super(OrderingList, self).pop(index) - self._reorder() - return entity - - def __setitem__(self, index, entity): - if isinstance(index, slice): - step = index.step or 1 - start = index.start or 0 - if start < 0: - start += len(self) - stop = index.stop or len(self) - if stop < 0: - stop += len(self) - - for i in range(start, stop, step): - self.__setitem__(i, entity[i]) - else: - self._order_entity(index, entity, True) - super(OrderingList, self).__setitem__(index, entity) - - def __delitem__(self, index): - super(OrderingList, self).__delitem__(index) - self._reorder() - - def __setslice__(self, start, end, values): - super(OrderingList, self).__setslice__(start, end, values) - self._reorder() - - def __delslice__(self, start, end): - super(OrderingList, self).__delslice__(start, end) - self._reorder() - - def __reduce__(self): - return _reconstitute, (self.__class__, self.__dict__, list(self)) - - for func_name, func in list(locals().items()): - if (util.callable(func) and func.__name__ == func_name and - not func.__doc__ and hasattr(list, func_name)): - func.__doc__ = getattr(list, func_name).__doc__ - del func_name, func - - -def _reconstitute(cls, dict_, items): - """ Reconstitute an :class:`.OrderingList`. 
- - This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for - unpickling :class:`.OrderingList` objects. - - """ - obj = cls.__new__(cls) - obj.__dict__.update(dict_) - list.extend(obj, items) - return obj diff --git a/python/sqlalchemy/ext/serializer.py b/python/sqlalchemy/ext/serializer.py deleted file mode 100644 index 555f3760..00000000 --- a/python/sqlalchemy/ext/serializer.py +++ /dev/null @@ -1,159 +0,0 @@ -# ext/serializer.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Serializer/Deserializer objects for usage with SQLAlchemy query structures, -allowing "contextual" deserialization. - -Any SQLAlchemy query structure, either based on sqlalchemy.sql.* -or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session -etc. which are referenced by the structure are not persisted in serialized -form, but are instead re-associated with the query structure -when it is deserialized. - -Usage is nearly the same as that of the standard Python pickle module:: - - from sqlalchemy.ext.serializer import loads, dumps - metadata = MetaData(bind=some_engine) - Session = scoped_session(sessionmaker()) - - # ... define mappers - - query = Session.query(MyClass). - filter(MyClass.somedata=='foo').order_by(MyClass.sortkey) - - # pickle the query - serialized = dumps(query) - - # unpickle. Pass in metadata + scoped_session - query2 = loads(serialized, metadata, Session) - - print query2.all() - -Similar restrictions as when using raw pickle apply; mapped classes must be -themselves be pickleable, meaning they are importable from a module-level -namespace. - -The serializer module is only appropriate for query structures. It is not -needed for: - -* instances of user-defined classes. These contain no references to engines, - sessions or expression constructs in the typical case and can be serialized - directly. - -* Table metadata that is to be loaded entirely from the serialized structure - (i.e. is not already declared in the application). Regular - pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object, - typically one which was reflected from an existing database at some previous - point in time. The serializer module is specifically for the opposite case, - where the Table metadata is already present in memory. - -""" - -from ..orm import class_mapper -from ..orm.session import Session -from ..orm.mapper import Mapper -from ..orm.interfaces import MapperProperty -from ..orm.attributes import QueryableAttribute -from .. 
import Table, Column -from ..engine import Engine -from ..util import pickle, byte_buffer, b64encode, b64decode, text_type -import re - - -__all__ = ['Serializer', 'Deserializer', 'dumps', 'loads'] - - -def Serializer(*args, **kw): - pickler = pickle.Pickler(*args, **kw) - - def persistent_id(obj): - # print "serializing:", repr(obj) - if isinstance(obj, QueryableAttribute): - cls = obj.impl.class_ - key = obj.impl.key - id = "attribute:" + key + ":" + b64encode(pickle.dumps(cls)) - elif isinstance(obj, Mapper) and not obj.non_primary: - id = "mapper:" + b64encode(pickle.dumps(obj.class_)) - elif isinstance(obj, MapperProperty) and not obj.parent.non_primary: - id = "mapperprop:" + b64encode(pickle.dumps(obj.parent.class_)) + \ - ":" + obj.key - elif isinstance(obj, Table): - id = "table:" + text_type(obj.key) - elif isinstance(obj, Column) and isinstance(obj.table, Table): - id = "column:" + \ - text_type(obj.table.key) + ":" + text_type(obj.key) - elif isinstance(obj, Session): - id = "session:" - elif isinstance(obj, Engine): - id = "engine:" - else: - return None - return id - - pickler.persistent_id = persistent_id - return pickler - -our_ids = re.compile( - r'(mapperprop|mapper|table|column|session|attribute|engine):(.*)') - - -def Deserializer(file, metadata=None, scoped_session=None, engine=None): - unpickler = pickle.Unpickler(file) - - def get_engine(): - if engine: - return engine - elif scoped_session and scoped_session().bind: - return scoped_session().bind - elif metadata and metadata.bind: - return metadata.bind - else: - return None - - def persistent_load(id): - m = our_ids.match(text_type(id)) - if not m: - return None - else: - type_, args = m.group(1, 2) - if type_ == 'attribute': - key, clsarg = args.split(":") - cls = pickle.loads(b64decode(clsarg)) - return getattr(cls, key) - elif type_ == "mapper": - cls = pickle.loads(b64decode(args)) - return class_mapper(cls) - elif type_ == "mapperprop": - mapper, keyname = args.split(':') - cls = pickle.loads(b64decode(mapper)) - return class_mapper(cls).attrs[keyname] - elif type_ == "table": - return metadata.tables[args] - elif type_ == "column": - table, colname = args.split(':') - return metadata.tables[table].c[colname] - elif type_ == "session": - return scoped_session() - elif type_ == "engine": - return get_engine() - else: - raise Exception("Unknown token: %s" % type_) - unpickler.persistent_load = persistent_load - return unpickler - - -def dumps(obj, protocol=0): - buf = byte_buffer() - pickler = Serializer(buf, protocol) - pickler.dump(obj) - return buf.getvalue() - - -def loads(data, metadata=None, scoped_session=None, engine=None): - buf = byte_buffer(data) - unpickler = Deserializer(buf, metadata, scoped_session, engine) - return unpickler.load() diff --git a/python/sqlalchemy/inspection.py b/python/sqlalchemy/inspection.py deleted file mode 100644 index a4738cc6..00000000 --- a/python/sqlalchemy/inspection.py +++ /dev/null @@ -1,93 +0,0 @@ -# sqlalchemy/inspect.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""The inspection module provides the :func:`.inspect` function, -which delivers runtime information about a wide variety -of SQLAlchemy objects, both within the Core as well as the -ORM. - -The :func:`.inspect` function is the entry point to SQLAlchemy's -public API for viewing the configuration and construction -of in-memory objects. 
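For example (a minimal sketch;
-``User`` is assumed to be a mapped class and ``engine`` an
-:class:`.Engine`)::
-
-    from sqlalchemy import inspect
-
-    mapper = inspect(User)     # the :class:`.Mapper` managing the class
-    insp = inspect(engine)     # an :class:`.Inspector` for the engine
-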
Depending on the type of object -passed to :func:`.inspect`, the return value will either be -a related object which provides a known interface, or in many -cases it will return the object itself. - -The rationale for :func:`.inspect` is twofold. One is that -it replaces the need to be aware of a large variety of "information -getting" functions in SQLAlchemy, such as :meth:`.Inspector.from_engine`, -:func:`.orm.attributes.instance_state`, :func:`.orm.class_mapper`, -and others. The other is that the return value of :func:`.inspect` -is guaranteed to obey a documented API, thus allowing third party -tools which build on top of SQLAlchemy configurations to be constructed -in a forwards-compatible way. - -.. versionadded:: 0.8 The :func:`.inspect` system is introduced - as of version 0.8. - -""" - -from . import util, exc -_registrars = util.defaultdict(list) - - -def inspect(subject, raiseerr=True): - """Produce an inspection object for the given target. - - The returned value in some cases may be the - same object as the one given, such as if a - :class:`.Mapper` object is passed. In other - cases, it will be an instance of the registered - inspection type for the given object, such as - if an :class:`.engine.Engine` is passed, an - :class:`.Inspector` object is returned. - - :param subject: the subject to be inspected. - :param raiseerr: When ``True``, if the given subject - does not - correspond to a known SQLAlchemy inspected type, - :class:`sqlalchemy.exc.NoInspectionAvailable` - is raised. If ``False``, ``None`` is returned. - - """ - type_ = type(subject) - for cls in type_.__mro__: - if cls in _registrars: - reg = _registrars[cls] - if reg is True: - return subject - ret = reg(subject) - if ret is not None: - break - else: - reg = ret = None - - if raiseerr and ( - reg is None or ret is None - ): - raise exc.NoInspectionAvailable( - "No inspection system is " - "available for object of type %s" % - type_) - return ret - - -def _inspects(*types): - def decorate(fn_or_cls): - for type_ in types: - if type_ in _registrars: - raise AssertionError( - "Type %s is already " - "registered" % type_) - _registrars[type_] = fn_or_cls - return fn_or_cls - return decorate - - -def _self_inspects(cls): - _inspects(cls)(True) - return cls diff --git a/python/sqlalchemy/interfaces.py b/python/sqlalchemy/interfaces.py deleted file mode 100644 index 717e99b5..00000000 --- a/python/sqlalchemy/interfaces.py +++ /dev/null @@ -1,312 +0,0 @@ -# sqlalchemy/interfaces.py -# Copyright (C) 2007-2015 the SQLAlchemy authors and contributors -# -# Copyright (C) 2007 Jason Kirtland jek@discorporate.us -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Deprecated core event interfaces. - -This module is **deprecated** and is superseded by the -event system. - -""" - -from . import event, util - - -class PoolListener(object): - """Hooks into the lifecycle of connections in a :class:`.Pool`. - - .. note:: - - :class:`.PoolListener` is deprecated. Please - refer to :class:`.PoolEvents`. - - Usage:: - - class MyListener(PoolListener): - def connect(self, dbapi_con, con_record): - '''perform connect operations''' - # etc. 
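-
-            # (illustrative addition) the remaining hooks follow the
-            # signatures documented on this class:
-            def checkout(self, dbapi_con, con_record, con_proxy):
-                '''perform checkout operations'''
-
-            def checkin(self, dbapi_con, con_record):
-                '''perform checkin operations'''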
- - # create a new pool with a listener - p = QueuePool(..., listeners=[MyListener()]) - - # add a listener after the fact - p.add_listener(MyListener()) - - # usage with create_engine() - e = create_engine("url://", listeners=[MyListener()]) - - All of the standard connection :class:`~sqlalchemy.pool.Pool` types can - accept event listeners for key connection lifecycle events: - creation, pool check-out and check-in. There are no events fired - when a connection closes. - - For any given DB-API connection, there will be one ``connect`` - event, `n` number of ``checkout`` events, and either `n` or `n - 1` - ``checkin`` events. (If a ``Connection`` is detached from its - pool via the ``detach()`` method, it won't be checked back in.) - - These are low-level events for low-level objects: raw Python - DB-API connections, without the conveniences of the SQLAlchemy - ``Connection`` wrapper, ``Dialect`` services or ``ClauseElement`` - execution. If you execute SQL through the connection, explicitly - closing all cursors and other resources is recommended. - - Events also receive a ``_ConnectionRecord``, a long-lived internal - ``Pool`` object that basically represents a "slot" in the - connection pool. ``_ConnectionRecord`` objects have one public - attribute of note: ``info``, a dictionary whose contents are - scoped to the lifetime of the DB-API connection managed by the - record. You can use this shared storage area however you like. - - There is no need to subclass ``PoolListener`` to handle events. - Any class that implements one or more of these methods can be used - as a pool listener. The ``Pool`` will inspect the methods - provided by a listener object and add the listener to one or more - internal event queues based on its capabilities. In terms of - efficiency and function call overhead, you're much better off only - providing implementations for the hooks you'll be using. - - """ - - @classmethod - def _adapt_listener(cls, self, listener): - """Adapt a :class:`.PoolListener` to individual - :class:`event.Dispatch` events. - - """ - - listener = util.as_interface(listener, - methods=('connect', 'first_connect', - 'checkout', 'checkin')) - if hasattr(listener, 'connect'): - event.listen(self, 'connect', listener.connect) - if hasattr(listener, 'first_connect'): - event.listen(self, 'first_connect', listener.first_connect) - if hasattr(listener, 'checkout'): - event.listen(self, 'checkout', listener.checkout) - if hasattr(listener, 'checkin'): - event.listen(self, 'checkin', listener.checkin) - - def connect(self, dbapi_con, con_record): - """Called once for each new DB-API connection or Pool's ``creator()``. - - dbapi_con - A newly connected raw DB-API connection (not a SQLAlchemy - ``Connection`` wrapper). - - con_record - The ``_ConnectionRecord`` that persistently manages the connection - - """ - - def first_connect(self, dbapi_con, con_record): - """Called exactly once for the first DB-API connection. - - dbapi_con - A newly connected raw DB-API connection (not a SQLAlchemy - ``Connection`` wrapper). - - con_record - The ``_ConnectionRecord`` that persistently manages the connection - - """ - - def checkout(self, dbapi_con, con_record, con_proxy): - """Called when a connection is retrieved from the Pool. - - dbapi_con - A raw DB-API connection - - con_record - The ``_ConnectionRecord`` that persistently manages the connection - - con_proxy - The ``_ConnectionFairy`` which manages the connection for the span of - the current checkout. 
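A hedged sketch of the listener pattern described above -- any object providing the matching method names works; the SQLite URL is only a placeholder::

    import time
    from sqlalchemy import create_engine
    from sqlalchemy.interfaces import PoolListener

    class ConnectionTagger(PoolListener):
        def connect(self, dbapi_con, con_record):
            # `info` persists for the life of this DB-API connection
            con_record.info['connected_at'] = time.time()

        def checkout(self, dbapi_con, con_record, con_proxy):
            print("checked out, created at", con_record.info['connected_at'])

    engine = create_engine("sqlite://", listeners=[ConnectionTagger()])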
- - If you raise an ``exc.DisconnectionError``, the current - connection will be disposed and a fresh connection retrieved. - Processing of all checkout listeners will abort and restart - using the new connection. - """ - - def checkin(self, dbapi_con, con_record): - """Called when a connection returns to the pool. - - Note that the connection may be closed, and may be None if the - connection has been invalidated. ``checkin`` will not be called - for detached connections. (They do not return to the pool.) - - dbapi_con - A raw DB-API connection - - con_record - The ``_ConnectionRecord`` that persistently manages the connection - - """ - - -class ConnectionProxy(object): - """Allows interception of statement execution by Connections. - - .. note:: - - :class:`.ConnectionProxy` is deprecated. Please - refer to :class:`.ConnectionEvents`. - - Either or both of the ``execute()`` and ``cursor_execute()`` - may be implemented to intercept compiled statement and - cursor level executions, e.g.:: - - class MyProxy(ConnectionProxy): - def execute(self, conn, execute, clauseelement, - *multiparams, **params): - print "compiled statement:", clauseelement - return execute(clauseelement, *multiparams, **params) - - def cursor_execute(self, execute, cursor, statement, - parameters, context, executemany): - print "raw statement:", statement - return execute(cursor, statement, parameters, context) - - The ``execute`` argument is a function that will fulfill the default - execution behavior for the operation. The signature illustrated - in the example should be used. - - The proxy is installed into an :class:`~sqlalchemy.engine.Engine` via - the ``proxy`` argument:: - - e = create_engine('someurl://', proxy=MyProxy()) - - """ - - @classmethod - def _adapt_listener(cls, self, listener): - - def adapt_execute(conn, clauseelement, multiparams, params): - - def execute_wrapper(clauseelement, *multiparams, **params): - return clauseelement, multiparams, params - - return listener.execute(conn, execute_wrapper, - clauseelement, *multiparams, - **params) - - event.listen(self, 'before_execute', adapt_execute) - - def adapt_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - - def execute_wrapper( - cursor, - statement, - parameters, - context, - ): - return statement, parameters - - return listener.cursor_execute( - execute_wrapper, - cursor, - statement, - parameters, - context, - executemany, - ) - - event.listen(self, 'before_cursor_execute', adapt_cursor_execute) - - def do_nothing_callback(*arg, **kw): - pass - - def adapt_listener(fn): - - def go(conn, *arg, **kw): - fn(conn, do_nothing_callback, *arg, **kw) - - return util.update_wrapper(go, fn) - - event.listen(self, 'begin', adapt_listener(listener.begin)) - event.listen(self, 'rollback', - adapt_listener(listener.rollback)) - event.listen(self, 'commit', adapt_listener(listener.commit)) - event.listen(self, 'savepoint', - adapt_listener(listener.savepoint)) - event.listen(self, 'rollback_savepoint', - adapt_listener(listener.rollback_savepoint)) - event.listen(self, 'release_savepoint', - adapt_listener(listener.release_savepoint)) - event.listen(self, 'begin_twophase', - adapt_listener(listener.begin_twophase)) - event.listen(self, 'prepare_twophase', - adapt_listener(listener.prepare_twophase)) - event.listen(self, 'rollback_twophase', - adapt_listener(listener.rollback_twophase)) - event.listen(self, 'commit_twophase', - adapt_listener(listener.commit_twophase)) - - def execute(self, conn, execute, clauseelement, 
*multiparams, **params): - """Intercept high level execute() events.""" - - return execute(clauseelement, *multiparams, **params) - - def cursor_execute(self, execute, cursor, statement, parameters, - context, executemany): - """Intercept low-level cursor execute() events.""" - - return execute(cursor, statement, parameters, context) - - def begin(self, conn, begin): - """Intercept begin() events.""" - - return begin() - - def rollback(self, conn, rollback): - """Intercept rollback() events.""" - - return rollback() - - def commit(self, conn, commit): - """Intercept commit() events.""" - - return commit() - - def savepoint(self, conn, savepoint, name=None): - """Intercept savepoint() events.""" - - return savepoint(name=name) - - def rollback_savepoint(self, conn, rollback_savepoint, name, context): - """Intercept rollback_savepoint() events.""" - - return rollback_savepoint(name, context) - - def release_savepoint(self, conn, release_savepoint, name, context): - """Intercept release_savepoint() events.""" - - return release_savepoint(name, context) - - def begin_twophase(self, conn, begin_twophase, xid): - """Intercept begin_twophase() events.""" - - return begin_twophase(xid) - - def prepare_twophase(self, conn, prepare_twophase, xid): - """Intercept prepare_twophase() events.""" - - return prepare_twophase(xid) - - def rollback_twophase(self, conn, rollback_twophase, xid, is_prepared): - """Intercept rollback_twophase() events.""" - - return rollback_twophase(xid, is_prepared) - - def commit_twophase(self, conn, commit_twophase, xid, is_prepared): - """Intercept commit_twophase() events.""" - - return commit_twophase(xid, is_prepared) diff --git a/python/sqlalchemy/log.py b/python/sqlalchemy/log.py deleted file mode 100644 index c23412e3..00000000 --- a/python/sqlalchemy/log.py +++ /dev/null @@ -1,217 +0,0 @@ -# sqlalchemy/log.py -# Copyright (C) 2006-2015 the SQLAlchemy authors and contributors -# -# Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Logging control and utilities. - -Control of logging for SA can be performed from the regular python logging -module. The regular dotted module namespace is used, starting at -'sqlalchemy'. For class-level logging, the class name is appended. - -The "echo" keyword parameter, available on SQLA :class:`.Engine` -and :class:`.Pool` objects, corresponds to a logger specific to that -instance only. - -""" - -import logging -import sys - -# set initial level to WARN. This so that -# log statements don't occur in the absence of explicit -# logging being enabled for 'sqlalchemy'. -rootlogger = logging.getLogger('sqlalchemy') -if rootlogger.level == logging.NOTSET: - rootlogger.setLevel(logging.WARN) - - -def _add_default_handler(logger): - handler = logging.StreamHandler(sys.stdout) - handler.setFormatter(logging.Formatter( - '%(asctime)s %(levelname)s %(name)s %(message)s')) - logger.addHandler(handler) - - -_logged_classes = set() - - -def class_logger(cls): - logger = logging.getLogger(cls.__module__ + "." 
+ cls.__name__) - cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG) - cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO) - cls.logger = logger - _logged_classes.add(cls) - return cls - - -class Identified(object): - logging_name = None - - def _should_log_debug(self): - return self.logger.isEnabledFor(logging.DEBUG) - - def _should_log_info(self): - return self.logger.isEnabledFor(logging.INFO) - - -class InstanceLogger(object): - """A logger adapter (wrapper) for :class:`.Identified` subclasses. - - This allows multiple instances (e.g. Engine or Pool instances) - to share a logger, but have its verbosity controlled on a - per-instance basis. - - The basic functionality is to return a logging level - which is based on an instance's echo setting. - - Default implementation is: - - 'debug' -> logging.DEBUG - True -> logging.INFO - False -> Effective level of underlying logger - (logging.WARNING by default) - None -> same as False - """ - - # Map echo settings to logger levels - _echo_map = { - None: logging.NOTSET, - False: logging.NOTSET, - True: logging.INFO, - 'debug': logging.DEBUG, - } - - def __init__(self, echo, name): - self.echo = echo - self.logger = logging.getLogger(name) - - # if echo flag is enabled and no handlers, - # add a handler to the list - if self._echo_map[echo] <= logging.INFO \ - and not self.logger.handlers: - _add_default_handler(self.logger) - - # - # Boilerplate convenience methods - # - def debug(self, msg, *args, **kwargs): - """Delegate a debug call to the underlying logger.""" - - self.log(logging.DEBUG, msg, *args, **kwargs) - - def info(self, msg, *args, **kwargs): - """Delegate an info call to the underlying logger.""" - - self.log(logging.INFO, msg, *args, **kwargs) - - def warning(self, msg, *args, **kwargs): - """Delegate a warning call to the underlying logger.""" - - self.log(logging.WARNING, msg, *args, **kwargs) - - warn = warning - - def error(self, msg, *args, **kwargs): - """ - Delegate an error call to the underlying logger. - """ - self.log(logging.ERROR, msg, *args, **kwargs) - - def exception(self, msg, *args, **kwargs): - """Delegate an exception call to the underlying logger.""" - - kwargs["exc_info"] = 1 - self.log(logging.ERROR, msg, *args, **kwargs) - - def critical(self, msg, *args, **kwargs): - """Delegate a critical call to the underlying logger.""" - - self.log(logging.CRITICAL, msg, *args, **kwargs) - - def log(self, level, msg, *args, **kwargs): - """Delegate a log call to the underlying logger. - - The level here is determined by the echo - flag as well as that of the underlying logger, and - logger._log() is called directly. - - """ - - # inline the logic from isEnabledFor(), - # getEffectiveLevel(), to avoid overhead. 
- - if self.logger.manager.disable >= level: - return - - selected_level = self._echo_map[self.echo] - if selected_level == logging.NOTSET: - selected_level = self.logger.getEffectiveLevel() - - if level >= selected_level: - self.logger._log(level, msg, args, **kwargs) - - def isEnabledFor(self, level): - """Is this logger enabled for level 'level'?""" - - if self.logger.manager.disable >= level: - return False - return level >= self.getEffectiveLevel() - - def getEffectiveLevel(self): - """What's the effective level for this logger?""" - - level = self._echo_map[self.echo] - if level == logging.NOTSET: - level = self.logger.getEffectiveLevel() - return level - - -def instance_logger(instance, echoflag=None): - """create a logger for an instance that implements :class:`.Identified`.""" - - if instance.logging_name: - name = "%s.%s.%s" % (instance.__class__.__module__, - instance.__class__.__name__, - instance.logging_name) - else: - name = "%s.%s" % (instance.__class__.__module__, - instance.__class__.__name__) - - instance._echo = echoflag - - if echoflag in (False, None): - # if no echo setting or False, return a Logger directly, - # avoiding overhead of filtering - logger = logging.getLogger(name) - else: - # if a specified echo flag, return an EchoLogger, - # which checks the flag, overrides normal log - # levels by calling logger._log() - logger = InstanceLogger(echoflag, name) - - instance.logger = logger - - -class echo_property(object): - __doc__ = """\ - When ``True``, enable log output for this element. - - This has the effect of setting the Python logging level for the namespace - of this element's class and object reference. A value of boolean ``True`` - indicates that the loglevel ``logging.INFO`` will be set for the logger, - whereas the string value ``debug`` will set the loglevel to - ``logging.DEBUG``. - """ - - def __get__(self, instance, owner): - if instance is None: - return self - else: - return instance._echo - - def __set__(self, instance, value): - instance_logger(instance, echoflag=value) diff --git a/python/sqlalchemy/orm/__init__.py b/python/sqlalchemy/orm/__init__.py deleted file mode 100644 index e02a271e..00000000 --- a/python/sqlalchemy/orm/__init__.py +++ /dev/null @@ -1,270 +0,0 @@ -# orm/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Functional constructs for ORM configuration. - -See the SQLAlchemy object relational tutorial and mapper configuration -documentation for an overview of how this module is used. - -""" - -from . import exc -from .mapper import ( - Mapper, - _mapper_registry, - class_mapper, - configure_mappers, - reconstructor, - validates -) -from .interfaces import ( - EXT_CONTINUE, - EXT_STOP, - PropComparator, -) -from .deprecated_interfaces import ( - MapperExtension, - SessionExtension, - AttributeExtension, -) -from .util import ( - aliased, - join, - object_mapper, - outerjoin, - polymorphic_union, - was_deleted, - with_parent, - with_polymorphic, -) -from .properties import ColumnProperty -from .relationships import RelationshipProperty -from .descriptor_props import ( - ComparableProperty, - CompositeProperty, - SynonymProperty, -) -from .relationships import ( - foreign, - remote, -) -from .session import ( - Session, - object_session, - sessionmaker, - make_transient, - make_transient_to_detached -) -from .scoping import ( - scoped_session -) -from . 
import mapper as mapperlib -from .query import AliasOption, Query, Bundle -from ..util.langhelpers import public_factory -from .. import util as _sa_util -from . import strategies as _strategies - - -def create_session(bind=None, **kwargs): - """Create a new :class:`.Session` - with no automation enabled by default. - - This function is used primarily for testing. The usual - route to :class:`.Session` creation is via its constructor - or the :func:`.sessionmaker` function. - - :param bind: optional, a single Connectable to use for all - database access in the created - :class:`~sqlalchemy.orm.session.Session`. - - :param \*\*kwargs: optional, passed through to the - :class:`.Session` constructor. - - :returns: an :class:`~sqlalchemy.orm.session.Session` instance - - The defaults of create_session() are the opposite of that of - :func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are - False, ``autocommit`` is True. In this sense the session acts - more like the "classic" SQLAlchemy 0.3 session with these. - - Usage:: - - >>> from sqlalchemy.orm import create_session - >>> session = create_session() - - It is recommended to use :func:`sessionmaker` instead of - create_session(). - - """ - kwargs.setdefault('autoflush', False) - kwargs.setdefault('autocommit', True) - kwargs.setdefault('expire_on_commit', False) - return Session(bind=bind, **kwargs) - -relationship = public_factory(RelationshipProperty, ".orm.relationship") - - -def relation(*arg, **kw): - """A synonym for :func:`relationship`.""" - - return relationship(*arg, **kw) - - -def dynamic_loader(argument, **kw): - """Construct a dynamically-loading mapper property. - - This is essentially the same as - using the ``lazy='dynamic'`` argument with :func:`relationship`:: - - dynamic_loader(SomeClass) - - # is the same as - - relationship(SomeClass, lazy="dynamic") - - See the section :ref:`dynamic_relationship` for more details - on dynamic loading. - - """ - kw['lazy'] = 'dynamic' - return relationship(argument, **kw) - - -column_property = public_factory(ColumnProperty, ".orm.column_property") -composite = public_factory(CompositeProperty, ".orm.composite") - - -def backref(name, **kwargs): - """Create a back reference with explicit keyword arguments, which are the - same arguments one can send to :func:`relationship`. - - Used with the ``backref`` keyword argument to :func:`relationship` in - place of a string argument, e.g.:: - - 'items':relationship( - SomeItem, backref=backref('parent', lazy='subquery')) - - """ - return (name, kwargs) - - -def deferred(*columns, **kw): - """Indicate a column-based mapped attribute that by default will - not load unless accessed. - - :param \*columns: columns to be mapped. This is typically a single - :class:`.Column` object, however a collection is supported in order - to support multiple columns mapped under the same attribute. - - :param \**kw: additional keyword arguments passed to - :class:`.ColumnProperty`. - - .. seealso:: - - :ref:`deferred` - - """ - return ColumnProperty(deferred=True, *columns, **kw) - - -mapper = public_factory(Mapper, ".orm.mapper") - -synonym = public_factory(SynonymProperty, ".orm.synonym") - -comparable_property = public_factory(ComparableProperty, - ".orm.comparable_property") - - -@_sa_util.deprecated("0.7", message=":func:`.compile_mappers` " - "is renamed to :func:`.configure_mappers`") -def compile_mappers(): - """Initialize the inter-mapper relationships of all mappers that have - been defined. 
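To illustrate the factories above working together (the declarative classes here are assumed for brevity and are not part of the deleted file)::

    from sqlalchemy import Column, ForeignKey, Integer, Text
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import backref, deferred, relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        bio = deferred(Column(Text))        # not loaded until first accessed
        children = relationship(
            "Child", backref=backref("parent", lazy="subquery"))

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))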
- - """ - configure_mappers() - - -def clear_mappers(): - """Remove all mappers from all classes. - - This function removes all instrumentation from classes and disposes - of their associated mappers. Once called, the classes are unmapped - and can be later re-mapped with new mappers. - - :func:`.clear_mappers` is *not* for normal use, as there is literally no - valid usage for it outside of very specific testing scenarios. Normally, - mappers are permanent structural components of user-defined classes, and - are never discarded independently of their class. If a mapped class - itself is garbage collected, its mapper is automatically disposed of as - well. As such, :func:`.clear_mappers` is only for usage in test suites - that re-use the same classes with different mappings, which is itself an - extremely rare use case - the only such use case is in fact SQLAlchemy's - own test suite, and possibly the test suites of other ORM extension - libraries which intend to test various combinations of mapper construction - upon a fixed set of classes. - - """ - mapperlib._CONFIGURE_MUTEX.acquire() - try: - while _mapper_registry: - try: - # can't even reliably call list(weakdict) in jython - mapper, b = _mapper_registry.popitem() - mapper.dispose() - except KeyError: - pass - finally: - mapperlib._CONFIGURE_MUTEX.release() - -from . import strategy_options - -joinedload = strategy_options.joinedload._unbound_fn -joinedload_all = strategy_options.joinedload._unbound_all_fn -contains_eager = strategy_options.contains_eager._unbound_fn -defer = strategy_options.defer._unbound_fn -undefer = strategy_options.undefer._unbound_fn -undefer_group = strategy_options.undefer_group._unbound_fn -load_only = strategy_options.load_only._unbound_fn -lazyload = strategy_options.lazyload._unbound_fn -lazyload_all = strategy_options.lazyload_all._unbound_all_fn -subqueryload = strategy_options.subqueryload._unbound_fn -subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn -immediateload = strategy_options.immediateload._unbound_fn -noload = strategy_options.noload._unbound_fn -defaultload = strategy_options.defaultload._unbound_fn - -from .strategy_options import Load - - -def eagerload(*args, **kwargs): - """A synonym for :func:`joinedload()`.""" - return joinedload(*args, **kwargs) - - -def eagerload_all(*args, **kwargs): - """A synonym for :func:`joinedload_all()`""" - return joinedload_all(*args, **kwargs) - - -contains_alias = public_factory(AliasOption, ".orm.contains_alias") - - -def __go(lcls): - global __all__ - from .. import util as sa_util - from . import dynamic - from . import events - import inspect as _inspect - - __all__ = sorted(name for name, obj in lcls.items() - if not (name.startswith('_') or _inspect.ismodule(obj))) - - _sa_util.dependencies.resolve_all("sqlalchemy.orm") - -__go(locals()) diff --git a/python/sqlalchemy/orm/attributes.py b/python/sqlalchemy/orm/attributes.py deleted file mode 100644 index 5440d6b5..00000000 --- a/python/sqlalchemy/orm/attributes.py +++ /dev/null @@ -1,1598 +0,0 @@ -# orm/attributes.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines instrumentation for class attributes and their interaction -with instances. - -This module is usually not directly visible to user applications, but -defines a large part of the ORM's interactivity. - - -""" - -import operator -from .. 
import util, event, inspection -from . import interfaces, collections, exc as orm_exc - -from .base import instance_state, instance_dict, manager_of_class - -from .base import PASSIVE_NO_RESULT, ATTR_WAS_SET, ATTR_EMPTY, NO_VALUE,\ - NEVER_SET, NO_CHANGE, CALLABLES_OK, SQL_OK, RELATED_OBJECT_OK,\ - INIT_OK, NON_PERSISTENT_OK, LOAD_AGAINST_COMMITTED, PASSIVE_OFF,\ - PASSIVE_RETURN_NEVER_SET, PASSIVE_NO_INITIALIZE, PASSIVE_NO_FETCH,\ - PASSIVE_NO_FETCH_RELATED, PASSIVE_ONLY_PERSISTENT, NO_AUTOFLUSH -from .base import state_str, instance_str - - -@inspection._self_inspects -class QueryableAttribute(interfaces._MappedAttribute, - interfaces.InspectionAttr, - interfaces.PropComparator): - """Base class for :term:`descriptor` objects that intercept - attribute events on behalf of a :class:`.MapperProperty` - object. The actual :class:`.MapperProperty` is accessible - via the :attr:`.QueryableAttribute.property` - attribute. - - - .. seealso:: - - :class:`.InstrumentedAttribute` - - :class:`.MapperProperty` - - :attr:`.Mapper.all_orm_descriptors` - - :attr:`.Mapper.attrs` - """ - - is_attribute = True - - def __init__(self, class_, key, impl=None, - comparator=None, parententity=None, - of_type=None): - self.class_ = class_ - self.key = key - self.impl = impl - self.comparator = comparator - self._parententity = parententity - self._of_type = of_type - - manager = manager_of_class(class_) - # manager is None in the case of AliasedClass - if manager: - # propagate existing event listeners from - # immediate superclass - for base in manager._bases: - if key in base: - self.dispatch._update(base[key].dispatch) - - @util.memoized_property - def _supports_population(self): - return self.impl.supports_population - - def get_history(self, instance, passive=PASSIVE_OFF): - return self.impl.get_history(instance_state(instance), - instance_dict(instance), passive) - - def __selectable__(self): - # TODO: conditionally attach this method based on clause_element ? - return self - - @util.memoized_property - def info(self): - """Return the 'info' dictionary for the underlying SQL element. - - The behavior here is as follows: - - * If the attribute is a column-mapped property, i.e. - :class:`.ColumnProperty`, which is mapped directly - to a schema-level :class:`.Column` object, this attribute - will return the :attr:`.SchemaItem.info` dictionary associated - with the core-level :class:`.Column` object. - - * If the attribute is a :class:`.ColumnProperty` but is mapped to - any other kind of SQL expression other than a :class:`.Column`, - the attribute will refer to the :attr:`.MapperProperty.info` - dictionary associated directly with the :class:`.ColumnProperty`, - assuming the SQL expression itself does not have its own ``.info`` - attribute (which should be the case, unless a user-defined SQL - construct has defined one). - - * If the attribute refers to any other kind of - :class:`.MapperProperty`, including :class:`.RelationshipProperty`, - the attribute will refer to the :attr:`.MapperProperty.info` - dictionary associated with that :class:`.MapperProperty`. - - * To access the :attr:`.MapperProperty.info` dictionary of the - :class:`.MapperProperty` unconditionally, including for a - :class:`.ColumnProperty` that's associated directly with a - :class:`.schema.Column`, the attribute can be referred to using - :attr:`.QueryableAttribute.property` attribute, as - ``MyClass.someattribute.property.info``. - - .. versionadded:: 0.8.0 - - .. 
seealso:: - - :attr:`.SchemaItem.info` - - :attr:`.MapperProperty.info` - - """ - return self.comparator.info - - @util.memoized_property - def parent(self): - """Return an inspection instance representing the parent. - - This will be either an instance of :class:`.Mapper` - or :class:`.AliasedInsp`, depending upon the nature - of the parent entity which this attribute is associated - with. - - """ - return inspection.inspect(self._parententity) - - @property - def expression(self): - return self.comparator.__clause_element__() - - def __clause_element__(self): - return self.comparator.__clause_element__() - - def _query_clause_element(self): - """like __clause_element__(), but called specifically - by :class:`.Query` to allow special behavior.""" - - return self.comparator._query_clause_element() - - def adapt_to_entity(self, adapt_to_entity): - assert not self._of_type - return self.__class__(adapt_to_entity.entity, - self.key, impl=self.impl, - comparator=self.comparator.adapt_to_entity( - adapt_to_entity), - parententity=adapt_to_entity) - - def of_type(self, cls): - return QueryableAttribute( - self.class_, - self.key, - self.impl, - self.comparator.of_type(cls), - self._parententity, - of_type=cls) - - def label(self, name): - return self._query_clause_element().label(name) - - def operate(self, op, *other, **kwargs): - return op(self.comparator, *other, **kwargs) - - def reverse_operate(self, op, other, **kwargs): - return op(other, self.comparator, **kwargs) - - def hasparent(self, state, optimistic=False): - return self.impl.hasparent(state, optimistic=optimistic) is not False - - def __getattr__(self, key): - try: - return getattr(self.comparator, key) - except AttributeError: - raise AttributeError( - 'Neither %r object nor %r object associated with %s ' - 'has an attribute %r' % ( - type(self).__name__, - type(self.comparator).__name__, - self, - key) - ) - - def __str__(self): - return "%s.%s" % (self.class_.__name__, self.key) - - @util.memoized_property - def property(self): - """Return the :class:`.MapperProperty` associated with this - :class:`.QueryableAttribute`. - - - Return values here will commonly be instances of - :class:`.ColumnProperty` or :class:`.RelationshipProperty`. - - - """ - return self.comparator.property - - -class InstrumentedAttribute(QueryableAttribute): - """Class bound instrumented attribute which adds basic - :term:`descriptor` methods. - - See :class:`.QueryableAttribute` for a description of most features. - - - """ - - def __set__(self, instance, value): - self.impl.set(instance_state(instance), - instance_dict(instance), value, None) - - def __delete__(self, instance): - self.impl.delete(instance_state(instance), instance_dict(instance)) - - def __get__(self, instance, owner): - if instance is None: - return self - - dict_ = instance_dict(instance) - if self._supports_population and self.key in dict_: - return dict_[self.key] - else: - return self.impl.get(instance_state(instance), dict_) - - -def create_proxied_attribute(descriptor): - """Create an QueryableAttribute / user descriptor hybrid. - - Returns a new QueryableAttribute type that delegates descriptor - behavior and getattr() to the given descriptor. - """ - - # TODO: can move this to descriptor_props if the need for this - # function is removed from ext/hybrid.py - - class Proxy(QueryableAttribute): - """Presents the :class:`.QueryableAttribute` interface as a - proxy on top of a Python descriptor / :class:`.PropComparator` - combination. 
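Concretely, class-bound attributes behave both as descriptors and as SQL expression generators (``User`` is an assumed mapped class; ``select([...])`` is the era-appropriate Core style)::

    from sqlalchemy import select

    # class level: operate()/__clause_element__() produce SQL expressions
    criterion = User.name == 'ed'                    # a BinaryExpression, not a bool
    stmt = select([User.id]).where(User.name.like('e%'))

    # instance level: __get__/__set__ route through the attribute impl,
    # firing any registered attribute events
    u = User()
    u.name = 'ed'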
- - """ - - def __init__(self, class_, key, descriptor, - comparator, - adapt_to_entity=None, doc=None, - original_property=None): - self.class_ = class_ - self.key = key - self.descriptor = descriptor - self.original_property = original_property - self._comparator = comparator - self._adapt_to_entity = adapt_to_entity - self.__doc__ = doc - - @property - def property(self): - return self.comparator.property - - @util.memoized_property - def comparator(self): - if util.callable(self._comparator): - self._comparator = self._comparator() - if self._adapt_to_entity: - self._comparator = self._comparator.adapt_to_entity( - self._adapt_to_entity) - return self._comparator - - def adapt_to_entity(self, adapt_to_entity): - return self.__class__(adapt_to_entity.entity, - self.key, - self.descriptor, - self._comparator, - adapt_to_entity) - - def __get__(self, instance, owner): - if instance is None: - return self - else: - return self.descriptor.__get__(instance, owner) - - def __str__(self): - return "%s.%s" % (self.class_.__name__, self.key) - - def __getattr__(self, attribute): - """Delegate __getattr__ to the original descriptor and/or - comparator.""" - - try: - return getattr(descriptor, attribute) - except AttributeError: - try: - return getattr(self.comparator, attribute) - except AttributeError: - raise AttributeError( - 'Neither %r object nor %r object associated with %s ' - 'has an attribute %r' % ( - type(descriptor).__name__, - type(self.comparator).__name__, - self, - attribute) - ) - - Proxy.__name__ = type(descriptor).__name__ + 'Proxy' - - util.monkeypatch_proxied_specials(Proxy, type(descriptor), - name='descriptor', - from_instance=descriptor) - return Proxy - -OP_REMOVE = util.symbol("REMOVE") -OP_APPEND = util.symbol("APPEND") -OP_REPLACE = util.symbol("REPLACE") - - -class Event(object): - """A token propagated throughout the course of a chain of attribute - events. - - Serves as an indicator of the source of the event and also provides - a means of controlling propagation across a chain of attribute - operations. - - The :class:`.Event` object is sent as the ``initiator`` argument - when dealing with the :meth:`.AttributeEvents.append`, - :meth:`.AttributeEvents.set`, - and :meth:`.AttributeEvents.remove` events. - - The :class:`.Event` object is currently interpreted by the backref - event handlers, and is used to control the propagation of operations - across two mutually-dependent attributes. - - .. versionadded:: 0.9.0 - - :var impl: The :class:`.AttributeImpl` which is the current event - initiator. - - :var op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE` or - :attr:`.OP_REPLACE`, indicating the source operation. - - """ - - __slots__ = 'impl', 'op', 'parent_token' - - def __init__(self, attribute_impl, op): - self.impl = attribute_impl - self.op = op - self.parent_token = self.impl.parent_token - - def __eq__(self, other): - return isinstance(other, Event) and \ - other.impl is self.impl and \ - other.op == self.op - - @property - def key(self): - return self.impl.key - - def hasparent(self, state): - return self.impl.hasparent(state) - - -class AttributeImpl(object): - """internal implementation for instrumented attributes.""" - - def __init__(self, class_, key, - callable_, dispatch, trackparent=False, extension=None, - compare_function=None, active_history=False, - parent_token=None, expire_missing=True, - send_modified_events=True, - **kwargs): - """Construct an AttributeImpl. 
- - \class_ - associated class - - key - string name of the attribute - - \callable_ - optional function which generates a callable based on a parent - instance, which produces the "default" values for a scalar or - collection attribute when it's first accessed, if not present - already. - - trackparent - if True, attempt to track if an instance has a parent attached - to it via this attribute. - - extension - a single or list of AttributeExtension object(s) which will - receive set/delete/append/remove/etc. events. Deprecated. - The event package is now used. - - compare_function - a function that compares two values which are normally - assignable to this attribute. - - active_history - indicates that get_history() should always return the "old" value, - even if it means executing a lazy callable upon attribute change. - - parent_token - Usually references the MapperProperty, used as a key for - the hasparent() function to identify an "owning" attribute. - Allows multiple AttributeImpls to all match a single - owner attribute. - - expire_missing - if False, don't add an "expiry" callable to this attribute - during state.expire_attributes(None), if no value is present - for this key. - - send_modified_events - if False, the InstanceState._modified_event method will have no - effect; this means the attribute will never show up as changed in a - history entry. - """ - self.class_ = class_ - self.key = key - self.callable_ = callable_ - self.dispatch = dispatch - self.trackparent = trackparent - self.parent_token = parent_token or self - self.send_modified_events = send_modified_events - if compare_function is None: - self.is_equal = operator.eq - else: - self.is_equal = compare_function - - # TODO: pass in the manager here - # instead of doing a lookup - attr = manager_of_class(class_)[key] - - for ext in util.to_list(extension or []): - ext._adapt_listener(attr, ext) - - if active_history: - self.dispatch._active_history = True - - self.expire_missing = expire_missing - - __slots__ = ( - 'class_', 'key', 'callable_', 'dispatch', 'trackparent', - 'parent_token', 'send_modified_events', 'is_equal', 'expire_missing' - ) - - def __str__(self): - return "%s.%s" % (self.class_.__name__, self.key) - - def _get_active_history(self): - """Backwards compat for impl.active_history""" - - return self.dispatch._active_history - - def _set_active_history(self, value): - self.dispatch._active_history = value - - active_history = property(_get_active_history, _set_active_history) - - def hasparent(self, state, optimistic=False): - """Return the boolean value of a `hasparent` flag attached to - the given state. - - The `optimistic` flag determines what the default return value - should be if no `hasparent` flag can be located. - - As this function is used to determine if an instance is an - *orphan*, instances that were loaded from storage should be - assumed to not be orphans, until a True/False value for this - flag is set. - - An instance attribute that is loaded by a callable function - will also not have a `hasparent` flag. - - """ - msg = "This AttributeImpl is not configured to track parents." - assert self.trackparent, msg - - return state.parents.get(id(self.parent_token), optimistic) \ - is not False - - def sethasparent(self, state, parent_state, value): - """Set a boolean flag on the given item corresponding to - whether or not it is attached to a parent object via the - attribute represented by this ``InstrumentedAttribute``. 
- - """ - msg = "This AttributeImpl is not configured to track parents." - assert self.trackparent, msg - - id_ = id(self.parent_token) - if value: - state.parents[id_] = parent_state - else: - if id_ in state.parents: - last_parent = state.parents[id_] - - if last_parent is not False and \ - last_parent.key != parent_state.key: - - if last_parent.obj() is None: - raise orm_exc.StaleDataError( - "Removing state %s from parent " - "state %s along attribute '%s', " - "but the parent record " - "has gone stale, can't be sure this " - "is the most recent parent." % - (state_str(state), - state_str(parent_state), - self.key)) - - return - - state.parents[id_] = False - - def get_history(self, state, dict_, passive=PASSIVE_OFF): - raise NotImplementedError() - - def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE): - """Return a list of tuples of (state, obj) - for all objects in this attribute's current state - + history. - - Only applies to object-based attributes. - - This is an inlining of existing functionality - which roughly corresponds to: - - get_state_history( - state, - key, - passive=PASSIVE_NO_INITIALIZE).sum() - - """ - raise NotImplementedError() - - def initialize(self, state, dict_): - """Initialize the given state's attribute with an empty value.""" - - # As of 1.0, we don't actually set a value in - # dict_. This is so that the state of the object does not get - # modified without emitting the appropriate events. - - - return None - - def get(self, state, dict_, passive=PASSIVE_OFF): - """Retrieve a value from the given object. - If a callable is assembled on this object's attribute, and - passive is False, the callable will be executed and the - resulting value will be set as the new value for this attribute. - """ - if self.key in dict_: - return dict_[self.key] - else: - # if history present, don't load - key = self.key - if key not in state.committed_state or \ - state.committed_state[key] is NEVER_SET: - if not passive & CALLABLES_OK: - return PASSIVE_NO_RESULT - - if key in state.expired_attributes: - value = state._load_expired(state, passive) - elif key in state.callables: - callable_ = state.callables[key] - value = callable_(state, passive) - elif self.callable_: - value = self.callable_(state, passive) - else: - value = ATTR_EMPTY - - if value is PASSIVE_NO_RESULT or value is NEVER_SET: - return value - elif value is ATTR_WAS_SET: - try: - return dict_[key] - except KeyError: - # TODO: no test coverage here. 
- raise KeyError( - "Deferred loader for attribute " - "%r failed to populate " - "correctly" % key) - elif value is not ATTR_EMPTY: - return self.set_committed_value(state, dict_, value) - - if not passive & INIT_OK: - return NEVER_SET - else: - # Return a new, empty value - return self.initialize(state, dict_) - - def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - self.set(state, dict_, value, initiator, passive=passive) - - def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - self.set(state, dict_, None, initiator, - passive=passive, check_old=value) - - def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - self.set(state, dict_, None, initiator, - passive=passive, check_old=value, pop=True) - - def set(self, state, dict_, value, initiator, - passive=PASSIVE_OFF, check_old=None, pop=False): - raise NotImplementedError() - - def get_committed_value(self, state, dict_, passive=PASSIVE_OFF): - """return the unchanged value of this attribute""" - - if self.key in state.committed_state: - value = state.committed_state[self.key] - if value in (NO_VALUE, NEVER_SET): - return None - else: - return value - else: - return self.get(state, dict_, passive=passive) - - def set_committed_value(self, state, dict_, value): - """set an attribute value on the given instance and 'commit' it.""" - - dict_[self.key] = value - state._commit(dict_, [self.key]) - return value - - -class ScalarAttributeImpl(AttributeImpl): - """represents a scalar value-holding InstrumentedAttribute.""" - - accepts_scalar_loader = True - uses_objects = False - supports_population = True - collection = False - - __slots__ = '_replace_token', '_append_token', '_remove_token' - - def __init__(self, *arg, **kw): - super(ScalarAttributeImpl, self).__init__(*arg, **kw) - self._replace_token = self._append_token = None - self._remove_token = None - - def _init_append_token(self): - self._replace_token = self._append_token = Event(self, OP_REPLACE) - return self._replace_token - - _init_append_or_replace_token = _init_append_token - - def _init_remove_token(self): - self._remove_token = Event(self, OP_REMOVE) - return self._remove_token - - def delete(self, state, dict_): - - # TODO: catch key errors, convert to attributeerror? 
- if self.dispatch._active_history: - old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET) - else: - old = dict_.get(self.key, NO_VALUE) - - if self.dispatch.remove: - self.fire_remove_event(state, dict_, old, self._remove_token) - state._modified_event(dict_, self, old) - del dict_[self.key] - - def get_history(self, state, dict_, passive=PASSIVE_OFF): - if self.key in dict_: - return History.from_scalar_attribute(self, state, dict_[self.key]) - else: - if passive & INIT_OK: - passive ^= INIT_OK - current = self.get(state, dict_, passive=passive) - if current is PASSIVE_NO_RESULT: - return HISTORY_BLANK - else: - return History.from_scalar_attribute(self, state, current) - - def set(self, state, dict_, value, initiator, - passive=PASSIVE_OFF, check_old=None, pop=False): - if self.dispatch._active_history: - old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET) - else: - old = dict_.get(self.key, NO_VALUE) - - if self.dispatch.set: - value = self.fire_replace_event(state, dict_, - value, old, initiator) - state._modified_event(dict_, self, old) - dict_[self.key] = value - - def fire_replace_event(self, state, dict_, value, previous, initiator): - for fn in self.dispatch.set: - value = fn( - state, value, previous, - initiator or self._replace_token or - self._init_append_or_replace_token()) - return value - - def fire_remove_event(self, state, dict_, value, initiator): - for fn in self.dispatch.remove: - fn(state, value, - initiator or self._remove_token or self._init_remove_token()) - - @property - def type(self): - self.property.columns[0].type - - -class ScalarObjectAttributeImpl(ScalarAttributeImpl): - """represents a scalar-holding InstrumentedAttribute, - where the target object is also instrumented. - - Adds events to delete/set operations. - - """ - - accepts_scalar_loader = False - uses_objects = True - supports_population = True - collection = False - - __slots__ = () - - def delete(self, state, dict_): - old = self.get(state, dict_) - self.fire_remove_event( - state, dict_, old, - self._remove_token or self._init_remove_token()) - del dict_[self.key] - - def get_history(self, state, dict_, passive=PASSIVE_OFF): - if self.key in dict_: - return History.from_object_attribute(self, state, dict_[self.key]) - else: - if passive & INIT_OK: - passive ^= INIT_OK - current = self.get(state, dict_, passive=passive) - if current is PASSIVE_NO_RESULT: - return HISTORY_BLANK - else: - return History.from_object_attribute(self, state, current) - - def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE): - if self.key in dict_: - current = dict_[self.key] - elif passive & CALLABLES_OK: - current = self.get(state, dict_, passive=passive) - else: - return [] - - # can't use __hash__(), can't use __eq__() here - if current is not None and \ - current is not PASSIVE_NO_RESULT and \ - current is not NEVER_SET: - ret = [(instance_state(current), current)] - else: - ret = [(None, None)] - - if self.key in state.committed_state: - original = state.committed_state[self.key] - if original is not None and \ - original is not PASSIVE_NO_RESULT and \ - original is not NEVER_SET and \ - original is not current: - - ret.append((instance_state(original), original)) - return ret - - def set(self, state, dict_, value, initiator, - passive=PASSIVE_OFF, check_old=None, pop=False): - """Set a value on the given InstanceState. 
- - """ - if self.dispatch._active_history: - old = self.get( - state, dict_, passive=PASSIVE_ONLY_PERSISTENT | NO_AUTOFLUSH) - else: - old = self.get(state, dict_, passive=PASSIVE_NO_FETCH ^ INIT_OK) - - if check_old is not None and \ - old is not PASSIVE_NO_RESULT and \ - check_old is not old: - if pop: - return - else: - raise ValueError( - "Object %s not associated with %s on attribute '%s'" % ( - instance_str(check_old), - state_str(state), - self.key - )) - - value = self.fire_replace_event(state, dict_, value, old, initiator) - dict_[self.key] = value - - def fire_remove_event(self, state, dict_, value, initiator): - if self.trackparent and value is not None: - self.sethasparent(instance_state(value), state, False) - - for fn in self.dispatch.remove: - fn(state, value, initiator or - self._remove_token or self._init_remove_token()) - - state._modified_event(dict_, self, value) - - def fire_replace_event(self, state, dict_, value, previous, initiator): - if self.trackparent: - if (previous is not value and - previous not in (None, PASSIVE_NO_RESULT, NEVER_SET)): - self.sethasparent(instance_state(previous), state, False) - - for fn in self.dispatch.set: - value = fn( - state, value, previous, initiator or - self._replace_token or self._init_append_or_replace_token()) - - state._modified_event(dict_, self, previous) - - if self.trackparent: - if value is not None: - self.sethasparent(instance_state(value), state, True) - - return value - - -class CollectionAttributeImpl(AttributeImpl): - """A collection-holding attribute that instruments changes in membership. - - Only handles collections of instrumented objects. - - InstrumentedCollectionAttribute holds an arbitrary, user-specified - container object (defaulting to a list) and brokers access to the - CollectionAdapter, a "view" onto that object that presents consistent bag - semantics to the orm layer independent of the user data implementation. 
- - """ - accepts_scalar_loader = False - uses_objects = True - supports_population = True - collection = True - - __slots__ = 'copy', 'collection_factory', '_append_token', '_remove_token' - - def __init__(self, class_, key, callable_, dispatch, - typecallable=None, trackparent=False, extension=None, - copy_function=None, compare_function=None, **kwargs): - super(CollectionAttributeImpl, self).__init__( - class_, - key, - callable_, dispatch, - trackparent=trackparent, - extension=extension, - compare_function=compare_function, - **kwargs) - - if copy_function is None: - copy_function = self.__copy - self.copy = copy_function - self.collection_factory = typecallable - self._append_token = None - self._remove_token = None - - if getattr(self.collection_factory, "_sa_linker", None): - - @event.listens_for(self, "init_collection") - def link(target, collection, collection_adapter): - collection._sa_linker(collection_adapter) - - @event.listens_for(self, "dispose_collection") - def unlink(target, collection, collection_adapter): - collection._sa_linker(None) - - def _init_append_token(self): - self._append_token = Event(self, OP_APPEND) - return self._append_token - - def _init_remove_token(self): - self._remove_token = Event(self, OP_REMOVE) - return self._remove_token - - def __copy(self, item): - return [y for y in collections.collection_adapter(item)] - - def get_history(self, state, dict_, passive=PASSIVE_OFF): - current = self.get(state, dict_, passive=passive) - if current is PASSIVE_NO_RESULT: - return HISTORY_BLANK - else: - return History.from_collection(self, state, current) - - def get_all_pending(self, state, dict_, passive=PASSIVE_NO_INITIALIZE): - # NOTE: passive is ignored here at the moment - - if self.key not in dict_: - return [] - - current = dict_[self.key] - current = getattr(current, '_sa_adapter') - - if self.key in state.committed_state: - original = state.committed_state[self.key] - if original not in (NO_VALUE, NEVER_SET): - current_states = [((c is not None) and - instance_state(c) or None, c) - for c in current] - original_states = [((c is not None) and - instance_state(c) or None, c) - for c in original] - - current_set = dict(current_states) - original_set = dict(original_states) - - return \ - [(s, o) for s, o in current_states - if s not in original_set] + \ - [(s, o) for s, o in current_states - if s in original_set] + \ - [(s, o) for s, o in original_states - if s not in current_set] - - return [(instance_state(o), o) for o in current] - - def fire_append_event(self, state, dict_, value, initiator): - for fn in self.dispatch.append: - value = fn( - state, value, - initiator or self._append_token or self._init_append_token()) - - state._modified_event(dict_, self, NEVER_SET, True) - - if self.trackparent and value is not None: - self.sethasparent(instance_state(value), state, True) - - return value - - def fire_pre_remove_event(self, state, dict_, initiator): - state._modified_event(dict_, self, NEVER_SET, True) - - def fire_remove_event(self, state, dict_, value, initiator): - if self.trackparent and value is not None: - self.sethasparent(instance_state(value), state, False) - - for fn in self.dispatch.remove: - fn(state, value, - initiator or self._remove_token or self._init_remove_token()) - - state._modified_event(dict_, self, NEVER_SET, True) - - def delete(self, state, dict_): - if self.key not in dict_: - return - - state._modified_event(dict_, self, NEVER_SET, True) - - collection = self.get_collection(state, state.dict) - 
collection.clear_with_event() - # TODO: catch key errors, convert to attributeerror? - del dict_[self.key] - - def initialize(self, state, dict_): - """Initialize this attribute with an empty collection.""" - - _, user_data = self._initialize_collection(state) - dict_[self.key] = user_data - return user_data - - def _initialize_collection(self, state): - - adapter, collection = state.manager.initialize_collection( - self.key, state, self.collection_factory) - - self.dispatch.init_collection(state, collection, adapter) - - return adapter, collection - - def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - collection = self.get_collection(state, dict_, passive=passive) - if collection is PASSIVE_NO_RESULT: - value = self.fire_append_event(state, dict_, value, initiator) - assert self.key not in dict_, \ - "Collection was loaded during event handling." - state._get_pending_mutation(self.key).append(value) - else: - collection.append_with_event(value, initiator) - - def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - collection = self.get_collection(state, state.dict, passive=passive) - if collection is PASSIVE_NO_RESULT: - self.fire_remove_event(state, dict_, value, initiator) - assert self.key not in dict_, \ - "Collection was loaded during event handling." - state._get_pending_mutation(self.key).remove(value) - else: - collection.remove_with_event(value, initiator) - - def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - try: - # TODO: better solution here would be to add - # a "popper" role to collections.py to complement - # "remover". - self.remove(state, dict_, value, initiator, passive=passive) - except (ValueError, KeyError, IndexError): - pass - - def set(self, state, dict_, value, initiator, - passive=PASSIVE_OFF, pop=False): - """Set a value on the given object. - - """ - - self._set_iterable( - state, dict_, value, - lambda adapter, i: adapter.adapt_like_to_iterable(i)) - - def _set_iterable(self, state, dict_, iterable, adapter=None): - """Set a collection value from an iterable of state-bearers. - - ``adapter`` is an optional callable invoked with a CollectionAdapter - and the iterable. Should return an iterable of state-bearing - instances suitable for appending via a CollectionAdapter. Can be used - for, e.g., adapting an incoming dictionary into an iterator of values - rather than keys. - - """ - # pulling a new collection first so that an adaptation exception does - # not trigger a lazy load of the old collection. 
- new_collection, user_data = self._initialize_collection(state) - if adapter: - new_values = list(adapter(new_collection, iterable)) - else: - new_values = list(iterable) - - old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT) - if old is PASSIVE_NO_RESULT: - old = self.initialize(state, dict_) - elif old is iterable: - # ignore re-assignment of the current collection, as happens - # implicitly with in-place operators (foo.collection |= other) - return - - # place a copy of "old" in state.committed_state - state._modified_event(dict_, self, old, True) - - old_collection = old._sa_adapter - - dict_[self.key] = user_data - - collections.bulk_replace(new_values, old_collection, new_collection) - - del old._sa_adapter - self.dispatch.dispose_collection(state, old, old_collection) - - def _invalidate_collection(self, collection): - adapter = getattr(collection, '_sa_adapter') - adapter.invalidated = True - - def set_committed_value(self, state, dict_, value): - """Set an attribute value on the given instance and 'commit' it.""" - - collection, user_data = self._initialize_collection(state) - - if value: - collection.append_multiple_without_event(value) - - state.dict[self.key] = user_data - - state._commit(dict_, [self.key]) - - if self.key in state._pending_mutations: - # pending items exist. issue a modified event, - # add/remove new items. - state._modified_event(dict_, self, user_data, True) - - pending = state._pending_mutations.pop(self.key) - added = pending.added_items - removed = pending.deleted_items - for item in added: - collection.append_without_event(item) - for item in removed: - collection.remove_without_event(item) - - return user_data - - def get_collection(self, state, dict_, - user_data=None, passive=PASSIVE_OFF): - """Retrieve the CollectionAdapter associated with the given state. - - Creates a new CollectionAdapter if one does not exist. - - """ - if user_data is None: - user_data = self.get(state, dict_, passive=passive) - if user_data is PASSIVE_NO_RESULT: - return user_data - - return getattr(user_data, '_sa_adapter') - - -def backref_listeners(attribute, key, uselist): - """Apply listeners to synchronize a two-way relationship.""" - - # use easily recognizable names for stack traces - - parent_token = attribute.impl.parent_token - parent_impl = attribute.impl - - def _acceptable_key_err(child_state, initiator, child_impl): - raise ValueError( - "Bidirectional attribute conflict detected: " - 'Passing object %s to attribute "%s" ' - 'triggers a modify event on attribute "%s" ' - 'via the backref "%s".' % ( - state_str(child_state), - initiator.parent_token, - child_impl.parent_token, - attribute.impl.parent_token - ) - ) - - def emit_backref_from_scalar_set_event(state, child, oldchild, initiator): - if oldchild is child: - return child - if oldchild is not None and \ - oldchild is not PASSIVE_NO_RESULT and \ - oldchild is not NEVER_SET: - # With lazy=None, there's no guarantee that the full collection is - # present when updating via a backref. 
- old_state, old_dict = instance_state(oldchild),\ - instance_dict(oldchild) - impl = old_state.manager[key].impl - - if initiator.impl is not impl or \ - initiator.op not in (OP_REPLACE, OP_REMOVE): - impl.pop(old_state, - old_dict, - state.obj(), - parent_impl._append_token or - parent_impl._init_append_token(), - passive=PASSIVE_NO_FETCH) - - if child is not None: - child_state, child_dict = instance_state(child),\ - instance_dict(child) - child_impl = child_state.manager[key].impl - if initiator.parent_token is not parent_token and \ - initiator.parent_token is not child_impl.parent_token: - _acceptable_key_err(state, initiator, child_impl) - elif initiator.impl is not child_impl or \ - initiator.op not in (OP_APPEND, OP_REPLACE): - child_impl.append( - child_state, - child_dict, - state.obj(), - initiator, - passive=PASSIVE_NO_FETCH) - return child - - def emit_backref_from_collection_append_event(state, child, initiator): - if child is None: - return - - child_state, child_dict = instance_state(child), \ - instance_dict(child) - child_impl = child_state.manager[key].impl - - if initiator.parent_token is not parent_token and \ - initiator.parent_token is not child_impl.parent_token: - _acceptable_key_err(state, initiator, child_impl) - elif initiator.impl is not child_impl or \ - initiator.op not in (OP_APPEND, OP_REPLACE): - child_impl.append( - child_state, - child_dict, - state.obj(), - initiator, - passive=PASSIVE_NO_FETCH) - return child - - def emit_backref_from_collection_remove_event(state, child, initiator): - if child is not None: - child_state, child_dict = instance_state(child),\ - instance_dict(child) - child_impl = child_state.manager[key].impl - if initiator.impl is not child_impl or \ - initiator.op not in (OP_REMOVE, OP_REPLACE): - child_impl.pop( - child_state, - child_dict, - state.obj(), - initiator, - passive=PASSIVE_NO_FETCH) - - if uselist: - event.listen(attribute, "append", - emit_backref_from_collection_append_event, - retval=True, raw=True) - else: - event.listen(attribute, "set", - emit_backref_from_scalar_set_event, - retval=True, raw=True) - # TODO: need coverage in test/orm/ of remove event - event.listen(attribute, "remove", - emit_backref_from_collection_remove_event, - retval=True, raw=True) - -_NO_HISTORY = util.symbol('NO_HISTORY') -_NO_STATE_SYMBOLS = frozenset([ - id(PASSIVE_NO_RESULT), - id(NO_VALUE), - id(NEVER_SET)]) - -History = util.namedtuple("History", [ - "added", "unchanged", "deleted" -]) - - -class History(History): - """A 3-tuple of added, unchanged and deleted values, - representing the changes which have occurred on an instrumented - attribute. - - The easiest way to get a :class:`.History` object for a particular - attribute on an object is to use the :func:`.inspect` function:: - - from sqlalchemy import inspect - - hist = inspect(myobject).attrs.myattribute.history - - Each tuple member is an iterable sequence: - - * ``added`` - the collection of items added to the attribute (the first - tuple element). - - * ``unchanged`` - the collection of items that have not changed on the - attribute (the second tuple element). - - * ``deleted`` - the collection of items that have been removed from the - attribute (the third tuple element). - - """ - - def __bool__(self): - return self != HISTORY_BLANK - __nonzero__ = __bool__ - - def empty(self): - """Return True if this :class:`.History` has no changes - and no existing, unchanged state. 
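Extending the docstring's example, a sketch of reading change history, assuming a persistent ``User`` row already loaded in an assumed ``session``::

    from sqlalchemy import inspect

    user = session.query(User).first()
    user.name = 'newname'
    hist = inspect(user).attrs.name.history
    print(hist.added)           # ['newname']
    print(hist.deleted)         # e.g. ['oldname'], the loaded committed value
    print(hist.has_changes())   # True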
- - """ - - return not bool( - (self.added or self.deleted) - or self.unchanged - ) - - def sum(self): - """Return a collection of added + unchanged + deleted.""" - - return (self.added or []) +\ - (self.unchanged or []) +\ - (self.deleted or []) - - def non_deleted(self): - """Return a collection of added + unchanged.""" - - return (self.added or []) +\ - (self.unchanged or []) - - def non_added(self): - """Return a collection of unchanged + deleted.""" - - return (self.unchanged or []) +\ - (self.deleted or []) - - def has_changes(self): - """Return True if this :class:`.History` has changes.""" - - return bool(self.added or self.deleted) - - def as_state(self): - return History( - [(c is not None) - and instance_state(c) or None - for c in self.added], - [(c is not None) - and instance_state(c) or None - for c in self.unchanged], - [(c is not None) - and instance_state(c) or None - for c in self.deleted], - ) - - @classmethod - def from_scalar_attribute(cls, attribute, state, current): - original = state.committed_state.get(attribute.key, _NO_HISTORY) - - if original is _NO_HISTORY: - if current is NEVER_SET: - return cls((), (), ()) - else: - return cls((), [current], ()) - # don't let ClauseElement expressions here trip things up - elif attribute.is_equal(current, original) is True: - return cls((), [current], ()) - else: - # current convention on native scalars is to not - # include information - # about missing previous value in "deleted", but - # we do include None, which helps in some primary - # key situations - if id(original) in _NO_STATE_SYMBOLS: - deleted = () - else: - deleted = [original] - if current is NEVER_SET: - return cls((), (), deleted) - else: - return cls([current], (), deleted) - - @classmethod - def from_object_attribute(cls, attribute, state, current): - original = state.committed_state.get(attribute.key, _NO_HISTORY) - - if original is _NO_HISTORY: - if current is NO_VALUE or current is NEVER_SET: - return cls((), (), ()) - else: - return cls((), [current], ()) - elif current is original: - return cls((), [current], ()) - else: - # current convention on related objects is to not - # include information - # about missing previous value in "deleted", and - # to also not include None - the dependency.py rules - # ignore the None in any case. 
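            # Illustrative sketch (hypothetical mapped class User with a
            # scalar `name` attribute) of the scalar rules above:
            #
            #     from sqlalchemy import inspect
            #     user.name = 'ed'              # committed value was 'jack'
            #     hist = inspect(user).attrs.name.history
            #     hist.added      # ['ed']
            #     hist.deleted    # ['jack']
            #     hist.unchanged  # ()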
- if id(original) in _NO_STATE_SYMBOLS or original is None: - deleted = () - else: - deleted = [original] - if current is NO_VALUE or current is NEVER_SET: - return cls((), (), deleted) - else: - return cls([current], (), deleted) - - @classmethod - def from_collection(cls, attribute, state, current): - original = state.committed_state.get(attribute.key, _NO_HISTORY) - - if current is NO_VALUE or current is NEVER_SET: - return cls((), (), ()) - - current = getattr(current, '_sa_adapter') - if original in (NO_VALUE, NEVER_SET): - return cls(list(current), (), ()) - elif original is _NO_HISTORY: - return cls((), list(current), ()) - else: - - current_states = [((c is not None) and instance_state(c) - or None, c) - for c in current - ] - original_states = [((c is not None) and instance_state(c) - or None, c) - for c in original - ] - - current_set = dict(current_states) - original_set = dict(original_states) - - return cls( - [o for s, o in current_states if s not in original_set], - [o for s, o in current_states if s in original_set], - [o for s, o in original_states if s not in current_set] - ) - -HISTORY_BLANK = History(None, None, None) - - -def get_history(obj, key, passive=PASSIVE_OFF): - """Return a :class:`.History` record for the given object - and attribute key. - - :param obj: an object whose class is instrumented by the - attributes package. - - :param key: string attribute name. - - :param passive: indicates loading behavior for the attribute - if the value is not already present. This is a - bitflag attribute, which defaults to the symbol - :attr:`.PASSIVE_OFF` indicating all necessary SQL - should be emitted. - - """ - if passive is True: - util.warn_deprecated("Passing True for 'passive' is deprecated. " - "Use attributes.PASSIVE_NO_INITIALIZE") - passive = PASSIVE_NO_INITIALIZE - elif passive is False: - util.warn_deprecated("Passing False for 'passive' is " - "deprecated. 
Use attributes.PASSIVE_OFF") - passive = PASSIVE_OFF - - return get_state_history(instance_state(obj), key, passive) - - -def get_state_history(state, key, passive=PASSIVE_OFF): - return state.get_history(key, passive) - - -def has_parent(cls, obj, key, optimistic=False): - """TODO""" - manager = manager_of_class(cls) - state = instance_state(obj) - return manager.has_parent(state, key, optimistic) - - -def register_attribute(class_, key, **kw): - comparator = kw.pop('comparator', None) - parententity = kw.pop('parententity', None) - doc = kw.pop('doc', None) - desc = register_descriptor(class_, key, - comparator, parententity, doc=doc) - register_attribute_impl(class_, key, **kw) - return desc - - -def register_attribute_impl(class_, key, - uselist=False, callable_=None, - useobject=False, - impl_class=None, backref=None, **kw): - - manager = manager_of_class(class_) - if uselist: - factory = kw.pop('typecallable', None) - typecallable = manager.instrument_collection_class( - key, factory or list) - else: - typecallable = kw.pop('typecallable', None) - - dispatch = manager[key].dispatch - - if impl_class: - impl = impl_class(class_, key, typecallable, dispatch, **kw) - elif uselist: - impl = CollectionAttributeImpl(class_, key, callable_, dispatch, - typecallable=typecallable, **kw) - elif useobject: - impl = ScalarObjectAttributeImpl(class_, key, callable_, - dispatch, **kw) - else: - impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw) - - manager[key].impl = impl - - if backref: - backref_listeners(manager[key], backref, uselist) - - manager.post_configure_attribute(key) - return manager[key] - - -def register_descriptor(class_, key, comparator=None, - parententity=None, doc=None): - manager = manager_of_class(class_) - - descriptor = InstrumentedAttribute(class_, key, comparator=comparator, - parententity=parententity) - - descriptor.__doc__ = doc - - manager.instrument_attribute(key, descriptor) - return descriptor - - -def unregister_attribute(class_, key): - manager_of_class(class_).uninstrument_attribute(key) - - -def init_collection(obj, key): - """Initialize a collection attribute and return the collection adapter. - - This function is used to provide direct access to collection internals - for a previously unloaded attribute. e.g.:: - - collection_adapter = init_collection(someobject, 'elements') - for elem in values: - collection_adapter.append_without_event(elem) - - For an easier way to do the above, see - :func:`~sqlalchemy.orm.attributes.set_committed_value`. - - obj is an instrumented object instance. An InstanceState - is accepted directly for backwards compatibility but - this usage is deprecated. - - """ - state = instance_state(obj) - dict_ = state.dict - return init_state_collection(state, dict_, key) - - -def init_state_collection(state, dict_, key): - """Initialize a collection attribute and return the collection adapter.""" - - attr = state.manager[key].impl - user_data = attr.initialize(state, dict_) - return attr.get_collection(state, dict_, user_data) - - -def set_committed_value(instance, key, value): - """Set the value of an attribute with no history events. - - Cancels any previous history present. The value should be - a scalar value for scalar-holding attributes, or - an iterable for any collection-holding attribute. - - This is the same underlying method used when a lazy loader - fires off and loads additional data from the database. 
- In particular, this method can be used by application code - which has loaded additional attributes or collections through - separate queries, which can then be attached to an instance - as though it were part of its original loaded state. - - """ - state, dict_ = instance_state(instance), instance_dict(instance) - state.manager[key].impl.set_committed_value(state, dict_, value) - - -def set_attribute(instance, key, value): - """Set the value of an attribute, firing history events. - - This function may be used regardless of instrumentation - applied directly to the class, i.e. no descriptors are required. - Custom attribute management schemes will need to make usage - of this method to establish attribute state as understood - by SQLAlchemy. - - """ - state, dict_ = instance_state(instance), instance_dict(instance) - state.manager[key].impl.set(state, dict_, value, None) - - -def get_attribute(instance, key): - """Get the value of an attribute, firing any callables required. - - This function may be used regardless of instrumentation - applied directly to the class, i.e. no descriptors are required. - Custom attribute management schemes will need to make usage - of this method to make usage of attribute state as understood - by SQLAlchemy. - - """ - state, dict_ = instance_state(instance), instance_dict(instance) - return state.manager[key].impl.get(state, dict_) - - -def del_attribute(instance, key): - """Delete the value of an attribute, firing history events. - - This function may be used regardless of instrumentation - applied directly to the class, i.e. no descriptors are required. - Custom attribute management schemes will need to make usage - of this method to establish attribute state as understood - by SQLAlchemy. - - """ - state, dict_ = instance_state(instance), instance_dict(instance) - state.manager[key].impl.delete(state, dict_) - - -def flag_modified(instance, key): - """Mark an attribute on an instance as 'modified'. - - This sets the 'modified' flag on the instance and - establishes an unconditional change event for the given attribute. - - """ - state, dict_ = instance_state(instance), instance_dict(instance) - impl = state.manager[key].impl - state._modified_event(dict_, impl, NO_VALUE, force=True) diff --git a/python/sqlalchemy/orm/base.py b/python/sqlalchemy/orm/base.py deleted file mode 100644 index 785bd09d..00000000 --- a/python/sqlalchemy/orm/base.py +++ /dev/null @@ -1,540 +0,0 @@ -# orm/base.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Constants and rudimental functions used throughout the ORM. - -""" - -from .. import util, inspection, exc as sa_exc -from ..sql import expression -from . import exc -import operator - -PASSIVE_NO_RESULT = util.symbol( - 'PASSIVE_NO_RESULT', - """Symbol returned by a loader callable or other attribute/history - retrieval operation when a value could not be determined, based - on loader callable flags. - """ -) - -ATTR_WAS_SET = util.symbol( - 'ATTR_WAS_SET', - """Symbol returned by a loader callable to indicate the - retrieved value, or values, were assigned to their attributes - on the target object. 
- """ -) - -ATTR_EMPTY = util.symbol( - 'ATTR_EMPTY', - """Symbol used internally to indicate an attribute had no callable.""" -) - -NO_VALUE = util.symbol( - 'NO_VALUE', - """Symbol which may be placed as the 'previous' value of an attribute, - indicating no value was loaded for an attribute when it was modified, - and flags indicated we were not to load it. - """ -) - -NEVER_SET = util.symbol( - 'NEVER_SET', - """Symbol which may be placed as the 'previous' value of an attribute - indicating that the attribute had not been assigned to previously. - """ -) - -NO_CHANGE = util.symbol( - "NO_CHANGE", - """No callables or SQL should be emitted on attribute access - and no state should change - """, canonical=0 -) - -CALLABLES_OK = util.symbol( - "CALLABLES_OK", - """Loader callables can be fired off if a value - is not present. - """, canonical=1 -) - -SQL_OK = util.symbol( - "SQL_OK", - """Loader callables can emit SQL at least on scalar value attributes.""", - canonical=2 -) - -RELATED_OBJECT_OK = util.symbol( - "RELATED_OBJECT_OK", - """Callables can use SQL to load related objects as well - as scalar value attributes. - """, canonical=4 -) - -INIT_OK = util.symbol( - "INIT_OK", - """Attributes should be initialized with a blank - value (None or an empty collection) upon get, if no other - value can be obtained. - """, canonical=8 -) - -NON_PERSISTENT_OK = util.symbol( - "NON_PERSISTENT_OK", - """Callables can be emitted if the parent is not persistent.""", - canonical=16 -) - -LOAD_AGAINST_COMMITTED = util.symbol( - "LOAD_AGAINST_COMMITTED", - """Callables should use committed values as primary/foreign keys during a - load. - """, canonical=32 -) - -NO_AUTOFLUSH = util.symbol( - "NO_AUTOFLUSH", - """Loader callables should disable autoflush.""", - canonical=64 -) - -# pre-packaged sets of flags used as inputs -PASSIVE_OFF = util.symbol( - "PASSIVE_OFF", - "Callables can be emitted in all cases.", - canonical=(RELATED_OBJECT_OK | NON_PERSISTENT_OK | - INIT_OK | CALLABLES_OK | SQL_OK) -) -PASSIVE_RETURN_NEVER_SET = util.symbol( - "PASSIVE_RETURN_NEVER_SET", - """PASSIVE_OFF ^ INIT_OK""", - canonical=PASSIVE_OFF ^ INIT_OK -) -PASSIVE_NO_INITIALIZE = util.symbol( - "PASSIVE_NO_INITIALIZE", - "PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK", - canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK -) -PASSIVE_NO_FETCH = util.symbol( - "PASSIVE_NO_FETCH", - "PASSIVE_OFF ^ SQL_OK", - canonical=PASSIVE_OFF ^ SQL_OK -) -PASSIVE_NO_FETCH_RELATED = util.symbol( - "PASSIVE_NO_FETCH_RELATED", - "PASSIVE_OFF ^ RELATED_OBJECT_OK", - canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK -) -PASSIVE_ONLY_PERSISTENT = util.symbol( - "PASSIVE_ONLY_PERSISTENT", - "PASSIVE_OFF ^ NON_PERSISTENT_OK", - canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK -) - -DEFAULT_MANAGER_ATTR = '_sa_class_manager' -DEFAULT_STATE_ATTR = '_sa_instance_state' -_INSTRUMENTOR = ('mapper', 'instrumentor') - -EXT_CONTINUE = util.symbol('EXT_CONTINUE') -EXT_STOP = util.symbol('EXT_STOP') - -ONETOMANY = util.symbol( - 'ONETOMANY', - """Indicates the one-to-many direction for a :func:`.relationship`. - - This symbol is typically used by the internals but may be exposed within - certain API features. - - """) - -MANYTOONE = util.symbol( - 'MANYTOONE', - """Indicates the many-to-one direction for a :func:`.relationship`. - - This symbol is typically used by the internals but may be exposed within - certain API features. - - """) - -MANYTOMANY = util.symbol( - 'MANYTOMANY', - """Indicates the many-to-many direction for a :func:`.relationship`. 
- - This symbol is typically used by the internals but may be exposed within - certain API features. - - """) - -NOT_EXTENSION = util.symbol( - 'NOT_EXTENSION', - """Symbol indicating an :class:`InspectionAttr` that's - not part of sqlalchemy.ext. - - Is assigned to the :attr:`.InspectionAttr.extension_type` - attibute. - - """) - -_never_set = frozenset([NEVER_SET]) - -_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT]) - -_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED") - -_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE") - - -def _generative(*assertions): - """Mark a method as generative, e.g. method-chained.""" - - @util.decorator - def generate(fn, *args, **kw): - self = args[0]._clone() - for assertion in assertions: - assertion(self, fn.__name__) - fn(self, *args[1:], **kw) - return self - return generate - - -# these can be replaced by sqlalchemy.ext.instrumentation -# if augmented class instrumentation is enabled. -def manager_of_class(cls): - return cls.__dict__.get(DEFAULT_MANAGER_ATTR, None) - -instance_state = operator.attrgetter(DEFAULT_STATE_ATTR) - -instance_dict = operator.attrgetter('__dict__') - - -def instance_str(instance): - """Return a string describing an instance.""" - - return state_str(instance_state(instance)) - - -def state_str(state): - """Return a string describing an instance via its InstanceState.""" - - if state is None: - return "None" - else: - return '<%s at 0x%x>' % (state.class_.__name__, id(state.obj())) - - -def state_class_str(state): - """Return a string describing an instance's class via its - InstanceState. - """ - - if state is None: - return "None" - else: - return '<%s>' % (state.class_.__name__, ) - - -def attribute_str(instance, attribute): - return instance_str(instance) + "." + attribute - - -def state_attribute_str(state, attribute): - return state_str(state) + "." + attribute - - -def object_mapper(instance): - """Given an object, return the primary Mapper associated with the object - instance. - - Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError` - if no mapping is configured. - - This function is available via the inspection system as:: - - inspect(instance).mapper - - Using the inspection system will raise - :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is - not part of a mapping. - - """ - return object_state(instance).mapper - - -def object_state(instance): - """Given an object, return the :class:`.InstanceState` - associated with the object. - - Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError` - if no mapping is configured. - - Equivalent functionality is available via the :func:`.inspect` - function as:: - - inspect(instance) - - Using the inspection system will raise - :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is - not part of a mapping. - - """ - state = _inspect_mapped_object(instance) - if state is None: - raise exc.UnmappedInstanceError(instance) - else: - return state - - -@inspection._inspects(object) -def _inspect_mapped_object(instance): - try: - return instance_state(instance) - # TODO: whats the py-2/3 syntax to catch two - # different kinds of exceptions at once ? 
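        # Illustrative sketch of the inspection entry points this hook
        # feeds (hypothetical mapped instance `user`):
        #
        #     from sqlalchemy import inspect
        #     state = inspect(user)   # InstanceState, via object_state()
        #     state.mapper            # same Mapper as object_mapper(user)
        #
        # For a non-mapped object this hook returns None below, so
        # inspect() raises NoInspectionAvailable instead.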
- except exc.UnmappedClassError: - return None - except exc.NO_STATE: - return None - - -def _class_to_mapper(class_or_mapper): - insp = inspection.inspect(class_or_mapper, False) - if insp is not None: - return insp.mapper - else: - raise exc.UnmappedClassError(class_or_mapper) - - -def _mapper_or_none(entity): - """Return the :class:`.Mapper` for the given class or None if the - class is not mapped. - """ - - insp = inspection.inspect(entity, False) - if insp is not None: - return insp.mapper - else: - return None - - -def _is_mapped_class(entity): - """Return True if the given object is a mapped class, - :class:`.Mapper`, or :class:`.AliasedClass`. - """ - - insp = inspection.inspect(entity, False) - return insp is not None and \ - not insp.is_clause_element and \ - ( - insp.is_mapper or insp.is_aliased_class - ) - - -def _attr_as_key(attr): - if hasattr(attr, 'key'): - return attr.key - else: - return expression._column_as_key(attr) - - -def _orm_columns(entity): - insp = inspection.inspect(entity, False) - if hasattr(insp, 'selectable'): - return [c for c in insp.selectable.c] - else: - return [entity] - - -def _is_aliased_class(entity): - insp = inspection.inspect(entity, False) - return insp is not None and \ - getattr(insp, "is_aliased_class", False) - - -def _entity_descriptor(entity, key): - """Return a class attribute given an entity and string name. - - May return :class:`.InstrumentedAttribute` or user-defined - attribute. - - """ - insp = inspection.inspect(entity) - if insp.is_selectable: - description = entity - entity = insp.c - elif insp.is_aliased_class: - entity = insp.entity - description = entity - elif hasattr(insp, "mapper"): - description = entity = insp.mapper.class_ - else: - description = entity - - try: - return getattr(entity, key) - except AttributeError: - raise sa_exc.InvalidRequestError( - "Entity '%s' has no property '%s'" % - (description, key) - ) - -_state_mapper = util.dottedgetter('manager.mapper') - - -@inspection._inspects(type) -def _inspect_mapped_class(class_, configure=False): - try: - class_manager = manager_of_class(class_) - if not class_manager.is_mapped: - return None - mapper = class_manager.mapper - except exc.NO_STATE: - return None - else: - if configure and mapper._new_mappers: - mapper._configure_all() - return mapper - - -def class_mapper(class_, configure=True): - """Given a class, return the primary :class:`.Mapper` associated - with the key. - - Raises :exc:`.UnmappedClassError` if no mapping is configured - on the given class, or :exc:`.ArgumentError` if a non-class - object is passed. - - Equivalent functionality is available via the :func:`.inspect` - function as:: - - inspect(some_mapped_class) - - Using the inspection system will raise - :class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped. - - """ - mapper = _inspect_mapped_class(class_, configure=configure) - if mapper is None: - if not isinstance(class_, type): - raise sa_exc.ArgumentError( - "Class object expected, got '%r'." % (class_, )) - raise exc.UnmappedClassError(class_) - else: - return mapper - - -class InspectionAttr(object): - """A base class applied to all ORM objects that can be returned - by the :func:`.inspect` function. - - The attributes defined here allow the usage of simple boolean - checks to test basic facts about the object returned. 
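
    For instance, code can branch on an inspected subject without importing
    the concrete classes (a sketch; ``subject`` is any inspectable object)::

        from sqlalchemy import inspect

        insp = inspect(subject)
        if insp.is_mapper:
            ...            # a Mapper
        elif insp.is_instance:
            ...            # an InstanceState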
- - While the boolean checks here are basically the same as using - the Python isinstance() function, the flags here can be used without - the need to import all of these classes, and also such that - the SQLAlchemy class system can change while leaving the flags - here intact for forwards-compatibility. - - """ - __slots__ = () - - is_selectable = False - """Return True if this object is an instance of :class:`.Selectable`.""" - - is_aliased_class = False - """True if this object is an instance of :class:`.AliasedClass`.""" - - is_instance = False - """True if this object is an instance of :class:`.InstanceState`.""" - - is_mapper = False - """True if this object is an instance of :class:`.Mapper`.""" - - is_property = False - """True if this object is an instance of :class:`.MapperProperty`.""" - - is_attribute = False - """True if this object is a Python :term:`descriptor`. - - This can refer to one of many types. Usually a - :class:`.QueryableAttribute` which handles attributes events on behalf - of a :class:`.MapperProperty`. But can also be an extension type - such as :class:`.AssociationProxy` or :class:`.hybrid_property`. - The :attr:`.InspectionAttr.extension_type` will refer to a constant - identifying the specific subtype. - - .. seealso:: - - :attr:`.Mapper.all_orm_descriptors` - - """ - - is_clause_element = False - """True if this object is an instance of :class:`.ClauseElement`.""" - - extension_type = NOT_EXTENSION - """The extension type, if any. - Defaults to :data:`.interfaces.NOT_EXTENSION` - - .. versionadded:: 0.8.0 - - .. seealso:: - - :data:`.HYBRID_METHOD` - - :data:`.HYBRID_PROPERTY` - - :data:`.ASSOCIATION_PROXY` - - """ - - -class InspectionAttrInfo(InspectionAttr): - """Adds the ``.info`` attribute to :class:`.InspectionAttr`. - - The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo` - is that the former is compatible as a mixin for classes that specify - ``__slots__``; this is essentially an implementation artifact. - - """ - - @util.memoized_property - def info(self): - """Info dictionary associated with the object, allowing user-defined - data to be associated with this :class:`.InspectionAttr`. - - The dictionary is generated when first accessed. Alternatively, - it can be specified as a constructor argument to the - :func:`.column_property`, :func:`.relationship`, or :func:`.composite` - functions. - - .. versionadded:: 0.8 Added support for .info to all - :class:`.MapperProperty` subclasses. - - .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also - available on extension types via the - :attr:`.InspectionAttrInfo.info` attribute, so that it can apply - to a wider variety of ORM and extension constructs. - - .. seealso:: - - :attr:`.QueryableAttribute.info` - - :attr:`.SchemaItem.info` - - """ - return {} - - -class _MappedAttribute(object): - """Mixin for attributes which should be replaced by mapper-assigned - attributes. - - """ - __slots__ = () diff --git a/python/sqlalchemy/orm/collections.py b/python/sqlalchemy/orm/collections.py deleted file mode 100644 index 4f988a8d..00000000 --- a/python/sqlalchemy/orm/collections.py +++ /dev/null @@ -1,1579 +0,0 @@ -# orm/collections.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for collections of mapped entities. 
-
-The collections package supplies the machinery used to inform the ORM of
-collection membership changes. An instrumentation via decoration approach is
-used, allowing arbitrary types (including built-ins) to be used as entity
-collections without requiring inheritance from a base class.
-
-Instrumentation decoration relays membership change events to the
-:class:`.CollectionAttributeImpl` that is currently managing the collection.
-The decorators observe function call arguments and return values, tracking
-entities entering or leaving the collection. Two decorator approaches are
-provided. One is a bundle of generic decorators that map function arguments
-and return values to events::
-
-    from sqlalchemy.orm.collections import collection
-    class MyClass(object):
-        # ...
-
-        @collection.adds(1)
-        def store(self, item):
-            self.data.append(item)
-
-        @collection.removes_return()
-        def pop(self):
-            return self.data.pop()
-
-
-The second approach is a bundle of targeted decorators that wrap appropriate
-append and remove notifiers around the mutation methods present in the
-standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
-specified in terms of generic decorator recipes, but are instead hand-tooled
-for increased efficiency. The targeted decorators occasionally implement
-adapter-like behavior, such as mapping bulk-set methods (``extend``,
-``update``, ``__setslice__``, etc.) into the series of atomic mutation events
-that the ORM requires.
-
-The targeted decorators are used internally for automatic instrumentation of
-entity collection classes. Every collection class goes through a
-transformation process roughly like so:
-
-1. If the class is a built-in, substitute a trivial sub-class
-2. Is this class already instrumented?
-3. Add in generic decorators
-4. Sniff out the collection interface through duck-typing
-5. Add targeted decoration to any undecorated interface method
-
-This process modifies the class at runtime, decorating methods and adding some
-bookkeeping properties. This isn't possible (or desirable) for built-in
-classes like ``list``, so trivial sub-classes are substituted to hold
-decoration::
-
-    class InstrumentedList(list):
-        pass
-
-Collection classes can be specified in ``relationship(collection_class=)`` as
-types or a function that returns an instance. Collection classes are
-inspected and instrumented during the mapper compilation phase. The
-collection_class callable will be executed once to produce a specimen
-instance, and the type of that specimen will be instrumented. Functions that
-return built-in types like ``lists`` will be adapted to produce instrumented
-instances.
-
-When extending a known type like ``list``, additional decorations are
-generally not needed. Odds are, the extension method will delegate to a
-method that's already instrumented. For example::
-
-    class QueueIsh(list):
-        def push(self, item):
-            self.append(item)
-        def shift(self):
-            return self.pop(0)
-
-There's no need to decorate these methods. ``append`` and ``pop`` are already
-instrumented as part of the ``list`` interface. Decorating them would fire
-duplicate events, which should be avoided.
-
-The targeted decoration tries not to rely on other methods in the underlying
-collection class, but some are unavoidable. Many depend on 'read' methods
-being present to properly instrument a 'write', for example, ``__setitem__``
-needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
-be reimplemented in terms of atomic appends and removes, so the ``extend``
-decoration will actually perform many ``append`` operations and not call the
-underlying method at all.
-
-Tight control over bulk operation and the firing of events is also possible by
-implementing the instrumentation internally in your methods. The basic
-instrumentation package works under the general assumption that collection
-mutation will not raise unusual exceptions. If you want to closely
-orchestrate append and remove events with exception management, internal
-instrumentation may be the answer. Within your method,
-``collection_adapter(self)`` will retrieve an object that you can use for
-explicit control over triggering append and remove events.
-
-The owning object and :class:`.CollectionAttributeImpl` are also reachable
-through the adapter, allowing for some very sophisticated behavior.
-
-"""
-
-import inspect
-import operator
-import weakref
-
-from ..sql import expression
-from .. import util, exc as sa_exc
-from . import base
-
-
-__all__ = ['collection', 'collection_adapter',
-           'mapped_collection', 'column_mapped_collection',
-           'attribute_mapped_collection']
-
-__instrumentation_mutex = util.threading.Lock()
-
-
-class _PlainColumnGetter(object):
-    """Plain column getter, stores collection of Column objects
-    directly.
-
-    Serializes to a :class:`._SerializableColumnGetterV2`
-    which has more expensive __call__() performance
-    and some rare caveats.
-
-    """
-
-    def __init__(self, cols):
-        self.cols = cols
-        self.composite = len(cols) > 1
-
-    def __reduce__(self):
-        return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
-
-    def _cols(self, mapper):
-        return self.cols
-
-    def __call__(self, value):
-        state = base.instance_state(value)
-        m = base._state_mapper(state)
-
-        key = [
-            m._get_state_attr_by_column(state, state.dict, col)
-            for col in self._cols(m)
-        ]
-
-        if self.composite:
-            return tuple(key)
-        else:
-            return key[0]
-
-
-class _SerializableColumnGetter(object):
-    """Column-based getter used in version 0.7.6 only.
-
-    Remains here for pickle compatibility with 0.7.6.
-
-    """
-
-    def __init__(self, colkeys):
-        self.colkeys = colkeys
-        self.composite = len(colkeys) > 1
-
-    def __reduce__(self):
-        return _SerializableColumnGetter, (self.colkeys,)
-
-    def __call__(self, value):
-        state = base.instance_state(value)
-        m = base._state_mapper(state)
-        key = [m._get_state_attr_by_column(
-            state, state.dict,
-            m.mapped_table.columns[k])
-            for k in self.colkeys]
-        if self.composite:
-            return tuple(key)
-        else:
-            return key[0]
-
-
-class _SerializableColumnGetterV2(_PlainColumnGetter):
-    """Updated serializable getter which deals with
-    multi-table mapped classes.
-
-    Two extremely unusual cases are not supported.
-    Mappings which have tables across multiple metadata
-    objects, or which are mapped to non-Table selectables
-    linked across inheriting mappers may fail to function
-    here.
- - """ - - def __init__(self, colkeys): - self.colkeys = colkeys - self.composite = len(colkeys) > 1 - - def __reduce__(self): - return self.__class__, (self.colkeys,) - - @classmethod - def _reduce_from_cols(cls, cols): - def _table_key(c): - if not isinstance(c.table, expression.TableClause): - return None - else: - return c.table.key - colkeys = [(c.key, _table_key(c)) for c in cols] - return _SerializableColumnGetterV2, (colkeys,) - - def _cols(self, mapper): - cols = [] - metadata = getattr(mapper.local_table, 'metadata', None) - for (ckey, tkey) in self.colkeys: - if tkey is None or \ - metadata is None or \ - tkey not in metadata: - cols.append(mapper.local_table.c[ckey]) - else: - cols.append(metadata.tables[tkey].c[ckey]) - return cols - - -def column_mapped_collection(mapping_spec): - """A dictionary-based collection type with column-based keying. - - Returns a :class:`.MappedCollection` factory with a keying function - generated from mapping_spec, which may be a Column or a sequence - of Columns. - - The key value must be immutable for the lifetime of the object. You - can not, for example, map on foreign key values if those key values will - change during the session, i.e. from None to a database-assigned integer - after a session flush. - - """ - cols = [expression._only_column_elements(q, "mapping_spec") - for q in util.to_list(mapping_spec) - ] - keyfunc = _PlainColumnGetter(cols) - return lambda: MappedCollection(keyfunc) - - -class _SerializableAttrGetter(object): - def __init__(self, name): - self.name = name - self.getter = operator.attrgetter(name) - - def __call__(self, target): - return self.getter(target) - - def __reduce__(self): - return _SerializableAttrGetter, (self.name, ) - - -def attribute_mapped_collection(attr_name): - """A dictionary-based collection type with attribute-based keying. - - Returns a :class:`.MappedCollection` factory with a keying based on the - 'attr_name' attribute of entities in the collection, where ``attr_name`` - is the string name of the attribute. - - The key value must be immutable for the lifetime of the object. You - can not, for example, map on foreign key values if those key values will - change during the session, i.e. from None to a database-assigned integer - after a session flush. - - """ - getter = _SerializableAttrGetter(attr_name) - return lambda: MappedCollection(getter) - - -def mapped_collection(keyfunc): - """A dictionary-based collection type with arbitrary keying. - - Returns a :class:`.MappedCollection` factory with a keying function - generated from keyfunc, a callable that takes an entity and returns a - key value. - - The key value must be immutable for the lifetime of the object. You - can not, for example, map on foreign key values if those key values will - change during the session, i.e. from None to a database-assigned integer - after a session flush. - - """ - return lambda: MappedCollection(keyfunc) - - -class collection(object): - """Decorators for entity collection classes. - - The decorators fall into two groups: annotations and interception recipes. - - The annotating decorators (appender, remover, iterator, linker, converter, - internally_instrumented) indicate the method's purpose and take no - arguments. They are not written with parens:: - - @collection.appender - def append(self, append): ... - - The recipe decorators all require parens, even those that take no - arguments:: - - @collection.adds('entity') - def insert(self, position, entity): ... 
-
-        @collection.removes_return()
-        def popitem(self): ...
-
-    """
-    # Bundled as a class solely for ease of use: packaging, doc strings,
-    # importability.
-
-    @staticmethod
-    def appender(fn):
-        """Tag the method as the collection appender.
-
-        The appender method is called with one positional argument: the value
-        to append. The method will be automatically decorated with 'adds(1)'
-        if not already decorated::
-
-            @collection.appender
-            def add(self, append): ...
-
-            # or, equivalently
-            @collection.appender
-            @collection.adds(1)
-            def add(self, append): ...
-
-            # for mapping type, an 'append' may kick out a previous value
-            # that occupies that slot. consider d['a'] = 'foo'- any previous
-            # value in d['a'] is discarded.
-            @collection.appender
-            @collection.replaces(1)
-            def add(self, entity):
-                key = some_key_func(entity)
-                previous = None
-                if key in self:
-                    previous = self[key]
-                self[key] = entity
-                return previous
-
-        If the value to append is not allowed in the collection, you may
-        raise an exception. Something to remember is that the appender
-        will be called for each object mapped by a database query. If the
-        database contains rows that violate your collection semantics, you
-        will need to get creative to fix the problem, as access via the
-        collection will not work.
-
-        If the appender method is internally instrumented, you must also
-        receive the keyword argument '_sa_initiator' and ensure its
-        promulgation to collection events.
-
-        """
-        fn._sa_instrument_role = 'appender'
-        return fn
-
-    @staticmethod
-    def remover(fn):
-        """Tag the method as the collection remover.
-
-        The remover method is called with one positional argument: the value
-        to remove. The method will be automatically decorated with
-        :meth:`removes_return` if not already decorated::
-
-            @collection.remover
-            def zap(self, entity): ...
-
-            # or, equivalently
-            @collection.remover
-            @collection.removes_return()
-            def zap(self): ...
-
-        If the value to remove is not present in the collection, you may
-        raise an exception or return None to ignore the error.
-
-        If the remove method is internally instrumented, you must also
-        receive the keyword argument '_sa_initiator' and ensure its
-        promulgation to collection events.
-
-        """
-        fn._sa_instrument_role = 'remover'
-        return fn
-
-    @staticmethod
-    def iterator(fn):
-        """Tag the method as the collection iterator.
-
-        The iterator method is called with no arguments. It is expected to
-        return an iterator over all collection members::
-
-            @collection.iterator
-            def __iter__(self): ...
-
-        """
-        fn._sa_instrument_role = 'iterator'
-        return fn
-
-    @staticmethod
-    def internally_instrumented(fn):
-        """Tag the method as instrumented.
-
-        This tag will prevent any decoration from being applied to the
-        method. Use this if you are orchestrating your own calls to
-        :func:`.collection_adapter` in one of the basic SQLAlchemy
-        interface methods, or to prevent an automatic ABC method
-        decoration from wrapping your implementation::
-
-            # normally an 'extend' method on a list-like class would be
-            # automatically intercepted and re-implemented in terms of
-            # SQLAlchemy events and append(). your implementation will
-            # never be called, unless:
-            @collection.internally_instrumented
-            def extend(self, items): ...
-
-        """
-        fn._sa_instrumented = True
-        return fn
-
-    @staticmethod
-    def linker(fn):
-        """Tag the method as a "linked to attribute" event handler.
-
-        This optional event handler will be called when the collection class
-        is linked to or unlinked from the InstrumentedAttribute. It is
-        invoked immediately after the '_sa_adapter' property is set on
-        the instance. A single argument is passed: the collection adapter
-        that has been linked, or None if unlinking.
-
-        .. deprecated:: 1.0.0 - the :meth:`.collection.linker` handler
-           is superseded by the :meth:`.AttributeEvents.init_collection`
-           and :meth:`.AttributeEvents.dispose_collection` handlers.
-
-        """
-        fn._sa_instrument_role = 'linker'
-        return fn
-
-    link = linker
-    """deprecated; synonym for :meth:`.collection.linker`."""
-
-    @staticmethod
-    def converter(fn):
-        """Tag the method as the collection converter.
-
-        This optional method will be called when a collection is being
-        replaced entirely, as in::
-
-            myobj.acollection = [newvalue1, newvalue2]
-
-        The converter method will receive the object being assigned and should
-        return an iterable of values suitable for use by the ``appender``
-        method. A converter must not assign values or mutate the collection;
-        its sole job is to adapt the value the user provides into an iterable
-        of values for the ORM's use.
-
-        The default converter implementation will use duck-typing to do the
-        conversion. A dict-like collection will be converted into an iterable
-        of dictionary values, and other types will simply be iterated::
-
-            @collection.converter
-            def convert(self, other): ...
-
-        If the duck-typing of the object does not match the type of this
-        collection, a TypeError is raised.
-
-        Supply an implementation of this method if you want to expand the
-        range of possible types that can be assigned in bulk or perform
-        validation on the values about to be assigned.
-
-        """
-        fn._sa_instrument_role = 'converter'
-        return fn
-
-    @staticmethod
-    def adds(arg):
-        """Mark the method as adding an entity to the collection.
-
-        Adds "add to collection" handling to the method. The decorator
-        argument indicates which method argument holds the SQLAlchemy-relevant
-        value. Arguments can be specified positionally (i.e. integer) or by
-        name::
-
-            @collection.adds(1)
-            def push(self, item): ...
-
-            @collection.adds('entity')
-            def do_stuff(self, thing, entity=None): ...
-
-        """
-        def decorator(fn):
-            fn._sa_instrument_before = ('fire_append_event', arg)
-            return fn
-        return decorator
-
-    @staticmethod
-    def replaces(arg):
-        """Mark the method as replacing an entity in the collection.
-
-        Adds "add to collection" and "remove from collection" handling to
-        the method. The decorator argument indicates which method argument
-        holds the SQLAlchemy-relevant value to be added, and the return
-        value, if any, will be considered the value to remove.
-
-        Arguments can be specified positionally (i.e. integer) or by name::
-
-            @collection.replaces(2)
-            def __setitem__(self, index, item): ...
-
-        """
-        def decorator(fn):
-            fn._sa_instrument_before = ('fire_append_event', arg)
-            fn._sa_instrument_after = 'fire_remove_event'
-            return fn
-        return decorator
-
-    @staticmethod
-    def removes(arg):
-        """Mark the method as removing an entity in the collection.
-
-        Adds "remove from collection" handling to the method. The decorator
-        argument indicates which method argument holds the SQLAlchemy-relevant
-        value to be removed. Arguments can be specified positionally (i.e.
-        integer) or by name::
-
-            @collection.removes(1)
-            def zap(self, item): ...
-
-        For methods where the value to remove is not known at call-time, use
-        collection.removes_return.
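
        A sketch of the two flavors side by side, for a hypothetical
        list-like class::

            @collection.removes(1)
            def discard(self, item): ...   # value known up front

            @collection.removes_return()
            def pop(self): ...             # value known only afterwards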
- - """ - def decorator(fn): - fn._sa_instrument_before = ('fire_remove_event', arg) - return fn - return decorator - - @staticmethod - def removes_return(): - """Mark the method as removing an entity in the collection. - - Adds "remove from collection" handling to the method. The return - value of the method, if any, is considered the value to remove. The - method arguments are not inspected:: - - @collection.removes_return() - def pop(self): ... - - For methods where the value to remove is known at call-time, use - collection.remove. - - """ - def decorator(fn): - fn._sa_instrument_after = 'fire_remove_event' - return fn - return decorator - - -collection_adapter = operator.attrgetter('_sa_adapter') -"""Fetch the :class:`.CollectionAdapter` for a collection.""" - - -class CollectionAdapter(object): - """Bridges between the ORM and arbitrary Python collections. - - Proxies base-level collection operations (append, remove, iterate) - to the underlying Python collection, and emits add/remove events for - entities entering or leaving the collection. - - The ORM uses :class:`.CollectionAdapter` exclusively for interaction with - entity collections. - - - """ - invalidated = False - - def __init__(self, attr, owner_state, data): - self._key = attr.key - self._data = weakref.ref(data) - self.owner_state = owner_state - data._sa_adapter = self - - def _warn_invalidated(self): - util.warn("This collection has been invalidated.") - - @property - def data(self): - "The entity collection being adapted." - return self._data() - - @property - def _referenced_by_owner(self): - """return True if the owner state still refers to this collection. - - This will return False within a bulk replace operation, - where this collection is the one being replaced. - - """ - return self.owner_state.dict[self._key] is self._data() - - @util.memoized_property - def attr(self): - return self.owner_state.manager[self._key].impl - - def adapt_like_to_iterable(self, obj): - """Converts collection-compatible objects to an iterable of values. - - Can be passed any type of object, and if the underlying collection - determines that it can be adapted into a stream of values it can - use, returns an iterable of values suitable for append()ing. - - This method may raise TypeError or any other suitable exception - if adaptation fails. - - If a converter implementation is not supplied on the collection, - a default duck-typing-based implementation is used. - - """ - converter = self._data()._sa_converter - if converter is not None: - return converter(obj) - - setting_type = util.duck_type_collection(obj) - receiving_type = util.duck_type_collection(self._data()) - - if obj is None or setting_type != receiving_type: - given = obj is None and 'None' or obj.__class__.__name__ - if receiving_type is None: - wanted = self._data().__class__.__name__ - else: - wanted = receiving_type.__name__ - - raise TypeError( - "Incompatible collection type: %s is not %s-like" % ( - given, wanted)) - - # If the object is an adapted collection, return the (iterable) - # adapter. 
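        # Illustrative sketch of the duck-typing contract enforced above,
        # for a hypothetical list-based `parent.children` relationship:
        #
        #     parent.children = [a, b]     # list -> list-like: accepted
        #     parent.children = {'k': a}   # dict -> list-like: TypeError
        #
        # A dict-like source is reduced to its values() below, since only
        # the member entities matter to the ORM.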
-        if getattr(obj, '_sa_adapter', None) is not None:
-            return obj._sa_adapter
-        elif setting_type == dict:
-            if util.py3k:
-                return obj.values()
-            else:
-                return getattr(obj, 'itervalues', obj.values)()
-        else:
-            return iter(obj)
-
-    def append_with_event(self, item, initiator=None):
-        """Add an entity to the collection, firing mutation events."""
-
-        self._data()._sa_appender(item, _sa_initiator=initiator)
-
-    def append_without_event(self, item):
-        """Add or restore an entity to the collection, firing no events."""
-        self._data()._sa_appender(item, _sa_initiator=False)
-
-    def append_multiple_without_event(self, items):
-        """Add or restore an entity to the collection, firing no events."""
-        appender = self._data()._sa_appender
-        for item in items:
-            appender(item, _sa_initiator=False)
-
-    def remove_with_event(self, item, initiator=None):
-        """Remove an entity from the collection, firing mutation events."""
-        self._data()._sa_remover(item, _sa_initiator=initiator)
-
-    def remove_without_event(self, item):
-        """Remove an entity from the collection, firing no events."""
-        self._data()._sa_remover(item, _sa_initiator=False)
-
-    def clear_with_event(self, initiator=None):
-        """Empty the collection, firing a mutation event for each entity."""
-
-        remover = self._data()._sa_remover
-        for item in list(self):
-            remover(item, _sa_initiator=initiator)
-
-    def clear_without_event(self):
-        """Empty the collection, firing no events."""
-
-        remover = self._data()._sa_remover
-        for item in list(self):
-            remover(item, _sa_initiator=False)
-
-    def __iter__(self):
-        """Iterate over entities in the collection."""
-
-        return iter(self._data()._sa_iterator())
-
-    def __len__(self):
-        """Count entities in the collection."""
-        return len(list(self._data()._sa_iterator()))
-
-    def __bool__(self):
-        return True
-
-    __nonzero__ = __bool__
-
-    def fire_append_event(self, item, initiator=None):
-        """Notify that an entity has entered the collection.
-
-        Initiator is a token owned by the InstrumentedAttribute that
-        initiated the membership mutation, and should be left as None
-        unless you are passing along an initiator value from a chained
-        operation.
-
-        """
-        if initiator is not False:
-            if self.invalidated:
-                self._warn_invalidated()
-            return self.attr.fire_append_event(
-                self.owner_state,
-                self.owner_state.dict,
-                item, initiator)
-        else:
-            return item
-
-    def fire_remove_event(self, item, initiator=None):
-        """Notify that an entity has been removed from the collection.
-
-        Initiator is the InstrumentedAttribute that initiated the membership
-        mutation, and should be left as None unless you are passing along
-        an initiator value from a chained operation.
-
-        """
-        if initiator is not False:
-            if self.invalidated:
-                self._warn_invalidated()
-            self.attr.fire_remove_event(
-                self.owner_state,
-                self.owner_state.dict,
-                item, initiator)
-
-    def fire_pre_remove_event(self, initiator=None):
-        """Notify that an entity is about to be removed from the collection.
-
-        Only called if the entity cannot be removed after calling
-        fire_remove_event().
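
        A ``pop()``-style wrapper is the typical caller, since the value is
        known only after the underlying method runs (this mirrors the ``pop``
        wrapper in ``_list_decorators`` below)::

            def pop(self, index=-1):
                __before_delete(self)   # -> fire_pre_remove_event()
                item = fn(self, index)
                __del(self, item)       # -> fire_remove_event()
                return item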
- - """ - if self.invalidated: - self._warn_invalidated() - self.attr.fire_pre_remove_event( - self.owner_state, - self.owner_state.dict, - initiator=initiator) - - def __getstate__(self): - return {'key': self._key, - 'owner_state': self.owner_state, - 'data': self.data} - - def __setstate__(self, d): - self._key = d['key'] - self.owner_state = d['owner_state'] - self._data = weakref.ref(d['data']) - - -def bulk_replace(values, existing_adapter, new_adapter): - """Load a new collection, firing events based on prior like membership. - - Appends instances in ``values`` onto the ``new_adapter``. Events will be - fired for any instance not present in the ``existing_adapter``. Any - instances in ``existing_adapter`` not present in ``values`` will have - remove events fired upon them. - - :param values: An iterable of collection member instances - - :param existing_adapter: A :class:`.CollectionAdapter` of - instances to be replaced - - :param new_adapter: An empty :class:`.CollectionAdapter` - to load with ``values`` - - - """ - if not isinstance(values, list): - values = list(values) - - idset = util.IdentitySet - existing_idset = idset(existing_adapter or ()) - constants = existing_idset.intersection(values or ()) - additions = idset(values or ()).difference(constants) - removals = existing_idset.difference(constants) - - for member in values or (): - if member in additions: - new_adapter.append_with_event(member) - elif member in constants: - new_adapter.append_without_event(member) - - if existing_adapter: - for member in removals: - existing_adapter.remove_with_event(member) - - -def prepare_instrumentation(factory): - """Prepare a callable for future use as a collection class factory. - - Given a collection class factory (either a type or no-arg callable), - return another factory that will produce compatible instances when - called. - - This function is responsible for converting collection_class=list - into the run-time behavior of collection_class=InstrumentedList. - - """ - # Convert a builtin to 'Instrumented*' - if factory in __canned_instrumentation: - factory = __canned_instrumentation[factory] - - # Create a specimen - cls = type(factory()) - - # Did factory callable return a builtin? - if cls in __canned_instrumentation: - # Wrap it so that it returns our 'Instrumented*' - factory = __converting_factory(cls, factory) - cls = factory() - - # Instrument the class if needed. - if __instrumentation_mutex.acquire(): - try: - if getattr(cls, '_sa_instrumented', None) != id(cls): - _instrument_class(cls) - finally: - __instrumentation_mutex.release() - - return factory - - -def __converting_factory(specimen_cls, original_factory): - """Return a wrapper that converts a "canned" collection like - set, dict, list into the Instrumented* version. - - """ - - instrumented_cls = __canned_instrumentation[specimen_cls] - - def wrapper(): - collection = original_factory() - return instrumented_cls(collection) - - # often flawed but better than nothing - wrapper.__name__ = "%sWrapper" % original_factory.__name__ - wrapper.__doc__ = original_factory.__doc__ - - return wrapper - - -def _instrument_class(cls): - """Modify methods in a class and install instrumentation.""" - - # In the normal call flow, a request for any of the 3 basic collection - # types is transformed into one of our trivial subclasses - # (e.g. InstrumentedList). Catch anything else that sneaks in here... - if cls.__module__ == '__builtin__': - raise sa_exc.ArgumentError( - "Can not instrument a built-in type. 
Use a " - "subclass, even a trivial one.") - - roles, methods = _locate_roles_and_methods(cls) - - _setup_canned_roles(cls, roles, methods) - - _assert_required_roles(cls, roles, methods) - - _set_collection_attributes(cls, roles, methods) - - -def _locate_roles_and_methods(cls): - """search for _sa_instrument_role-decorated methods in - method resolution order, assign to roles. - - """ - - roles = {} - methods = {} - - for supercls in cls.__mro__: - for name, method in vars(supercls).items(): - if not util.callable(method): - continue - - # note role declarations - if hasattr(method, '_sa_instrument_role'): - role = method._sa_instrument_role - assert role in ('appender', 'remover', 'iterator', - 'linker', 'converter') - roles.setdefault(role, name) - - # transfer instrumentation requests from decorated function - # to the combined queue - before, after = None, None - if hasattr(method, '_sa_instrument_before'): - op, argument = method._sa_instrument_before - assert op in ('fire_append_event', 'fire_remove_event') - before = op, argument - if hasattr(method, '_sa_instrument_after'): - op = method._sa_instrument_after - assert op in ('fire_append_event', 'fire_remove_event') - after = op - if before: - methods[name] = before + (after, ) - elif after: - methods[name] = None, None, after - return roles, methods - - -def _setup_canned_roles(cls, roles, methods): - """see if this class has "canned" roles based on a known - collection type (dict, set, list). Apply those roles - as needed to the "roles" dictionary, and also - prepare "decorator" methods - - """ - collection_type = util.duck_type_collection(cls) - if collection_type in __interfaces: - canned_roles, decorators = __interfaces[collection_type] - for role, name in canned_roles.items(): - roles.setdefault(role, name) - - # apply ABC auto-decoration to methods that need it - for method, decorator in decorators.items(): - fn = getattr(cls, method, None) - if (fn and method not in methods and - not hasattr(fn, '_sa_instrumented')): - setattr(cls, method, decorator(fn)) - - -def _assert_required_roles(cls, roles, methods): - """ensure all roles are present, and apply implicit instrumentation if - needed - - """ - if 'appender' not in roles or not hasattr(cls, roles['appender']): - raise sa_exc.ArgumentError( - "Type %s must elect an appender method to be " - "a collection class" % cls.__name__) - elif (roles['appender'] not in methods and - not hasattr(getattr(cls, roles['appender']), '_sa_instrumented')): - methods[roles['appender']] = ('fire_append_event', 1, None) - - if 'remover' not in roles or not hasattr(cls, roles['remover']): - raise sa_exc.ArgumentError( - "Type %s must elect a remover method to be " - "a collection class" % cls.__name__) - elif (roles['remover'] not in methods and - not hasattr(getattr(cls, roles['remover']), '_sa_instrumented')): - methods[roles['remover']] = ('fire_remove_event', 1, None) - - if 'iterator' not in roles or not hasattr(cls, roles['iterator']): - raise sa_exc.ArgumentError( - "Type %s must elect an iterator method to be " - "a collection class" % cls.__name__) - - -def _set_collection_attributes(cls, roles, methods): - """apply ad-hoc instrumentation from decorators, class-level defaults - and implicit role declarations - - """ - for method_name, (before, argument, after) in methods.items(): - setattr(cls, method_name, - _instrument_membership_mutator(getattr(cls, method_name), - before, argument, after)) - # intern the role map - for role, method_name in roles.items(): - setattr(cls, '_sa_%s' % 
role, getattr(cls, method_name)) - - cls._sa_adapter = None - - if not hasattr(cls, '_sa_converter'): - cls._sa_converter = None - cls._sa_instrumented = id(cls) - - -def _instrument_membership_mutator(method, before, argument, after): - """Route method args and/or return value through the collection - adapter.""" - # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))' - if before: - fn_args = list(util.flatten_iterator(inspect.getargspec(method)[0])) - if isinstance(argument, int): - pos_arg = argument - named_arg = len(fn_args) > argument and fn_args[argument] or None - else: - if argument in fn_args: - pos_arg = fn_args.index(argument) - else: - pos_arg = None - named_arg = argument - del fn_args - - def wrapper(*args, **kw): - if before: - if pos_arg is None: - if named_arg not in kw: - raise sa_exc.ArgumentError( - "Missing argument %s" % argument) - value = kw[named_arg] - else: - if len(args) > pos_arg: - value = args[pos_arg] - elif named_arg in kw: - value = kw[named_arg] - else: - raise sa_exc.ArgumentError( - "Missing argument %s" % argument) - - initiator = kw.pop('_sa_initiator', None) - if initiator is False: - executor = None - else: - executor = args[0]._sa_adapter - - if before and executor: - getattr(executor, before)(value, initiator) - - if not after or not executor: - return method(*args, **kw) - else: - res = method(*args, **kw) - if res is not None: - getattr(executor, after)(res, initiator) - return res - - wrapper._sa_instrumented = True - if hasattr(method, "_sa_instrument_role"): - wrapper._sa_instrument_role = method._sa_instrument_role - wrapper.__name__ = method.__name__ - wrapper.__doc__ = method.__doc__ - return wrapper - - -def __set(collection, item, _sa_initiator=None): - """Run set events, may eventually be inlined into decorators.""" - - if _sa_initiator is not False: - executor = collection._sa_adapter - if executor: - item = executor.fire_append_event(item, _sa_initiator) - return item - - -def __del(collection, item, _sa_initiator=None): - """Run del events, may eventually be inlined into decorators.""" - if _sa_initiator is not False: - executor = collection._sa_adapter - if executor: - executor.fire_remove_event(item, _sa_initiator) - - -def __before_delete(collection, _sa_initiator=None): - """Special method to run 'commit existing value' methods""" - executor = collection._sa_adapter - if executor: - executor.fire_pre_remove_event(_sa_initiator) - - -def _list_decorators(): - """Tailored instrumentation wrappers for any list-like class.""" - - def _tidy(fn): - fn._sa_instrumented = True - fn.__doc__ = getattr(list, fn.__name__).__doc__ - - def append(fn): - def append(self, item, _sa_initiator=None): - item = __set(self, item, _sa_initiator) - fn(self, item) - _tidy(append) - return append - - def remove(fn): - def remove(self, value, _sa_initiator=None): - __before_delete(self, _sa_initiator) - # testlib.pragma exempt:__eq__ - fn(self, value) - __del(self, value, _sa_initiator) - _tidy(remove) - return remove - - def insert(fn): - def insert(self, index, value): - value = __set(self, value) - fn(self, index, value) - _tidy(insert) - return insert - - def __setitem__(fn): - def __setitem__(self, index, value): - if not isinstance(index, slice): - existing = self[index] - if existing is not None: - __del(self, existing) - value = __set(self, value) - fn(self, index, value) - else: - # slice assignment requires __delitem__, insert, __len__ - step = index.step or 1 - start = index.start or 0 - if start < 0: - start += len(self) - 
if index.stop is not None: - stop = index.stop - else: - stop = len(self) - if stop < 0: - stop += len(self) - - if step == 1: - for i in range(start, stop, step): - if len(self) > start: - del self[start] - - for i, item in enumerate(value): - self.insert(i + start, item) - else: - rng = list(range(start, stop, step)) - if len(value) != len(rng): - raise ValueError( - "attempt to assign sequence of size %s to " - "extended slice of size %s" % (len(value), - len(rng))) - for i, item in zip(rng, value): - self.__setitem__(i, item) - _tidy(__setitem__) - return __setitem__ - - def __delitem__(fn): - def __delitem__(self, index): - if not isinstance(index, slice): - item = self[index] - __del(self, item) - fn(self, index) - else: - # slice deletion requires __getslice__ and a slice-groking - # __getitem__ for stepped deletion - # note: not breaking this into atomic dels - for item in self[index]: - __del(self, item) - fn(self, index) - _tidy(__delitem__) - return __delitem__ - - if util.py2k: - def __setslice__(fn): - def __setslice__(self, start, end, values): - for value in self[start:end]: - __del(self, value) - values = [__set(self, value) for value in values] - fn(self, start, end, values) - _tidy(__setslice__) - return __setslice__ - - def __delslice__(fn): - def __delslice__(self, start, end): - for value in self[start:end]: - __del(self, value) - fn(self, start, end) - _tidy(__delslice__) - return __delslice__ - - def extend(fn): - def extend(self, iterable): - for value in iterable: - self.append(value) - _tidy(extend) - return extend - - def __iadd__(fn): - def __iadd__(self, iterable): - # list.__iadd__ takes any iterable and seems to let TypeError - # raise as-is instead of returning NotImplemented - for value in iterable: - self.append(value) - return self - _tidy(__iadd__) - return __iadd__ - - def pop(fn): - def pop(self, index=-1): - __before_delete(self) - item = fn(self, index) - __del(self, item) - return item - _tidy(pop) - return pop - - if not util.py2k: - def clear(fn): - def clear(self, index=-1): - for item in self: - __del(self, item) - fn(self) - _tidy(clear) - return clear - - # __imul__ : not wrapping this. all members of the collection are already - # present, so no need to fire appends... wrapping it with an explicit - # decorator is still possible, so events on *= can be had if they're - # desired. hard to imagine a use case for __imul__, though. 
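
In practice these wrappers are what let a plain ``list`` subclass serve as a
relationship's ``collection_class``: ``_instrument_class()`` applies the
``_list_decorators()`` above, so ordinary mutations fire the same
append/remove events the ORM tracks. A minimal sketch, assuming hypothetical
``Parent``/``Child`` declarative models::

    from sqlalchemy import Column, Integer, ForeignKey
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class OrderedChildren(list):
        """Plain list subclass; append/remove/__setitem__ and friends
        are wrapped by _list_decorators() at mapper configuration."""

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child", collection_class=OrderedChildren)

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

    p = Parent()
    p.children.append(Child())  # wrapped append() fires fire_append_event
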
- - l = locals().copy() - l.pop('_tidy') - return l - - -def _dict_decorators(): - """Tailored instrumentation wrappers for any dict-like mapping class.""" - - def _tidy(fn): - fn._sa_instrumented = True - fn.__doc__ = getattr(dict, fn.__name__).__doc__ - - Unspecified = util.symbol('Unspecified') - - def __setitem__(fn): - def __setitem__(self, key, value, _sa_initiator=None): - if key in self: - __del(self, self[key], _sa_initiator) - value = __set(self, value, _sa_initiator) - fn(self, key, value) - _tidy(__setitem__) - return __setitem__ - - def __delitem__(fn): - def __delitem__(self, key, _sa_initiator=None): - if key in self: - __del(self, self[key], _sa_initiator) - fn(self, key) - _tidy(__delitem__) - return __delitem__ - - def clear(fn): - def clear(self): - for key in self: - __del(self, self[key]) - fn(self) - _tidy(clear) - return clear - - def pop(fn): - def pop(self, key, default=Unspecified): - if key in self: - __del(self, self[key]) - if default is Unspecified: - return fn(self, key) - else: - return fn(self, key, default) - _tidy(pop) - return pop - - def popitem(fn): - def popitem(self): - __before_delete(self) - item = fn(self) - __del(self, item[1]) - return item - _tidy(popitem) - return popitem - - def setdefault(fn): - def setdefault(self, key, default=None): - if key not in self: - self.__setitem__(key, default) - return default - else: - return self.__getitem__(key) - _tidy(setdefault) - return setdefault - - def update(fn): - def update(self, __other=Unspecified, **kw): - if __other is not Unspecified: - if hasattr(__other, 'keys'): - for key in list(__other): - if (key not in self or - self[key] is not __other[key]): - self[key] = __other[key] - else: - for key, value in __other: - if key not in self or self[key] is not value: - self[key] = value - for key in kw: - if key not in self or self[key] is not kw[key]: - self[key] = kw[key] - _tidy(update) - return update - - l = locals().copy() - l.pop('_tidy') - l.pop('Unspecified') - return l - -_set_binop_bases = (set, frozenset) - - -def _set_binops_check_strict(self, obj): - """Allow only set, frozenset and self.__class__-derived - objects in binops.""" - return isinstance(obj, _set_binop_bases + (self.__class__,)) - - -def _set_binops_check_loose(self, obj): - """Allow anything set-like to participate in set binops.""" - return (isinstance(obj, _set_binop_bases + (self.__class__,)) or - util.duck_type_collection(obj) == set) - - -def _set_decorators(): - """Tailored instrumentation wrappers for any set-like class.""" - - def _tidy(fn): - fn._sa_instrumented = True - fn.__doc__ = getattr(set, fn.__name__).__doc__ - - Unspecified = util.symbol('Unspecified') - - def add(fn): - def add(self, value, _sa_initiator=None): - if value not in self: - value = __set(self, value, _sa_initiator) - # testlib.pragma exempt:__hash__ - fn(self, value) - _tidy(add) - return add - - def discard(fn): - def discard(self, value, _sa_initiator=None): - # testlib.pragma exempt:__hash__ - if value in self: - __del(self, value, _sa_initiator) - # testlib.pragma exempt:__hash__ - fn(self, value) - _tidy(discard) - return discard - - def remove(fn): - def remove(self, value, _sa_initiator=None): - # testlib.pragma exempt:__hash__ - if value in self: - __del(self, value, _sa_initiator) - # testlib.pragma exempt:__hash__ - fn(self, value) - _tidy(remove) - return remove - - def pop(fn): - def pop(self): - __before_delete(self) - item = fn(self) - __del(self, item) - return item - _tidy(pop) - return pop - - def clear(fn): - def clear(self): 
- for item in list(self): - self.remove(item) - _tidy(clear) - return clear - - def update(fn): - def update(self, value): - for item in value: - self.add(item) - _tidy(update) - return update - - def __ior__(fn): - def __ior__(self, value): - if not _set_binops_check_strict(self, value): - return NotImplemented - for item in value: - self.add(item) - return self - _tidy(__ior__) - return __ior__ - - def difference_update(fn): - def difference_update(self, value): - for item in value: - self.discard(item) - _tidy(difference_update) - return difference_update - - def __isub__(fn): - def __isub__(self, value): - if not _set_binops_check_strict(self, value): - return NotImplemented - for item in value: - self.discard(item) - return self - _tidy(__isub__) - return __isub__ - - def intersection_update(fn): - def intersection_update(self, other): - want, have = self.intersection(other), set(self) - remove, add = have - want, want - have - - for item in remove: - self.remove(item) - for item in add: - self.add(item) - _tidy(intersection_update) - return intersection_update - - def __iand__(fn): - def __iand__(self, other): - if not _set_binops_check_strict(self, other): - return NotImplemented - want, have = self.intersection(other), set(self) - remove, add = have - want, want - have - - for item in remove: - self.remove(item) - for item in add: - self.add(item) - return self - _tidy(__iand__) - return __iand__ - - def symmetric_difference_update(fn): - def symmetric_difference_update(self, other): - want, have = self.symmetric_difference(other), set(self) - remove, add = have - want, want - have - - for item in remove: - self.remove(item) - for item in add: - self.add(item) - _tidy(symmetric_difference_update) - return symmetric_difference_update - - def __ixor__(fn): - def __ixor__(self, other): - if not _set_binops_check_strict(self, other): - return NotImplemented - want, have = self.symmetric_difference(other), set(self) - remove, add = have - want, want - have - - for item in remove: - self.remove(item) - for item in add: - self.add(item) - return self - _tidy(__ixor__) - return __ixor__ - - l = locals().copy() - l.pop('_tidy') - l.pop('Unspecified') - return l - - -class InstrumentedList(list): - """An instrumented version of the built-in list.""" - - -class InstrumentedSet(set): - """An instrumented version of the built-in set.""" - - -class InstrumentedDict(dict): - """An instrumented version of the built-in dict.""" - - -__canned_instrumentation = { - list: InstrumentedList, - set: InstrumentedSet, - dict: InstrumentedDict, -} - -__interfaces = { - list: ( - {'appender': 'append', 'remover': 'remove', - 'iterator': '__iter__'}, _list_decorators() - ), - - set: ({'appender': 'add', - 'remover': 'remove', - 'iterator': '__iter__'}, _set_decorators() - ), - - # decorators are required for dicts and object collections. - dict: ({'iterator': 'values'}, _dict_decorators()) if util.py3k - else ({'iterator': 'itervalues'}, _dict_decorators()), -} - - -class MappedCollection(dict): - """A basic dictionary-based collection class. - - Extends dict with the minimal bag semantics that collection - classes require. ``set`` and ``remove`` are implemented in terms - of a keying function: any callable that takes an object and - returns an object for use as a dictionary key. - - """ - - def __init__(self, keyfunc): - """Create a new collection with keying provided by keyfunc. - - keyfunc may be any callable that takes an object and returns an object - for use as a dictionary key. 
- - The keyfunc will be called every time the ORM needs to add a member by - value-only (such as when loading instances from the database) or - remove a member. The usual cautions about dictionary keying apply- - ``keyfunc(object)`` should return the same output for the life of the - collection. Keying based on mutable properties can result in - unreachable instances "lost" in the collection. - - """ - self.keyfunc = keyfunc - - @collection.appender - @collection.internally_instrumented - def set(self, value, _sa_initiator=None): - """Add an item by value, consulting the keyfunc for the key.""" - - key = self.keyfunc(value) - self.__setitem__(key, value, _sa_initiator) - - @collection.remover - @collection.internally_instrumented - def remove(self, value, _sa_initiator=None): - """Remove an item by value, consulting the keyfunc for the key.""" - - key = self.keyfunc(value) - # Let self[key] raise if key is not in this collection - # testlib.pragma exempt:__ne__ - if self[key] != value: - raise sa_exc.InvalidRequestError( - "Can not remove '%s': collection holds '%s' for key '%s'. " - "Possible cause: is the MappedCollection key function " - "based on mutable properties or properties that only obtain " - "values after flush?" % - (value, self[key], key)) - self.__delitem__(key, _sa_initiator) - - @collection.converter - def _convert(self, dictlike): - """Validate and convert a dict-like object into values for set()ing. - - This is called behind the scenes when a MappedCollection is replaced - entirely by another collection, as in:: - - myobj.mappedcollection = {'a':obj1, 'b': obj2} # ... - - Raises a TypeError if the key in any (key, value) pair in the dictlike - object does not match the key that this collection's keyfunc would - have assigned for that value. - - """ - for incoming_key, value in util.dictlike_iteritems(dictlike): - new_key = self.keyfunc(value) - if incoming_key != new_key: - raise TypeError( - "Found incompatible key %r for value %r; this " - "collection's " - "keying function requires a key of %r for this value." % ( - incoming_key, value, new_key)) - yield value - -# ensure instrumentation is associated with -# these built-in classes; if a user-defined class -# subclasses these and uses @internally_instrumented, -# the superclass is otherwise not instrumented. -# see [ticket:2406]. -_instrument_class(MappedCollection) -_instrument_class(InstrumentedList) -_instrument_class(InstrumentedSet) diff --git a/python/sqlalchemy/orm/dependency.py b/python/sqlalchemy/orm/dependency.py deleted file mode 100644 index d8989939..00000000 --- a/python/sqlalchemy/orm/dependency.py +++ /dev/null @@ -1,1173 +0,0 @@ -# orm/dependency.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Relationship dependencies. - -""" - -from .. import sql, util, exc as sa_exc -from . 
import attributes, exc, sync, unitofwork, \ - util as mapperutil -from .interfaces import ONETOMANY, MANYTOONE, MANYTOMANY - - -class DependencyProcessor(object): - def __init__(self, prop): - self.prop = prop - self.cascade = prop.cascade - self.mapper = prop.mapper - self.parent = prop.parent - self.secondary = prop.secondary - self.direction = prop.direction - self.post_update = prop.post_update - self.passive_deletes = prop.passive_deletes - self.passive_updates = prop.passive_updates - self.enable_typechecks = prop.enable_typechecks - if self.passive_deletes: - self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE - else: - self._passive_delete_flag = attributes.PASSIVE_OFF - if self.passive_updates: - self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE - else: - self._passive_update_flag = attributes.PASSIVE_OFF - - self.key = prop.key - if not self.prop.synchronize_pairs: - raise sa_exc.ArgumentError( - "Can't build a DependencyProcessor for relationship %s. " - "No target attributes to populate between parent and " - "child are present" % - self.prop) - - @classmethod - def from_relationship(cls, prop): - return _direction_to_processor[prop.direction](prop) - - def hasparent(self, state): - """return True if the given object instance has a parent, - according to the ``InstrumentedAttribute`` handled by this - ``DependencyProcessor``. - - """ - return self.parent.class_manager.get_impl(self.key).hasparent(state) - - def per_property_preprocessors(self, uow): - """establish actions and dependencies related to a flush. - - These actions will operate on all relevant states in - the aggregate. - - """ - uow.register_preprocessor(self, True) - - def per_property_flush_actions(self, uow): - after_save = unitofwork.ProcessAll(uow, self, False, True) - before_delete = unitofwork.ProcessAll(uow, self, True, True) - - parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.primary_base_mapper - ) - child_saves = unitofwork.SaveUpdateAll( - uow, - self.mapper.primary_base_mapper - ) - - parent_deletes = unitofwork.DeleteAll( - uow, - self.parent.primary_base_mapper - ) - child_deletes = unitofwork.DeleteAll( - uow, - self.mapper.primary_base_mapper - ) - - self.per_property_dependencies(uow, - parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete - ) - - def per_state_flush_actions(self, uow, states, isdelete): - """establish actions and dependencies related to a flush. - - These actions will operate on all relevant states - individually. This occurs only if there are cycles - in the 'aggregated' version of events. - - """ - - parent_base_mapper = self.parent.primary_base_mapper - child_base_mapper = self.mapper.primary_base_mapper - child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper) - child_deletes = unitofwork.DeleteAll(uow, child_base_mapper) - - # locate and disable the aggregate processors - # for this dependency - - if isdelete: - before_delete = unitofwork.ProcessAll(uow, self, True, True) - before_delete.disabled = True - else: - after_save = unitofwork.ProcessAll(uow, self, False, True) - after_save.disabled = True - - # check if the "child" side is part of the cycle - - if child_saves not in uow.cycles: - # based on the current dependencies we use, the saves/ - # deletes should always be in the 'cycles' collection - # together. if this changes, we will have to break up - # this method a bit more. 
- assert child_deletes not in uow.cycles - - # child side is not part of the cycle, so we will link per-state - # actions to the aggregate "saves", "deletes" actions - child_actions = [ - (child_saves, False), (child_deletes, True) - ] - child_in_cycles = False - else: - child_in_cycles = True - - # check if the "parent" side is part of the cycle - if not isdelete: - parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.base_mapper) - parent_deletes = before_delete = None - if parent_saves in uow.cycles: - parent_in_cycles = True - else: - parent_deletes = unitofwork.DeleteAll( - uow, - self.parent.base_mapper) - parent_saves = after_save = None - if parent_deletes in uow.cycles: - parent_in_cycles = True - - # now create actions /dependencies for each state. - - for state in states: - # detect if there's anything changed or loaded - # by a preprocessor on this state/attribute. In the - # case of deletes we may try to load missing items here as well. - sum_ = state.manager[self.key].impl.get_all_pending( - state, state.dict, - self._passive_delete_flag - if isdelete - else attributes.PASSIVE_NO_INITIALIZE) - - if not sum_: - continue - - if isdelete: - before_delete = unitofwork.ProcessState(uow, - self, True, state) - if parent_in_cycles: - parent_deletes = unitofwork.DeleteState( - uow, - state, - parent_base_mapper) - else: - after_save = unitofwork.ProcessState(uow, self, False, state) - if parent_in_cycles: - parent_saves = unitofwork.SaveUpdateState( - uow, - state, - parent_base_mapper) - - if child_in_cycles: - child_actions = [] - for child_state, child in sum_: - if child_state not in uow.states: - child_action = (None, None) - else: - (deleted, listonly) = uow.states[child_state] - if deleted: - child_action = ( - unitofwork.DeleteState( - uow, child_state, - child_base_mapper), - True) - else: - child_action = ( - unitofwork.SaveUpdateState( - uow, child_state, - child_base_mapper), - False) - child_actions.append(child_action) - - # establish dependencies between our possibly per-state - # parent action and our possibly per-state child action. 
- for child_action, childisdelete in child_actions: - self.per_state_dependencies(uow, parent_saves, - parent_deletes, - child_action, - after_save, before_delete, - isdelete, childisdelete) - - def presort_deletes(self, uowcommit, states): - return False - - def presort_saves(self, uowcommit, states): - return False - - def process_deletes(self, uowcommit, states): - pass - - def process_saves(self, uowcommit, states): - pass - - def prop_has_changes(self, uowcommit, states, isdelete): - if not isdelete or self.passive_deletes: - passive = attributes.PASSIVE_NO_INITIALIZE - elif self.direction is MANYTOONE: - passive = attributes.PASSIVE_NO_FETCH_RELATED - else: - passive = attributes.PASSIVE_OFF - - for s in states: - # TODO: add a high speed method - # to InstanceState which returns: attribute - # has a non-None value, or had one - history = uowcommit.get_attribute_history( - s, - self.key, - passive) - if history and not history.empty(): - return True - else: - return states and \ - not self.prop._is_self_referential and \ - self.mapper in uowcommit.mappers - - def _verify_canload(self, state): - if self.prop.uselist and state is None: - raise exc.FlushError( - "Can't flush None value found in " - "collection %s" % (self.prop, )) - elif state is not None and \ - not self.mapper._canload( - state, allow_subtypes=not self.enable_typechecks): - if self.mapper._canload(state, allow_subtypes=True): - raise exc.FlushError('Attempting to flush an item of type ' - '%(x)s as a member of collection ' - '"%(y)s". Expected an object of type ' - '%(z)s or a polymorphic subclass of ' - 'this type. If %(x)s is a subclass of ' - '%(z)s, configure mapper "%(zm)s" to ' - 'load this subtype polymorphically, or ' - 'set enable_typechecks=False to allow ' - 'any subtype to be accepted for flush. ' - % { - 'x': state.class_, - 'y': self.prop, - 'z': self.mapper.class_, - 'zm': self.mapper, - }) - else: - raise exc.FlushError( - 'Attempting to flush an item of type ' - '%(x)s as a member of collection ' - '"%(y)s". Expected an object of type ' - '%(z)s or a polymorphic subclass of ' - 'this type.' 
% { - 'x': state.class_, - 'y': self.prop, - 'z': self.mapper.class_, - }) - - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit): - raise NotImplementedError() - - def _get_reversed_processed_set(self, uow): - if not self.prop._reverse_property: - return None - - process_key = tuple(sorted( - [self.key] + - [p.key for p in self.prop._reverse_property] - )) - return uow.memo( - ('reverse_key', process_key), - set - ) - - def _post_update(self, state, uowcommit, related): - for x in related: - if x is not None: - uowcommit.issue_post_update( - state, - [r for l, r in self.prop.synchronize_pairs] - ) - break - - def _pks_changed(self, uowcommit, state): - raise NotImplementedError() - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self.prop) - - -class OneToManyDP(DependencyProcessor): - - def per_property_dependencies(self, uow, parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete, - ): - if self.post_update: - child_post_updates = unitofwork.IssuePostUpdate( - uow, - self.mapper.primary_base_mapper, - False) - child_pre_updates = unitofwork.IssuePostUpdate( - uow, - self.mapper.primary_base_mapper, - True) - - uow.dependencies.update([ - (child_saves, after_save), - (parent_saves, after_save), - (after_save, child_post_updates), - - (before_delete, child_pre_updates), - (child_pre_updates, parent_deletes), - (child_pre_updates, child_deletes), - - ]) - else: - uow.dependencies.update([ - (parent_saves, after_save), - (after_save, child_saves), - (after_save, child_deletes), - - (child_saves, parent_deletes), - (child_deletes, parent_deletes), - - (before_delete, child_saves), - (before_delete, child_deletes), - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): - - if self.post_update: - - child_post_updates = unitofwork.IssuePostUpdate( - uow, - self.mapper.primary_base_mapper, - False) - child_pre_updates = unitofwork.IssuePostUpdate( - uow, - self.mapper.primary_base_mapper, - True) - - # TODO: this whole block is not covered - # by any tests - if not isdelete: - if childisdelete: - uow.dependencies.update([ - (child_action, after_save), - (after_save, child_post_updates), - ]) - else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - (after_save, child_post_updates), - ]) - else: - if childisdelete: - uow.dependencies.update([ - (before_delete, child_pre_updates), - (child_pre_updates, delete_parent), - ]) - else: - uow.dependencies.update([ - (before_delete, child_pre_updates), - (child_pre_updates, delete_parent), - ]) - elif not isdelete: - uow.dependencies.update([ - (save_parent, after_save), - (after_save, child_action), - (save_parent, child_action) - ]) - else: - uow.dependencies.update([ - (before_delete, child_action), - (child_action, delete_parent) - ]) - - def presort_deletes(self, uowcommit, states): - # head object is being deleted, and we manage its list of - # child objects the child objects have to have their - # foreign key to the parent set to NULL - should_null_fks = not self.cascade.delete and \ - not self.passive_deletes == 'all' - - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - for child in history.deleted: - if child is not None and self.hasparent(child) is False: - if self.cascade.delete_orphan: - uowcommit.register_object(child, isdelete=True) - else: - 
uowcommit.register_object(child) - - if should_null_fks: - for child in history.unchanged: - if child is not None: - uowcommit.register_object( - child, operation="delete", prop=self.prop) - - def presort_saves(self, uowcommit, states): - children_added = uowcommit.memo(('children_added', self), set) - - for state in states: - pks_changed = self._pks_changed(uowcommit, state) - - if not pks_changed or self.passive_updates: - passive = attributes.PASSIVE_NO_INITIALIZE - else: - passive = attributes.PASSIVE_OFF - - history = uowcommit.get_attribute_history( - state, - self.key, - passive) - if history: - for child in history.added: - if child is not None: - uowcommit.register_object(child, cancel_delete=True, - operation="add", - prop=self.prop) - - children_added.update(history.added) - - for child in history.deleted: - if not self.cascade.delete_orphan: - uowcommit.register_object(child, isdelete=False, - operation='delete', - prop=self.prop) - elif self.hasparent(child) is False: - uowcommit.register_object( - child, isdelete=True, - operation="delete", prop=self.prop) - for c, m, st_, dct_ in self.mapper.cascade_iterator( - 'delete', child): - uowcommit.register_object( - st_, - isdelete=True) - - if pks_changed: - if history: - for child in history.unchanged: - if child is not None: - uowcommit.register_object( - child, - False, - self.passive_updates, - operation="pk change", - prop=self.prop) - - def process_deletes(self, uowcommit, states): - # head object is being deleted, and we manage its list of - # child objects the child objects have to have their foreign - # key to the parent set to NULL this phase can be called - # safely for any cascade but is unnecessary if delete cascade - # is on. - - if self.post_update or not self.passive_deletes == 'all': - children_added = uowcommit.memo(('children_added', self), set) - - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - for child in history.deleted: - if child is not None and \ - self.hasparent(child) is False: - self._synchronize( - state, - child, - None, True, - uowcommit, False) - if self.post_update and child: - self._post_update(child, uowcommit, [state]) - - if self.post_update or not self.cascade.delete: - for child in set(history.unchanged).\ - difference(children_added): - if child is not None: - self._synchronize( - state, - child, - None, True, - uowcommit, False) - if self.post_update and child: - self._post_update(child, - uowcommit, - [state]) - - # technically, we can even remove each child from the - # collection here too. but this would be a somewhat - # inconsistent behavior since it wouldn't happen - # if the old parent wasn't deleted but child was moved. 
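
The behavior described in the comment above is visible from user code: with no
``delete`` cascade configured, deleting a parent dissociates its children by
nulling their foreign key rather than deleting them. A small runnable sketch,
assuming hypothetical ``Parent``/``Child`` models::

    from sqlalchemy import Column, Integer, ForeignKey, create_engine
    from sqlalchemy.orm import relationship, Session
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child")  # no "delete" cascade

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)
    p = Parent(children=[Child()])
    session.add(p)
    session.commit()

    session.delete(p)
    session.flush()
    # OneToManyDP.process_deletes dissociates the child:
    #   UPDATE child SET parent_id=NULL WHERE child.id = ?
    # with cascade="all, delete" on the relationship, a DELETE of the
    # child row would be issued instead
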
- - def process_saves(self, uowcommit, states): - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) - if history: - for child in history.added: - self._synchronize(state, child, None, - False, uowcommit, False) - if child is not None and self.post_update: - self._post_update(child, uowcommit, [state]) - - for child in history.deleted: - if not self.cascade.delete_orphan and \ - not self.hasparent(child): - self._synchronize(state, child, None, True, - uowcommit, False) - - if self._pks_changed(uowcommit, state): - for child in history.unchanged: - self._synchronize(state, child, None, - False, uowcommit, True) - - def _synchronize(self, state, child, - associationrow, clearkeys, uowcommit, - pks_changed): - source = state - dest = child - self._verify_canload(child) - if dest is None or \ - (not self.post_update and uowcommit.is_deleted(dest)): - return - if clearkeys: - sync.clear(dest, self.mapper, self.prop.synchronize_pairs) - else: - sync.populate(source, self.parent, dest, self.mapper, - self.prop.synchronize_pairs, uowcommit, - self.passive_updates and pks_changed) - - def _pks_changed(self, uowcommit, state): - return sync.source_modified( - uowcommit, - state, - self.parent, - self.prop.synchronize_pairs) - - -class ManyToOneDP(DependencyProcessor): - def __init__(self, prop): - DependencyProcessor.__init__(self, prop) - self.mapper._dependency_processors.append(DetectKeySwitch(prop)) - - def per_property_dependencies(self, uow, - parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete): - - if self.post_update: - parent_post_updates = unitofwork.IssuePostUpdate( - uow, - self.parent.primary_base_mapper, - False) - parent_pre_updates = unitofwork.IssuePostUpdate( - uow, - self.parent.primary_base_mapper, - True) - - uow.dependencies.update([ - (child_saves, after_save), - (parent_saves, after_save), - (after_save, parent_post_updates), - - (after_save, parent_pre_updates), - (before_delete, parent_pre_updates), - - (parent_pre_updates, child_deletes), - ]) - else: - uow.dependencies.update([ - (child_saves, after_save), - (after_save, parent_saves), - (parent_saves, child_deletes), - (parent_deletes, child_deletes) - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): - - if self.post_update: - - if not isdelete: - parent_post_updates = unitofwork.IssuePostUpdate( - uow, - self.parent.primary_base_mapper, - False) - if childisdelete: - uow.dependencies.update([ - (after_save, parent_post_updates), - (parent_post_updates, child_action) - ]) - else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - - (after_save, parent_post_updates) - ]) - else: - parent_pre_updates = unitofwork.IssuePostUpdate( - uow, - self.parent.primary_base_mapper, - True) - - uow.dependencies.update([ - (before_delete, parent_pre_updates), - (parent_pre_updates, delete_parent), - (parent_pre_updates, child_action) - ]) - - elif not isdelete: - if not childisdelete: - uow.dependencies.update([ - (child_action, after_save), - (after_save, save_parent), - ]) - else: - uow.dependencies.update([ - (after_save, save_parent), - ]) - - else: - if childisdelete: - uow.dependencies.update([ - (delete_parent, child_action) - ]) - - def presort_deletes(self, uowcommit, states): - if self.cascade.delete or self.cascade.delete_orphan: - for state in states: - history = 
uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - if self.cascade.delete_orphan: - todelete = history.sum() - else: - todelete = history.non_deleted() - for child in todelete: - if child is None: - continue - uowcommit.register_object( - child, isdelete=True, - operation="delete", prop=self.prop) - t = self.mapper.cascade_iterator('delete', child) - for c, m, st_, dct_ in t: - uowcommit.register_object( - st_, isdelete=True) - - def presort_saves(self, uowcommit, states): - for state in states: - uowcommit.register_object(state, operation="add", prop=self.prop) - if self.cascade.delete_orphan: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - for child in history.deleted: - if self.hasparent(child) is False: - uowcommit.register_object( - child, isdelete=True, - operation="delete", prop=self.prop) - - t = self.mapper.cascade_iterator('delete', child) - for c, m, st_, dct_ in t: - uowcommit.register_object(st_, isdelete=True) - - def process_deletes(self, uowcommit, states): - if self.post_update and \ - not self.cascade.delete_orphan and \ - not self.passive_deletes == 'all': - - # post_update means we have to update our - # row to not reference the child object - # before we can DELETE the row - for state in states: - self._synchronize(state, None, None, True, uowcommit) - if state and self.post_update: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - self._post_update(state, uowcommit, history.sum()) - - def process_saves(self, uowcommit, states): - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) - if history: - if history.added: - for child in history.added: - self._synchronize(state, child, None, False, - uowcommit, "add") - if self.post_update: - self._post_update(state, uowcommit, history.sum()) - - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit, operation=None): - if state is None or \ - (not self.post_update and uowcommit.is_deleted(state)): - return - - if operation is not None and \ - child is not None and \ - not uowcommit.session._contains_state(child): - util.warn( - "Object of type %s not in session, %s " - "operation along '%s' won't proceed" % - (mapperutil.state_class_str(child), operation, self.prop)) - return - - if clearkeys or child is None: - sync.clear(state, self.parent, self.prop.synchronize_pairs) - else: - self._verify_canload(child) - sync.populate(child, self.mapper, state, - self.parent, - self.prop.synchronize_pairs, - uowcommit, - False) - - -class DetectKeySwitch(DependencyProcessor): - """For many-to-one relationships with no one-to-many backref, - searches for parents through the unit of work when a primary - key has changed and updates them. - - Theoretically, this approach could be expanded to support transparent - deletion of objects referenced via many-to-one as well, although - the current attribute system doesn't do enough bookkeeping for this - to be efficient. 
- - """ - - def per_property_preprocessors(self, uow): - if self.prop._reverse_property: - if self.passive_updates: - return - else: - if False in (prop.passive_updates for - prop in self.prop._reverse_property): - return - - uow.register_preprocessor(self, False) - - def per_property_flush_actions(self, uow): - parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.base_mapper) - after_save = unitofwork.ProcessAll(uow, self, False, False) - uow.dependencies.update([ - (parent_saves, after_save) - ]) - - def per_state_flush_actions(self, uow, states, isdelete): - pass - - def presort_deletes(self, uowcommit, states): - pass - - def presort_saves(self, uow, states): - if not self.passive_updates: - # for non-passive updates, register in the preprocess stage - # so that mapper save_obj() gets a hold of changes - self._process_key_switches(states, uow) - - def prop_has_changes(self, uow, states, isdelete): - if not isdelete and self.passive_updates: - d = self._key_switchers(uow, states) - return bool(d) - - return False - - def process_deletes(self, uowcommit, states): - assert False - - def process_saves(self, uowcommit, states): - # for passive updates, register objects in the process stage - # so that we avoid ManyToOneDP's registering the object without - # the listonly flag in its own preprocess stage (results in UPDATE) - # statements being emitted - assert self.passive_updates - self._process_key_switches(states, uowcommit) - - def _key_switchers(self, uow, states): - switched, notswitched = uow.memo( - ('pk_switchers', self), - lambda: (set(), set()) - ) - - allstates = switched.union(notswitched) - for s in states: - if s not in allstates: - if self._pks_changed(uow, s): - switched.add(s) - else: - notswitched.add(s) - return switched - - def _process_key_switches(self, deplist, uowcommit): - switchers = self._key_switchers(uowcommit, deplist) - if switchers: - # if primary key values have actually changed somewhere, perform - # a linear search through the UOW in search of a parent. 
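
The linear search mentioned above is observable when a natural primary key
changes under ``passive_updates=False``: in-session referrers are located by
scanning the identity map and their foreign key attributes are synchronized
before their UPDATE. A sketch with hypothetical ``User``/``Address`` models
keyed by name::

    from sqlalchemy import (Column, Integer, String, ForeignKey,
                            create_engine)
    from sqlalchemy.orm import relationship, Session
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        name = Column(String(50), primary_key=True)

    class Address(Base):
        __tablename__ = 'address'
        id = Column(Integer, primary_key=True)
        user_name = Column(String(50), ForeignKey('user.name'))
        user = relationship("User", passive_updates=False)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)
    u = User(name='jack')
    a = Address(user=u)
    session.add(a)
    session.commit()

    u.name = 'ed'    # primary key value switches
    session.flush()
    # DetectKeySwitch scans the identity map, finds `a`, and populates
    # a.user_name = 'ed' so its row is UPDATEd along with the parent
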
- for state in uowcommit.session.identity_map.all_states(): - if not issubclass(state.class_, self.parent.class_): - continue - dict_ = state.dict - related = state.get_impl(self.key).get( - state, dict_, passive=self._passive_update_flag) - if related is not attributes.PASSIVE_NO_RESULT and \ - related is not None: - related_state = attributes.instance_state(dict_[self.key]) - if related_state in switchers: - uowcommit.register_object(state, - False, - self.passive_updates) - sync.populate( - related_state, - self.mapper, state, - self.parent, self.prop.synchronize_pairs, - uowcommit, self.passive_updates) - - def _pks_changed(self, uowcommit, state): - return bool(state.key) and sync.source_modified( - uowcommit, state, self.mapper, self.prop.synchronize_pairs) - - -class ManyToManyDP(DependencyProcessor): - - def per_property_dependencies(self, uow, parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete - ): - - uow.dependencies.update([ - (parent_saves, after_save), - (child_saves, after_save), - (after_save, child_deletes), - - # a rowswitch on the parent from deleted to saved - # can make this one occur, as the "save" may remove - # an element from the - # "deleted" list before we have a chance to - # process its child rows - (before_delete, parent_saves), - - (before_delete, parent_deletes), - (before_delete, child_deletes), - (before_delete, child_saves), - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): - if not isdelete: - if childisdelete: - uow.dependencies.update([ - (save_parent, after_save), - (after_save, child_action), - ]) - else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - ]) - else: - uow.dependencies.update([ - (before_delete, child_action), - (before_delete, delete_parent) - ]) - - def presort_deletes(self, uowcommit, states): - # TODO: no tests fail if this whole - # thing is removed !!!! - if not self.passive_deletes: - # if no passive deletes, load history on - # the collection, so that prop_has_changes() - # returns True - for state in states: - uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - - def presort_saves(self, uowcommit, states): - if not self.passive_updates: - # if no passive updates, load history on - # each collection where parent has changed PK, - # so that prop_has_changes() returns True - for state in states: - if self._pks_changed(uowcommit, state): - history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_OFF) - - if not self.cascade.delete_orphan: - return - - # check for child items removed from the collection - # if delete_orphan check is turned on. 
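
For many-to-many, the unit of work never touches child rows directly; it
inserts and deletes rows in the ``secondary`` table instead, building one
``associationrow`` per added or removed member. A sketch, assuming a
hypothetical ``post_keywords`` association table::

    from sqlalchemy import (Table, Column, Integer, ForeignKey,
                            create_engine)
    from sqlalchemy.orm import relationship, Session
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    post_keywords = Table(
        'post_keywords', Base.metadata,
        Column('post_id', Integer, ForeignKey('post.id'),
               primary_key=True),
        Column('keyword_id', Integer, ForeignKey('keyword.id'),
               primary_key=True))

    class Post(Base):
        __tablename__ = 'post'
        id = Column(Integer, primary_key=True)
        keywords = relationship("Keyword", secondary=post_keywords)

    class Keyword(Base):
        __tablename__ = 'keyword'
        id = Column(Integer, primary_key=True)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)
    kw = Keyword()
    post = Post(keywords=[kw])
    session.add(post)
    session.commit()

    post.keywords.remove(kw)
    session.flush()
    # ManyToManyDP builds one associationrow for the removed member:
    #   DELETE FROM post_keywords WHERE post_id = ? AND keyword_id = ?
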
- for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) - if history: - for child in history.deleted: - if self.hasparent(child) is False: - uowcommit.register_object( - child, isdelete=True, - operation="delete", prop=self.prop) - for c, m, st_, dct_ in self.mapper.cascade_iterator( - 'delete', - child): - uowcommit.register_object( - st_, isdelete=True) - - def process_deletes(self, uowcommit, states): - secondary_delete = [] - secondary_insert = [] - secondary_update = [] - - processed = self._get_reversed_processed_set(uowcommit) - tmp = set() - for state in states: - # this history should be cached already, as - # we loaded it in preprocess_deletes - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - for child in history.non_added(): - if child is None or \ - (processed is not None and - (state, child) in processed): - continue - associationrow = {} - if not self._synchronize( - state, - child, - associationrow, - False, uowcommit, "delete"): - continue - secondary_delete.append(associationrow) - - tmp.update((c, state) for c in history.non_added()) - - if processed is not None: - processed.update(tmp) - - self._run_crud(uowcommit, secondary_insert, - secondary_update, secondary_delete) - - def process_saves(self, uowcommit, states): - secondary_delete = [] - secondary_insert = [] - secondary_update = [] - - processed = self._get_reversed_processed_set(uowcommit) - tmp = set() - - for state in states: - need_cascade_pks = not self.passive_updates and \ - self._pks_changed(uowcommit, state) - if need_cascade_pks: - passive = attributes.PASSIVE_OFF - else: - passive = attributes.PASSIVE_NO_INITIALIZE - history = uowcommit.get_attribute_history(state, self.key, - passive) - if history: - for child in history.added: - if (processed is not None and - (state, child) in processed): - continue - associationrow = {} - if not self._synchronize(state, - child, - associationrow, - False, uowcommit, "add"): - continue - secondary_insert.append(associationrow) - for child in history.deleted: - if (processed is not None and - (state, child) in processed): - continue - associationrow = {} - if not self._synchronize(state, - child, - associationrow, - False, uowcommit, "delete"): - continue - secondary_delete.append(associationrow) - - tmp.update((c, state) - for c in history.added + history.deleted) - - if need_cascade_pks: - - for child in history.unchanged: - associationrow = {} - sync.update(state, - self.parent, - associationrow, - "old_", - self.prop.synchronize_pairs) - sync.update(child, - self.mapper, - associationrow, - "old_", - self.prop.secondary_synchronize_pairs) - - secondary_update.append(associationrow) - - if processed is not None: - processed.update(tmp) - - self._run_crud(uowcommit, secondary_insert, - secondary_update, secondary_delete) - - def _run_crud(self, uowcommit, secondary_insert, - secondary_update, secondary_delete): - connection = uowcommit.transaction.connection(self.mapper) - - if secondary_delete: - associationrow = secondary_delete[0] - statement = self.secondary.delete(sql.and_(*[ - c == sql.bindparam(c.key, type_=c.type) - for c in self.secondary.c - if c.key in associationrow - ])) - result = connection.execute(statement, secondary_delete) - - if result.supports_sane_multi_rowcount() and \ - result.rowcount != len(secondary_delete): - raise exc.StaleDataError( - "DELETE statement on table '%s' expected to delete " - "%d row(s); Only %d 
were matched." % - (self.secondary.description, len(secondary_delete), - result.rowcount) - ) - - if secondary_update: - associationrow = secondary_update[0] - statement = self.secondary.update(sql.and_(*[ - c == sql.bindparam("old_" + c.key, type_=c.type) - for c in self.secondary.c - if c.key in associationrow - ])) - result = connection.execute(statement, secondary_update) - - if result.supports_sane_multi_rowcount() and \ - result.rowcount != len(secondary_update): - raise exc.StaleDataError( - "UPDATE statement on table '%s' expected to update " - "%d row(s); Only %d were matched." % - (self.secondary.description, len(secondary_update), - result.rowcount) - ) - - if secondary_insert: - statement = self.secondary.insert() - connection.execute(statement, secondary_insert) - - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit, operation): - - # this checks for None if uselist=True - self._verify_canload(child) - - # but if uselist=False we get here. If child is None, - # no association row can be generated, so return. - if child is None: - return False - - if child is not None and not uowcommit.session._contains_state(child): - if not child.deleted: - util.warn( - "Object of type %s not in session, %s " - "operation along '%s' won't proceed" % - (mapperutil.state_class_str(child), operation, self.prop)) - return False - - sync.populate_dict(state, self.parent, associationrow, - self.prop.synchronize_pairs) - sync.populate_dict(child, self.mapper, associationrow, - self.prop.secondary_synchronize_pairs) - - return True - - def _pks_changed(self, uowcommit, state): - return sync.source_modified( - uowcommit, - state, - self.parent, - self.prop.synchronize_pairs) - -_direction_to_processor = { - ONETOMANY: OneToManyDP, - MANYTOONE: ManyToOneDP, - MANYTOMANY: ManyToManyDP, -} diff --git a/python/sqlalchemy/orm/deprecated_interfaces.py b/python/sqlalchemy/orm/deprecated_interfaces.py deleted file mode 100644 index bb6d185d..00000000 --- a/python/sqlalchemy/orm/deprecated_interfaces.py +++ /dev/null @@ -1,487 +0,0 @@ -# orm/deprecated_interfaces.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .. import event, util -from .interfaces import EXT_CONTINUE - - -@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces") -class MapperExtension(object): - """Base implementation for :class:`.Mapper` event hooks. - - .. note:: - - :class:`.MapperExtension` is deprecated. Please - refer to :func:`.event.listen` as well as - :class:`.MapperEvents`. - - New extension classes subclass :class:`.MapperExtension` and are specified - using the ``extension`` mapper() argument, which is a single - :class:`.MapperExtension` or a list of such:: - - from sqlalchemy.orm.interfaces import MapperExtension - - class MyExtension(MapperExtension): - def before_insert(self, mapper, connection, instance): - print "instance %s before insert !" % instance - - m = mapper(User, users_table, extension=MyExtension()) - - A single mapper can maintain a chain of ``MapperExtension`` - objects. When a particular mapping event occurs, the - corresponding method on each ``MapperExtension`` is invoked - serially, and each method has the ability to halt the chain - from proceeding further:: - - m = mapper(User, users_table, extension=[ext1, ext2, ext3]) - - Each ``MapperExtension`` method returns the symbol - EXT_CONTINUE by default. 
This symbol generally means "move - to the next ``MapperExtension`` for processing". For methods - that return objects like translated rows or new object - instances, EXT_CONTINUE means the result of the method - should be ignored. In some cases it's required for a - default mapper activity to be performed, such as adding a - new instance to a result list. - - The symbol EXT_STOP has significance within a chain - of ``MapperExtension`` objects that the chain will be stopped - when this symbol is returned. Like EXT_CONTINUE, it also - has additional significance in some cases that a default - mapper activity will not be performed. - - """ - - @classmethod - def _adapt_instrument_class(cls, self, listener): - cls._adapt_listener_methods(self, listener, ('instrument_class',)) - - @classmethod - def _adapt_listener(cls, self, listener): - cls._adapt_listener_methods( - self, listener, - ( - 'init_instance', - 'init_failed', - 'reconstruct_instance', - 'before_insert', - 'after_insert', - 'before_update', - 'after_update', - 'before_delete', - 'after_delete' - )) - - @classmethod - def _adapt_listener_methods(cls, self, listener, methods): - - for meth in methods: - me_meth = getattr(MapperExtension, meth) - ls_meth = getattr(listener, meth) - - if not util.methods_equivalent(me_meth, ls_meth): - if meth == 'reconstruct_instance': - def go(ls_meth): - def reconstruct(instance, ctx): - ls_meth(self, instance) - return reconstruct - event.listen(self.class_manager, 'load', - go(ls_meth), raw=False, propagate=True) - elif meth == 'init_instance': - def go(ls_meth): - def init_instance(instance, args, kwargs): - ls_meth(self, self.class_, - self.class_manager.original_init, - instance, args, kwargs) - return init_instance - event.listen(self.class_manager, 'init', - go(ls_meth), raw=False, propagate=True) - elif meth == 'init_failed': - def go(ls_meth): - def init_failed(instance, args, kwargs): - util.warn_exception( - ls_meth, self, self.class_, - self.class_manager.original_init, - instance, args, kwargs) - - return init_failed - event.listen(self.class_manager, 'init_failure', - go(ls_meth), raw=False, propagate=True) - else: - event.listen(self, "%s" % meth, ls_meth, - raw=False, retval=True, propagate=True) - - def instrument_class(self, mapper, class_): - """Receive a class when the mapper is first constructed, and has - applied instrumentation to the mapped class. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - return EXT_CONTINUE - - def init_instance(self, mapper, class_, oldinit, instance, args, kwargs): - """Receive an instance when its constructor is called. - - This method is only called during a userland construction of - an object. It is not called when an object is loaded from the - database. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - return EXT_CONTINUE - - def init_failed(self, mapper, class_, oldinit, instance, args, kwargs): - """Receive an instance when its constructor has been called, - and raised an exception. - - This method is only called during a userland construction of - an object. It is not called when an object is loaded from the - database. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. 
- - """ - return EXT_CONTINUE - - def reconstruct_instance(self, mapper, instance): - """Receive an object instance after it has been created via - ``__new__``, and after initial attribute population has - occurred. - - This typically occurs when the instance is created based on - incoming result rows, and is only called once for that - instance's lifetime. - - Note that during a result-row load, this method is called upon - the first row received for this instance. Note that some - attributes and collections may or may not be loaded or even - initialized, depending on what's present in the result rows. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - return EXT_CONTINUE - - def before_insert(self, mapper, connection, instance): - """Receive an object instance before that instance is inserted - into its table. - - This is a good place to set up primary key values and such - that aren't handled otherwise. - - Column-based attributes can be modified within this method - which will result in the new value being inserted. However - *no* changes to the overall flush plan can be made, and - manipulation of the ``Session`` will not have the desired effect. - To manipulate the ``Session`` within an extension, use - ``SessionExtension``. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def after_insert(self, mapper, connection, instance): - """Receive an object instance after that instance is inserted. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def before_update(self, mapper, connection, instance): - """Receive an object instance before that instance is updated. - - Note that this method is called for all instances that are marked as - "dirty", even those which have no net changes to their column-based - attributes. An object is marked as dirty when any of its column-based - attributes have a "set attribute" operation called or when any of its - collections are modified. If, at update time, no column-based - attributes have any net changes, no UPDATE statement will be issued. - This means that an instance being sent to before_update is *not* a - guarantee that an UPDATE statement will be issued (although you can - affect the outcome here). - - To detect if the column-based attributes on the object have net - changes, and will therefore generate an UPDATE statement, use - ``object_session(instance).is_modified(instance, - include_collections=False)``. - - Column-based attributes can be modified within this method - which will result in the new value being updated. However - *no* changes to the overall flush plan can be made, and - manipulation of the ``Session`` will not have the desired effect. - To manipulate the ``Session`` within an extension, use - ``SessionExtension``. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def after_update(self, mapper, connection, instance): - """Receive an object instance after that instance is updated. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. 
- - """ - - return EXT_CONTINUE - - def before_delete(self, mapper, connection, instance): - """Receive an object instance before that instance is deleted. - - Note that *no* changes to the overall flush plan can be made - here; and manipulation of the ``Session`` will not have the - desired effect. To manipulate the ``Session`` within an - extension, use ``SessionExtension``. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def after_delete(self, mapper, connection, instance): - """Receive an object instance after that instance is deleted. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - -@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces") -class SessionExtension(object): - - """Base implementation for :class:`.Session` event hooks. - - .. note:: - - :class:`.SessionExtension` is deprecated. Please - refer to :func:`.event.listen` as well as - :class:`.SessionEvents`. - - Subclasses may be installed into a :class:`.Session` (or - :class:`.sessionmaker`) using the ``extension`` keyword - argument:: - - from sqlalchemy.orm.interfaces import SessionExtension - - class MySessionExtension(SessionExtension): - def before_commit(self, session): - print "before commit!" - - Session = sessionmaker(extension=MySessionExtension()) - - The same :class:`.SessionExtension` instance can be used - with any number of sessions. - - """ - - @classmethod - def _adapt_listener(cls, self, listener): - for meth in [ - 'before_commit', - 'after_commit', - 'after_rollback', - 'before_flush', - 'after_flush', - 'after_flush_postexec', - 'after_begin', - 'after_attach', - 'after_bulk_update', - 'after_bulk_delete', - ]: - me_meth = getattr(SessionExtension, meth) - ls_meth = getattr(listener, meth) - - if not util.methods_equivalent(me_meth, ls_meth): - event.listen(self, meth, getattr(listener, meth)) - - def before_commit(self, session): - """Execute right before commit is called. - - Note that this may not be per-flush if a longer running - transaction is ongoing.""" - - def after_commit(self, session): - """Execute after a commit has occurred. - - Note that this may not be per-flush if a longer running - transaction is ongoing.""" - - def after_rollback(self, session): - """Execute after a rollback has occurred. - - Note that this may not be per-flush if a longer running - transaction is ongoing.""" - - def before_flush(self, session, flush_context, instances): - """Execute before flush process has started. - - `instances` is an optional list of objects which were passed to - the ``flush()`` method. """ - - def after_flush(self, session, flush_context): - """Execute after flush has completed, but before commit has been - called. - - Note that the session's state is still in pre-flush, i.e. 'new', - 'dirty', and 'deleted' lists still show pre-flush state as well - as the history settings on instance attributes.""" - - def after_flush_postexec(self, session, flush_context): - """Execute after flush has completed, and after the post-exec - state occurs. - - This will be when the 'new', 'dirty', and 'deleted' lists are in - their final state. An actual commit() may or may not have - occurred, depending on whether or not the flush started its own - transaction or participated in a larger transaction. 
""" - - def after_begin(self, session, transaction, connection): - """Execute after a transaction is begun on a connection - - `transaction` is the SessionTransaction. This method is called - after an engine level transaction is begun on a connection. """ - - def after_attach(self, session, instance): - """Execute after an instance is attached to a session. - - This is called after an add, delete or merge. """ - - def after_bulk_update(self, session, query, query_context, result): - """Execute after a bulk update operation to the session. - - This is called after a session.query(...).update() - - `query` is the query object that this update operation was - called on. `query_context` was the query context object. - `result` is the result object returned from the bulk operation. - """ - - def after_bulk_delete(self, session, query, query_context, result): - """Execute after a bulk delete operation to the session. - - This is called after a session.query(...).delete() - - `query` is the query object that this delete operation was - called on. `query_context` was the query context object. - `result` is the result object returned from the bulk operation. - """ - - -@util.langhelpers.dependency_for("sqlalchemy.orm.interfaces") -class AttributeExtension(object): - """Base implementation for :class:`.AttributeImpl` event hooks, events - that fire upon attribute mutations in user code. - - .. note:: - - :class:`.AttributeExtension` is deprecated. Please - refer to :func:`.event.listen` as well as - :class:`.AttributeEvents`. - - :class:`.AttributeExtension` is used to listen for set, - remove, and append events on individual mapped attributes. - It is established on an individual mapped attribute using - the `extension` argument, available on - :func:`.column_property`, :func:`.relationship`, and - others:: - - from sqlalchemy.orm.interfaces import AttributeExtension - from sqlalchemy.orm import mapper, relationship, column_property - - class MyAttrExt(AttributeExtension): - def append(self, state, value, initiator): - print "append event !" - return value - - def set(self, state, value, oldvalue, initiator): - print "set event !" - return value - - mapper(SomeClass, sometable, properties={ - 'foo':column_property(sometable.c.foo, extension=MyAttrExt()), - 'bar':relationship(Bar, extension=MyAttrExt()) - }) - - Note that the :class:`.AttributeExtension` methods - :meth:`~.AttributeExtension.append` and - :meth:`~.AttributeExtension.set` need to return the - ``value`` parameter. The returned value is used as the - effective value, and allows the extension to change what is - ultimately persisted. - - AttributeExtension is assembled within the descriptors associated - with a mapped class. - - """ - - active_history = True - """indicates that the set() method would like to receive the 'old' value, - even if it means firing lazy callables. - - Note that ``active_history`` can also be set directly via - :func:`.column_property` and :func:`.relationship`. - - """ - - @classmethod - def _adapt_listener(cls, self, listener): - event.listen(self, 'append', listener.append, - active_history=listener.active_history, - raw=True, retval=True) - event.listen(self, 'remove', listener.remove, - active_history=listener.active_history, - raw=True, retval=True) - event.listen(self, 'set', listener.set, - active_history=listener.active_history, - raw=True, retval=True) - - def append(self, state, value, initiator): - """Receive a collection append event. 
- - The returned value will be used as the actual value to be - appended. - - """ - return value - - def remove(self, state, value, initiator): - """Receive a remove event. - - No return value is defined. - - """ - pass - - def set(self, state, value, oldvalue, initiator): - """Receive a set event. - - The returned value will be used as the actual value to be - set. - - """ - return value diff --git a/python/sqlalchemy/orm/descriptor_props.py b/python/sqlalchemy/orm/descriptor_props.py deleted file mode 100644 index 17c2d28c..00000000 --- a/python/sqlalchemy/orm/descriptor_props.py +++ /dev/null @@ -1,699 +0,0 @@ -# orm/descriptor_props.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Descriptor properties are more "auxiliary" properties -that exist as configurational elements, but don't participate -as actively in the load/persist ORM loop. - -""" - -from .interfaces import MapperProperty, PropComparator -from .util import _none_set -from . import attributes -from .. import util, sql, exc as sa_exc, event, schema -from ..sql import expression -from . import properties -from . import query - - -class DescriptorProperty(MapperProperty): - """:class:`.MapperProperty` which proxies access to a - user-defined descriptor.""" - - doc = None - - def instrument_class(self, mapper): - prop = self - - class _ProxyImpl(object): - accepts_scalar_loader = False - expire_missing = True - collection = False - - def __init__(self, key): - self.key = key - - if hasattr(prop, 'get_history'): - def get_history(self, state, dict_, - passive=attributes.PASSIVE_OFF): - return prop.get_history(state, dict_, passive) - - if self.descriptor is None: - desc = getattr(mapper.class_, self.key, None) - if mapper._is_userland_descriptor(desc): - self.descriptor = desc - - if self.descriptor is None: - def fset(obj, value): - setattr(obj, self.name, value) - - def fdel(obj): - delattr(obj, self.name) - - def fget(obj): - return getattr(obj, self.name) - - self.descriptor = property( - fget=fget, - fset=fset, - fdel=fdel, - ) - - proxy_attr = attributes.create_proxied_attribute( - self.descriptor)( - self.parent.class_, - self.key, - self.descriptor, - lambda: self._comparator_factory(mapper), - doc=self.doc, - original_property=self - ) - proxy_attr.impl = _ProxyImpl(self.key) - mapper.class_manager.instrument_attribute(self.key, proxy_attr) - - -@util.langhelpers.dependency_for("sqlalchemy.orm.properties") -class CompositeProperty(DescriptorProperty): - """Defines a "composite" mapped attribute, representing a collection - of columns as one attribute. - - :class:`.CompositeProperty` is constructed using the :func:`.composite` - function. - - .. seealso:: - - :ref:`mapper_composite` - - """ - - def __init__(self, class_, *attrs, **kwargs): - """Return a composite column-based property for use with a Mapper. - - See the mapping documentation section :ref:`mapper_composite` for a - full usage example. - - The :class:`.MapperProperty` returned by :func:`.composite` - is the :class:`.CompositeProperty`. - - :param class\_: - The "composite type" class. - - :param \*cols: - List of Column objects to be mapped. - - :param active_history=False: - When ``True``, indicates that the "previous" value for a - scalar attribute should be loaded when replaced, if not - already loaded. See the same flag on :func:`.column_property`. - - .. 
versionchanged:: 0.7 - This flag specifically becomes meaningful - - previously it was a placeholder. - - :param group: - A group name for this property when marked as deferred. - - :param deferred: - When True, the column property is "deferred", meaning that it does - not load immediately, and is instead loaded when the attribute is - first accessed on an instance. See also - :func:`~sqlalchemy.orm.deferred`. - - :param comparator_factory: a class which extends - :class:`.CompositeProperty.Comparator` which provides custom SQL - clause generation for comparison operations. - - :param doc: - optional string that will be applied as the doc on the - class-bound descriptor. - - :param info: Optional data dictionary which will be populated into the - :attr:`.MapperProperty.info` attribute of this object. - - .. versionadded:: 0.8 - - :param extension: - an :class:`.AttributeExtension` instance, - or list of extensions, which will be prepended to the list of - attribute listeners for the resulting descriptor placed on the - class. **Deprecated.** Please see :class:`.AttributeEvents`. - - """ - super(CompositeProperty, self).__init__() - - self.attrs = attrs - self.composite_class = class_ - self.active_history = kwargs.get('active_history', False) - self.deferred = kwargs.get('deferred', False) - self.group = kwargs.get('group', None) - self.comparator_factory = kwargs.pop('comparator_factory', - self.__class__.Comparator) - if 'info' in kwargs: - self.info = kwargs.pop('info') - - util.set_creation_order(self) - self._create_descriptor() - - def instrument_class(self, mapper): - super(CompositeProperty, self).instrument_class(mapper) - self._setup_event_handlers() - - def do_init(self): - """Initialization which occurs after the :class:`.CompositeProperty` - has been associated with its parent mapper. - - """ - self._setup_arguments_on_columns() - - def _create_descriptor(self): - """Create the Python descriptor that will serve as - the access point on instances of the mapped class. - - """ - - def fget(instance): - dict_ = attributes.instance_dict(instance) - state = attributes.instance_state(instance) - - if self.key not in dict_: - # key not present. Iterate through related - # attributes, retrieve their values. This - # ensures they all load. - values = [ - getattr(instance, key) - for key in self._attribute_keys - ] - - # current expected behavior here is that the composite is - # created on access if the object is persistent or if - # col attributes have non-None. This would be better - # if the composite were created unconditionally, - # but that would be a behavioral change. 
- if self.key not in dict_ and ( - state.key is not None or - not _none_set.issuperset(values) - ): - dict_[self.key] = self.composite_class(*values) - state.manager.dispatch.refresh(state, None, [self.key]) - - return dict_.get(self.key, None) - - def fset(instance, value): - dict_ = attributes.instance_dict(instance) - state = attributes.instance_state(instance) - attr = state.manager[self.key] - previous = dict_.get(self.key, attributes.NO_VALUE) - for fn in attr.dispatch.set: - value = fn(state, value, previous, attr.impl) - dict_[self.key] = value - if value is None: - for key in self._attribute_keys: - setattr(instance, key, None) - else: - for key, value in zip( - self._attribute_keys, - value.__composite_values__()): - setattr(instance, key, value) - - def fdel(instance): - state = attributes.instance_state(instance) - dict_ = attributes.instance_dict(instance) - previous = dict_.pop(self.key, attributes.NO_VALUE) - attr = state.manager[self.key] - attr.dispatch.remove(state, previous, attr.impl) - for key in self._attribute_keys: - setattr(instance, key, None) - - self.descriptor = property(fget, fset, fdel) - - @util.memoized_property - def _comparable_elements(self): - return [ - getattr(self.parent.class_, prop.key) - for prop in self.props - ] - - @util.memoized_property - def props(self): - props = [] - for attr in self.attrs: - if isinstance(attr, str): - prop = self.parent.get_property( - attr, _configure_mappers=False) - elif isinstance(attr, schema.Column): - prop = self.parent._columntoproperty[attr] - elif isinstance(attr, attributes.InstrumentedAttribute): - prop = attr.property - else: - raise sa_exc.ArgumentError( - "Composite expects Column objects or mapped " - "attributes/attribute names as arguments, got: %r" - % (attr,)) - props.append(prop) - return props - - @property - def columns(self): - return [a for a in self.attrs if isinstance(a, schema.Column)] - - def _setup_arguments_on_columns(self): - """Propagate configuration arguments made on this composite - to the target columns, for those that apply. - - """ - for prop in self.props: - prop.active_history = self.active_history - if self.deferred: - prop.deferred = self.deferred - prop.strategy_class = prop._strategy_lookup( - ("deferred", True), - ("instrument", True)) - prop.group = self.group - - def _setup_event_handlers(self): - """Establish events that populate/expire the composite attribute.""" - - def load_handler(state, *args): - dict_ = state.dict - - if self.key in dict_: - return - - # if column elements aren't loaded, skip. - # __get__() will initiate a load for those - # columns - for k in self._attribute_keys: - if k not in dict_: - return - - # assert self.key not in dict_ - dict_[self.key] = self.composite_class( - *[state.dict[key] for key in - self._attribute_keys] - ) - - def expire_handler(state, keys): - if keys is None or set(self._attribute_keys).intersection(keys): - state.dict.pop(self.key, None) - - def insert_update_handler(mapper, connection, state): - """After an insert or update, some columns may be expired due - to server side defaults, or re-populated due to client side - defaults. Pop out the composite value here so that it - recreates. 
- - """ - - state.dict.pop(self.key, None) - - event.listen(self.parent, 'after_insert', - insert_update_handler, raw=True) - event.listen(self.parent, 'after_update', - insert_update_handler, raw=True) - event.listen(self.parent, 'load', - load_handler, raw=True, propagate=True) - event.listen(self.parent, 'refresh', - load_handler, raw=True, propagate=True) - event.listen(self.parent, 'expire', - expire_handler, raw=True, propagate=True) - - # TODO: need a deserialize hook here - - @util.memoized_property - def _attribute_keys(self): - return [ - prop.key for prop in self.props - ] - - def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): - """Provided for userland code that uses attributes.get_history().""" - - added = [] - deleted = [] - - has_history = False - for prop in self.props: - key = prop.key - hist = state.manager[key].impl.get_history(state, dict_) - if hist.has_changes(): - has_history = True - - non_deleted = hist.non_deleted() - if non_deleted: - added.extend(non_deleted) - else: - added.append(None) - if hist.deleted: - deleted.extend(hist.deleted) - else: - deleted.append(None) - - if has_history: - return attributes.History( - [self.composite_class(*added)], - (), - [self.composite_class(*deleted)] - ) - else: - return attributes.History( - (), [self.composite_class(*added)], () - ) - - def _comparator_factory(self, mapper): - return self.comparator_factory(self, mapper) - - class CompositeBundle(query.Bundle): - def __init__(self, property, expr): - self.property = property - super(CompositeProperty.CompositeBundle, self).__init__( - property.key, *expr) - - def create_row_processor(self, query, procs, labels): - def proc(row): - return self.property.composite_class( - *[proc(row) for proc in procs]) - return proc - - class Comparator(PropComparator): - """Produce boolean, comparison, and other operators for - :class:`.CompositeProperty` attributes. - - See the example in :ref:`composite_operations` for an overview - of usage , as well as the documentation for :class:`.PropComparator`. - - See also: - - :class:`.PropComparator` - - :class:`.ColumnOperators` - - :ref:`types_operators` - - :attr:`.TypeEngine.comparator_factory` - - """ - - __hash__ = None - - @property - def clauses(self): - return self.__clause_element__() - - def __clause_element__(self): - return expression.ClauseList( - group=False, *self._comparable_elements) - - def _query_clause_element(self): - return CompositeProperty.CompositeBundle( - self.prop, self.__clause_element__()) - - @util.memoized_property - def _comparable_elements(self): - if self._adapt_to_entity: - return [ - getattr( - self._adapt_to_entity.entity, - prop.key - ) for prop in self.prop._comparable_elements - ] - else: - return self.prop._comparable_elements - - def __eq__(self, other): - if other is None: - values = [None] * len(self.prop._comparable_elements) - else: - values = other.__composite_values__() - comparisons = [ - a == b - for a, b in zip(self.prop._comparable_elements, values) - ] - if self._adapt_to_entity: - comparisons = [self.adapter(x) for x in comparisons] - return sql.and_(*comparisons) - - def __ne__(self, other): - return sql.not_(self.__eq__(other)) - - def __str__(self): - return str(self.parent.class_.__name__) + "." 
+ self.key - - -@util.langhelpers.dependency_for("sqlalchemy.orm.properties") -class ConcreteInheritedProperty(DescriptorProperty): - """A 'do nothing' :class:`.MapperProperty` that disables - an attribute on a concrete subclass that is only present - on the inherited mapper, not the concrete classes' mapper. - - Cases where this occurs include: - - * When the superclass mapper is mapped against a - "polymorphic union", which includes all attributes from - all subclasses. - * When a relationship() is configured on an inherited mapper, - but not on the subclass mapper. Concrete mappers require - that relationship() is configured explicitly on each - subclass. - - """ - - def _comparator_factory(self, mapper): - comparator_callable = None - - for m in self.parent.iterate_to_root(): - p = m._props[self.key] - if not isinstance(p, ConcreteInheritedProperty): - comparator_callable = p.comparator_factory - break - return comparator_callable - - def __init__(self): - super(ConcreteInheritedProperty, self).__init__() - def warn(): - raise AttributeError("Concrete %s does not implement " - "attribute %r at the instance level. Add " - "this property explicitly to %s." % - (self.parent, self.key, self.parent)) - - class NoninheritedConcreteProp(object): - def __set__(s, obj, value): - warn() - - def __delete__(s, obj): - warn() - - def __get__(s, obj, owner): - if obj is None: - return self.descriptor - warn() - self.descriptor = NoninheritedConcreteProp() - - -@util.langhelpers.dependency_for("sqlalchemy.orm.properties") -class SynonymProperty(DescriptorProperty): - - def __init__(self, name, map_column=None, - descriptor=None, comparator_factory=None, - doc=None, info=None): - """Denote an attribute name as a synonym to a mapped property, - in that the attribute will mirror the value and expression behavior - of another attribute. - - :param name: the name of the existing mapped property. This - can refer to the string name of any :class:`.MapperProperty` - configured on the class, including column-bound attributes - and relationships. - - :param descriptor: a Python :term:`descriptor` that will be used - as a getter (and potentially a setter) when this attribute is - accessed at the instance level. - - :param map_column: if ``True``, the :func:`.synonym` construct will - locate the existing named :class:`.MapperProperty` based on the - attribute name of this :func:`.synonym`, and assign it to a new - attribute linked to the name of this :func:`.synonym`. - That is, given a mapping like:: - - class MyClass(Base): - __tablename__ = 'my_table' - - id = Column(Integer, primary_key=True) - job_status = Column(String(50)) - - job_status = synonym("_job_status", map_column=True) - - The above class ``MyClass`` will now have the ``job_status`` - :class:`.Column` object mapped to the attribute named - ``_job_status``, and the attribute named ``job_status`` will refer - to the synonym itself. This feature is typically used in - conjunction with the ``descriptor`` argument in order to link a - user-defined descriptor as a "wrapper" for an existing column. - - :param info: Optional data dictionary which will be populated into the - :attr:`.InspectionAttr.info` attribute of this object. - - .. versionadded:: 1.0.0 - - :param comparator_factory: A subclass of :class:`.PropComparator` - that will provide custom comparison behavior at the SQL expression - level. - - .. 
note:: - - For the use case of providing an attribute which redefines both - Python-level and SQL-expression level behavior of an attribute, - please refer to the Hybrid attribute introduced at - :ref:`mapper_hybrids` for a more effective technique. - - .. seealso:: - - :ref:`synonyms` - examples of functionality. - - :ref:`mapper_hybrids` - Hybrids provide a better approach for - more complicated attribute-wrapping schemes than synonyms. - - """ - super(SynonymProperty, self).__init__() - - self.name = name - self.map_column = map_column - self.descriptor = descriptor - self.comparator_factory = comparator_factory - self.doc = doc or (descriptor and descriptor.__doc__) or None - if info: - self.info = info - - util.set_creation_order(self) - - # TODO: when initialized, check _proxied_property, - # emit a warning if its not a column-based property - - @util.memoized_property - def _proxied_property(self): - return getattr(self.parent.class_, self.name).property - - def _comparator_factory(self, mapper): - prop = self._proxied_property - - if self.comparator_factory: - comp = self.comparator_factory(prop, mapper) - else: - comp = prop.comparator_factory(prop, mapper) - return comp - - def set_parent(self, parent, init): - if self.map_column: - # implement the 'map_column' option. - if self.key not in parent.mapped_table.c: - raise sa_exc.ArgumentError( - "Can't compile synonym '%s': no column on table " - "'%s' named '%s'" - % (self.name, parent.mapped_table.description, self.key)) - elif parent.mapped_table.c[self.key] in \ - parent._columntoproperty and \ - parent._columntoproperty[ - parent.mapped_table.c[self.key] - ].key == self.name: - raise sa_exc.ArgumentError( - "Can't call map_column=True for synonym %r=%r, " - "a ColumnProperty already exists keyed to the name " - "%r for column %r" % - (self.key, self.name, self.name, self.key) - ) - p = properties.ColumnProperty(parent.mapped_table.c[self.key]) - parent._configure_property( - self.name, p, - init=init, - setparent=True) - p._mapped_by_synonym = self.key - - self.parent = parent - - -@util.langhelpers.dependency_for("sqlalchemy.orm.properties") -class ComparableProperty(DescriptorProperty): - """Instruments a Python property for use in query expressions.""" - - def __init__( - self, comparator_factory, descriptor=None, doc=None, info=None): - """Provides a method of applying a :class:`.PropComparator` - to any Python descriptor attribute. - - .. versionchanged:: 0.7 - :func:`.comparable_property` is superseded by - the :mod:`~sqlalchemy.ext.hybrid` extension. See the example - at :ref:`hybrid_custom_comparators`. - - Allows any Python descriptor to behave like a SQL-enabled - attribute when used at the class level in queries, allowing - redefinition of expression operator behavior. 
- - In the example below we redefine :meth:`.PropComparator.operate` - to wrap both sides of an expression in ``func.lower()`` to produce - case-insensitive comparison:: - - from sqlalchemy.orm import comparable_property - from sqlalchemy.orm.interfaces import PropComparator - from sqlalchemy.sql import func - from sqlalchemy import Integer, String, Column - from sqlalchemy.ext.declarative import declarative_base - - class CaseInsensitiveComparator(PropComparator): - def __clause_element__(self): - return self.prop - - def operate(self, op, other): - return op( - func.lower(self.__clause_element__()), - func.lower(other) - ) - - Base = declarative_base() - - class SearchWord(Base): - __tablename__ = 'search_word' - id = Column(Integer, primary_key=True) - word = Column(String) - word_insensitive = comparable_property(lambda prop, mapper: - CaseInsensitiveComparator( - mapper.c.word, mapper) - ) - - - A mapping like the above allows the ``word_insensitive`` attribute - to render an expression like:: - - >>> print SearchWord.word_insensitive == "Trucks" - lower(search_word.word) = lower(:lower_1) - - :param comparator_factory: - A PropComparator subclass or factory that defines operator behavior - for this property. - - :param descriptor: - Optional when used in a ``properties={}`` declaration. The Python - descriptor or property to layer comparison behavior on top of. - - The like-named descriptor will be automatically retrieved from the - mapped class if left blank in a ``properties`` declaration. - - :param info: Optional data dictionary which will be populated into the - :attr:`.InspectionAttr.info` attribute of this object. - - .. versionadded:: 1.0.0 - - """ - super(ComparableProperty, self).__init__() - self.descriptor = descriptor - self.comparator_factory = comparator_factory - self.doc = doc or (descriptor and descriptor.__doc__) or None - if info: - self.info = info - util.set_creation_order(self) - - def _comparator_factory(self, mapper): - return self.comparator_factory(self, mapper) diff --git a/python/sqlalchemy/orm/dynamic.py b/python/sqlalchemy/orm/dynamic.py deleted file mode 100644 index aedd863f..00000000 --- a/python/sqlalchemy/orm/dynamic.py +++ /dev/null @@ -1,370 +0,0 @@ -# orm/dynamic.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Dynamic collection API. - -Dynamic collections act like Query() objects for read operations and support -basic add/delete mutation. - -""" - -from .. import log, util, exc -from ..sql import operators -from . import ( - attributes, object_session, util as orm_util, strategies, - object_mapper, exc as orm_exc, properties -) -from .query import Query - - -@log.class_logger -@properties.RelationshipProperty.strategy_for(lazy="dynamic") -class DynaLoader(strategies.AbstractRelationshipLoader): - def init_class_attribute(self, mapper): - self.is_class_level = True - if not self.uselist: - raise exc.InvalidRequestError( - "On relationship %s, 'dynamic' loaders cannot be used with " - "many-to-one/one-to-one relationships and/or " - "uselist=False." 
% self.parent_property) - strategies._register_attribute( - self, - mapper, - useobject=True, - uselist=True, - impl_class=DynamicAttributeImpl, - target_mapper=self.parent_property.mapper, - order_by=self.parent_property.order_by, - query_class=self.parent_property.query_class, - backref=self.parent_property.back_populates, - ) - - -class DynamicAttributeImpl(attributes.AttributeImpl): - uses_objects = True - accepts_scalar_loader = False - supports_population = False - collection = False - - def __init__(self, class_, key, typecallable, - dispatch, - target_mapper, order_by, query_class=None, **kw): - super(DynamicAttributeImpl, self).\ - __init__(class_, key, typecallable, dispatch, **kw) - self.target_mapper = target_mapper - self.order_by = order_by - if not query_class: - self.query_class = AppenderQuery - elif AppenderMixin in query_class.mro(): - self.query_class = query_class - else: - self.query_class = mixin_user_query(query_class) - - def get(self, state, dict_, passive=attributes.PASSIVE_OFF): - if not passive & attributes.SQL_OK: - return self._get_collection_history( - state, attributes.PASSIVE_NO_INITIALIZE).added_items - else: - return self.query_class(self, state) - - def get_collection(self, state, dict_, user_data=None, - passive=attributes.PASSIVE_NO_INITIALIZE): - if not passive & attributes.SQL_OK: - return self._get_collection_history(state, - passive).added_items - else: - history = self._get_collection_history(state, passive) - return history.added_plus_unchanged - - @util.memoized_property - def _append_token(self): - return attributes.Event(self, attributes.OP_APPEND) - - @util.memoized_property - def _remove_token(self): - return attributes.Event(self, attributes.OP_REMOVE) - - def fire_append_event(self, state, dict_, value, initiator, - collection_history=None): - if collection_history is None: - collection_history = self._modified_event(state, dict_) - - collection_history.add_added(value) - - for fn in self.dispatch.append: - value = fn(state, value, initiator or self._append_token) - - if self.trackparent and value is not None: - self.sethasparent(attributes.instance_state(value), state, True) - - def fire_remove_event(self, state, dict_, value, initiator, - collection_history=None): - if collection_history is None: - collection_history = self._modified_event(state, dict_) - - collection_history.add_removed(value) - - if self.trackparent and value is not None: - self.sethasparent(attributes.instance_state(value), state, False) - - for fn in self.dispatch.remove: - fn(state, value, initiator or self._remove_token) - - def _modified_event(self, state, dict_): - - if self.key not in state.committed_state: - state.committed_state[self.key] = CollectionHistory(self, state) - - state._modified_event(dict_, - self, - attributes.NEVER_SET) - - # this is a hack to allow the fixtures.ComparableEntity fixture - # to work - dict_[self.key] = True - return state.committed_state[self.key] - - def set(self, state, dict_, value, initiator, - passive=attributes.PASSIVE_OFF, - check_old=None, pop=False): - if initiator and initiator.parent_token is self.parent_token: - return - - if pop and value is None: - return - self._set_iterable(state, dict_, value) - - def _set_iterable(self, state, dict_, iterable, adapter=None): - new_values = list(iterable) - if state.has_identity: - old_collection = util.IdentitySet(self.get(state, dict_)) - - collection_history = self._modified_event(state, dict_) - if not state.has_identity: - old_collection = collection_history.added_items - 
else: - old_collection = old_collection.union( - collection_history.added_items) - - idset = util.IdentitySet - constants = old_collection.intersection(new_values) - additions = idset(new_values).difference(constants) - removals = old_collection.difference(constants) - - for member in new_values: - if member in additions: - self.fire_append_event(state, dict_, member, None, - collection_history=collection_history) - - for member in removals: - self.fire_remove_event(state, dict_, member, None, - collection_history=collection_history) - - def delete(self, *args, **kwargs): - raise NotImplementedError() - - def set_committed_value(self, state, dict_, value): - raise NotImplementedError("Dynamic attributes don't support " - "collection population.") - - def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): - c = self._get_collection_history(state, passive) - return c.as_history() - - def get_all_pending(self, state, dict_, - passive=attributes.PASSIVE_NO_INITIALIZE): - c = self._get_collection_history( - state, passive) - return [ - (attributes.instance_state(x), x) - for x in - c.all_items - ] - - def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF): - if self.key in state.committed_state: - c = state.committed_state[self.key] - else: - c = CollectionHistory(self, state) - - if state.has_identity and (passive & attributes.INIT_OK): - return CollectionHistory(self, state, apply_to=c) - else: - return c - - def append(self, state, dict_, value, initiator, - passive=attributes.PASSIVE_OFF): - if initiator is not self: - self.fire_append_event(state, dict_, value, initiator) - - def remove(self, state, dict_, value, initiator, - passive=attributes.PASSIVE_OFF): - if initiator is not self: - self.fire_remove_event(state, dict_, value, initiator) - - def pop(self, state, dict_, value, initiator, - passive=attributes.PASSIVE_OFF): - self.remove(state, dict_, value, initiator, passive=passive) - - -class AppenderMixin(object): - query_class = None - - def __init__(self, attr, state): - super(AppenderMixin, self).__init__(attr.target_mapper, None) - self.instance = instance = state.obj() - self.attr = attr - - mapper = object_mapper(instance) - prop = mapper._props[self.attr.key] - self._criterion = prop._with_parent( - instance, - alias_secondary=False) - - if self.attr.order_by: - self._order_by = self.attr.order_by - - def session(self): - sess = object_session(self.instance) - if sess is not None and self.autoflush and sess.autoflush \ - and self.instance in sess: - sess.flush() - if not orm_util.has_identity(self.instance): - return None - else: - return sess - session = property(session, lambda s, x: None) - - def __iter__(self): - sess = self.session - if sess is None: - return iter(self.attr._get_collection_history( - attributes.instance_state(self.instance), - attributes.PASSIVE_NO_INITIALIZE).added_items) - else: - return iter(self._clone(sess)) - - def __getitem__(self, index): - sess = self.session - if sess is None: - return self.attr._get_collection_history( - attributes.instance_state(self.instance), - attributes.PASSIVE_NO_INITIALIZE).indexed(index) - else: - return self._clone(sess).__getitem__(index) - - def count(self): - sess = self.session - if sess is None: - return len(self.attr._get_collection_history( - attributes.instance_state(self.instance), - attributes.PASSIVE_NO_INITIALIZE).added_items) - else: - return self._clone(sess).count() - - def _clone(self, sess=None): - # note we're returning an entirely new Query class instance - # here 
without any assignment capabilities; the class of this - # query is determined by the session. - instance = self.instance - if sess is None: - sess = object_session(instance) - if sess is None: - raise orm_exc.DetachedInstanceError( - "Parent instance %s is not bound to a Session, and no " - "contextual session is established; lazy load operation " - "of attribute '%s' cannot proceed" % ( - orm_util.instance_str(instance), self.attr.key)) - - if self.query_class: - query = self.query_class(self.attr.target_mapper, session=sess) - else: - query = sess.query(self.attr.target_mapper) - - query._criterion = self._criterion - query._order_by = self._order_by - - return query - - def extend(self, iterator): - for item in iterator: - self.attr.append( - attributes.instance_state(self.instance), - attributes.instance_dict(self.instance), item, None) - - def append(self, item): - self.attr.append( - attributes.instance_state(self.instance), - attributes.instance_dict(self.instance), item, None) - - def remove(self, item): - self.attr.remove( - attributes.instance_state(self.instance), - attributes.instance_dict(self.instance), item, None) - - -class AppenderQuery(AppenderMixin, Query): - """A dynamic query that supports basic collection storage operations.""" - - -def mixin_user_query(cls): - """Return a new class with AppenderQuery functionality layered over.""" - name = 'Appender' + cls.__name__ - return type(name, (AppenderMixin, cls), {'query_class': cls}) - - -class CollectionHistory(object): - """Overrides AttributeHistory to receive append/remove events directly.""" - - def __init__(self, attr, state, apply_to=None): - if apply_to: - coll = AppenderQuery(attr, state).autoflush(False) - self.unchanged_items = util.OrderedIdentitySet(coll) - self.added_items = apply_to.added_items - self.deleted_items = apply_to.deleted_items - self._reconcile_collection = True - else: - self.deleted_items = util.OrderedIdentitySet() - self.added_items = util.OrderedIdentitySet() - self.unchanged_items = util.OrderedIdentitySet() - self._reconcile_collection = False - - @property - def added_plus_unchanged(self): - return list(self.added_items.union(self.unchanged_items)) - - @property - def all_items(self): - return list(self.added_items.union( - self.unchanged_items).union(self.deleted_items)) - - def as_history(self): - if self._reconcile_collection: - added = self.added_items.difference(self.unchanged_items) - deleted = self.deleted_items.intersection(self.unchanged_items) - unchanged = self.unchanged_items.difference(deleted) - else: - added, unchanged, deleted = self.added_items,\ - self.unchanged_items,\ - self.deleted_items - return attributes.History( - list(added), - list(unchanged), - list(deleted), - ) - - def indexed(self, index): - return list(self.added_items)[index] - - def add_added(self, value): - self.added_items.add(value) - - def add_removed(self, value): - if value in self.added_items: - self.added_items.remove(value) - else: - self.deleted_items.add(value) diff --git a/python/sqlalchemy/orm/evaluator.py b/python/sqlalchemy/orm/evaluator.py deleted file mode 100644 index 1e828ff8..00000000 --- a/python/sqlalchemy/orm/evaluator.py +++ /dev/null @@ -1,134 +0,0 @@ -# orm/evaluator.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import operator -from ..sql import operators - - -class UnevaluatableError(Exception): - pass - 
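The dynamic collection API deleted above is easiest to follow with a usage sketch. This is a minimal illustration, not code from this patch: the ``Parent``/``Child`` mapping, the in-memory SQLite URL, and all names are assumed for the example. A relationship configured with ``lazy='dynamic'`` is handled by ``DynaLoader`` and exposes an ``AppenderQuery`` that supports ``append()``/``remove()`` for mutation while behaving like a ``Query`` for reads::

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship, sessionmaker

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        # 'dynamic' requires a collection; DynaLoader raises
        # InvalidRequestError for uselist=False relationships
        children = relationship('Child', lazy='dynamic')

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
        name = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    parent = Parent()
    parent.children.append(Child(name='a'))  # recorded in CollectionHistory
    session.add(parent)
    session.commit()

    # read operations act like Query() and emit SQL on demand
    print(parent.children.filter(Child.name == 'a').count())

The collection is never fully populated in memory; iteration, ``__getitem__`` and ``count()`` either query the database or, before the parent has an identity, fall back to the pending ``CollectionHistory``, which is what makes the strategy usable for very large collections.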
-_straight_ops = set(getattr(operators, op) - for op in ('add', 'mul', 'sub', - 'div', - 'mod', 'truediv', - 'lt', 'le', 'ne', 'gt', 'ge', 'eq')) - - -_notimplemented_ops = set(getattr(operators, op) - for op in ('like_op', 'notlike_op', 'ilike_op', - 'notilike_op', 'between_op', 'in_op', - 'notin_op', 'endswith_op', 'concat_op')) - - -class EvaluatorCompiler(object): - def __init__(self, target_cls=None): - self.target_cls = target_cls - - def process(self, clause): - meth = getattr(self, "visit_%s" % clause.__visit_name__, None) - if not meth: - raise UnevaluatableError( - "Cannot evaluate %s" % type(clause).__name__) - return meth(clause) - - def visit_grouping(self, clause): - return self.process(clause.element) - - def visit_null(self, clause): - return lambda obj: None - - def visit_false(self, clause): - return lambda obj: False - - def visit_true(self, clause): - return lambda obj: True - - def visit_column(self, clause): - if 'parentmapper' in clause._annotations: - parentmapper = clause._annotations['parentmapper'] - if self.target_cls and not issubclass( - self.target_cls, parentmapper.class_): - raise UnevaluatableError( - "Can't evaluate criteria against alternate class %s" % - parentmapper.class_ - ) - key = parentmapper._columntoproperty[clause].key - else: - key = clause.key - - get_corresponding_attr = operator.attrgetter(key) - return lambda obj: get_corresponding_attr(obj) - - def visit_clauselist(self, clause): - evaluators = list(map(self.process, clause.clauses)) - if clause.operator is operators.or_: - def evaluate(obj): - has_null = False - for sub_evaluate in evaluators: - value = sub_evaluate(obj) - if value: - return True - has_null = has_null or value is None - if has_null: - return None - return False - elif clause.operator is operators.and_: - def evaluate(obj): - for sub_evaluate in evaluators: - value = sub_evaluate(obj) - if not value: - if value is None: - return None - return False - return True - else: - raise UnevaluatableError( - "Cannot evaluate clauselist with operator %s" % - clause.operator) - - return evaluate - - def visit_binary(self, clause): - eval_left, eval_right = list(map(self.process, - [clause.left, clause.right])) - operator = clause.operator - if operator is operators.is_: - def evaluate(obj): - return eval_left(obj) == eval_right(obj) - elif operator is operators.isnot: - def evaluate(obj): - return eval_left(obj) != eval_right(obj) - elif operator in _straight_ops: - def evaluate(obj): - left_val = eval_left(obj) - right_val = eval_right(obj) - if left_val is None or right_val is None: - return None - return operator(eval_left(obj), eval_right(obj)) - else: - raise UnevaluatableError( - "Cannot evaluate %s with operator %s" % - (type(clause).__name__, clause.operator)) - return evaluate - - def visit_unary(self, clause): - eval_inner = self.process(clause.element) - if clause.operator is operators.inv: - def evaluate(obj): - value = eval_inner(obj) - if value is None: - return None - return not value - return evaluate - raise UnevaluatableError( - "Cannot evaluate %s with operator %s" % - (type(clause).__name__, clause.operator)) - - def visit_bindparam(self, clause): - val = clause.value - return lambda obj: val diff --git a/python/sqlalchemy/orm/events.py b/python/sqlalchemy/orm/events.py deleted file mode 100644 index 29bdbaa8..00000000 --- a/python/sqlalchemy/orm/events.py +++ /dev/null @@ -1,1800 +0,0 @@ -# orm/events.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy 
and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""ORM event interfaces. - -""" -from .. import event, exc, util -from .base import _mapper_or_none -import inspect -import weakref -from . import interfaces -from . import mapperlib, instrumentation -from .session import Session, sessionmaker -from .scoping import scoped_session -from .attributes import QueryableAttribute -from .query import Query - -class InstrumentationEvents(event.Events): - """Events related to class instrumentation events. - - The listeners here support being established against - any new style class, that is any object that is a subclass - of 'type'. Events will then be fired off for events - against that class. If the "propagate=True" flag is passed - to event.listen(), the event will fire off for subclasses - of that class as well. - - The Python ``type`` builtin is also accepted as a target, - which when used has the effect of events being emitted - for all classes. - - Note the "propagate" flag here is defaulted to ``True``, - unlike the other class level events where it defaults - to ``False``. This means that new subclasses will also - be the subject of these events, when a listener - is established on a superclass. - - .. versionchanged:: 0.8 - events here will emit based - on comparing the incoming class to the type of class - passed to :func:`.event.listen`. Previously, the - event would fire for any class unconditionally regardless - of what class was sent for listening, despite - documentation which stated the contrary. - - """ - - _target_class_doc = "SomeBaseClass" - _dispatch_target = instrumentation.InstrumentationFactory - - @classmethod - def _accept_with(cls, target): - if isinstance(target, type): - return _InstrumentationEventsHold(target) - else: - return None - - @classmethod - def _listen(cls, event_key, propagate=True, **kw): - target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, \ - event_key._listen_fn - - def listen(target_cls, *arg): - listen_cls = target() - if propagate and issubclass(target_cls, listen_cls): - return fn(target_cls, *arg) - elif not propagate and target_cls is listen_cls: - return fn(target_cls, *arg) - - def remove(ref): - key = event.registry._EventKey( - None, identifier, listen, - instrumentation._instrumentation_factory) - getattr(instrumentation._instrumentation_factory.dispatch, - identifier).remove(key) - - target = weakref.ref(target.class_, remove) - - event_key.\ - with_dispatch_target(instrumentation._instrumentation_factory).\ - with_wrapper(listen).base_listen(**kw) - - @classmethod - def _clear(cls): - super(InstrumentationEvents, cls)._clear() - instrumentation._instrumentation_factory.dispatch._clear() - - def class_instrument(self, cls): - """Called after the given class is instrumented. - - To get at the :class:`.ClassManager`, use - :func:`.manager_of_class`. - - """ - - def class_uninstrument(self, cls): - """Called before the given class is uninstrumented. - - To get at the :class:`.ClassManager`, use - :func:`.manager_of_class`. - - """ - - def attribute_instrument(self, cls, key, inst): - """Called when an attribute is instrumented.""" - - -class _InstrumentationEventsHold(object): - """temporary marker object used to transfer from _accept_with() to - _listen() on the InstrumentationEvents class. 
- - """ - - def __init__(self, class_): - self.class_ = class_ - - dispatch = event.dispatcher(InstrumentationEvents) - - -class InstanceEvents(event.Events): - """Define events specific to object lifecycle. - - e.g.:: - - from sqlalchemy import event - - def my_load_listener(target, context): - print "on load!" - - event.listen(SomeClass, 'load', my_load_listener) - - Available targets include: - - * mapped classes - * unmapped superclasses of mapped or to-be-mapped classes - (using the ``propagate=True`` flag) - * :class:`.Mapper` objects - * the :class:`.Mapper` class itself and the :func:`.mapper` - function indicate listening for all mappers. - - .. versionchanged:: 0.8.0 instance events can be associated with - unmapped superclasses of mapped classes. - - Instance events are closely related to mapper events, but - are more specific to the instance and its instrumentation, - rather than its system of persistence. - - When using :class:`.InstanceEvents`, several modifiers are - available to the :func:`.event.listen` function. - - :param propagate=False: When True, the event listener should - be applied to all inheriting classes as well as the - class which is the target of this listener. - :param raw=False: When True, the "target" argument passed - to applicable event listener functions will be the - instance's :class:`.InstanceState` management - object, rather than the mapped instance itself. - - """ - - _target_class_doc = "SomeClass" - - _dispatch_target = instrumentation.ClassManager - - @classmethod - def _new_classmanager_instance(cls, class_, classmanager): - _InstanceEventsHold.populate(class_, classmanager) - - @classmethod - @util.dependencies("sqlalchemy.orm") - def _accept_with(cls, orm, target): - if isinstance(target, instrumentation.ClassManager): - return target - elif isinstance(target, mapperlib.Mapper): - return target.class_manager - elif target is orm.mapper: - return instrumentation.ClassManager - elif isinstance(target, type): - if issubclass(target, mapperlib.Mapper): - return instrumentation.ClassManager - else: - manager = instrumentation.manager_of_class(target) - if manager: - return manager - else: - return _InstanceEventsHold(target) - return None - - @classmethod - def _listen(cls, event_key, raw=False, propagate=False, **kw): - target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, \ - event_key._listen_fn - - if not raw: - def wrap(state, *arg, **kw): - return fn(state.obj(), *arg, **kw) - event_key = event_key.with_wrapper(wrap) - - event_key.base_listen(propagate=propagate, **kw) - - if propagate: - for mgr in target.subclass_managers(True): - event_key.with_dispatch_target(mgr).base_listen( - propagate=True) - - @classmethod - def _clear(cls): - super(InstanceEvents, cls)._clear() - _InstanceEventsHold._clear() - - def first_init(self, manager, cls): - """Called when the first instance of a particular mapping is called. - - This event is called when the ``__init__`` method of a class - is called the first time for that particular class. The event - invokes before ``__init__`` actually proceeds as well as before - the :meth:`.InstanceEvents.init` event is invoked. - - """ - - def init(self, target, args, kwargs): - """Receive an instance when its constructor is called. - - This method is only called during a userland construction of - an object, in conjunction with the object's constructor, e.g. - its ``__init__`` method. 
It is not called when an object is - loaded from the database; see the :meth:`.InstanceEvents.load` - event in order to intercept a database load. - - The event is called before the actual ``__init__`` constructor - of the object is called. The ``kwargs`` dictionary may be - modified in-place in order to affect what is passed to - ``__init__``. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param args: positional arguments passed to the ``__init__`` method. - This is passed as a tuple and is currently immutable. - :param kwargs: keyword arguments passed to the ``__init__`` method. - This structure *can* be altered in place. - - .. seealso:: - - :meth:`.InstanceEvents.init_failure` - - :meth:`.InstanceEvents.load` - - """ - - def init_failure(self, target, args, kwargs): - """Receive an instance when its constructor has been called, - and raised an exception. - - This method is only called during a userland construction of - an object, in conjunction with the object's constructor, e.g. - its ``__init__`` method. It is not called when an object is loaded - from the database. - - The event is invoked after an exception raised by the ``__init__`` - method is caught. After the event - is invoked, the original exception is re-raised outwards, so that - the construction of the object still raises an exception. The - actual exception and stack trace raised should be present in - ``sys.exc_info()``. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param args: positional arguments that were passed to the ``__init__`` - method. - :param kwargs: keyword arguments that were passed to the ``__init__`` - method. - - .. seealso:: - - :meth:`.InstanceEvents.init` - - :meth:`.InstanceEvents.load` - - """ - - def load(self, target, context): - """Receive an object instance after it has been created via - ``__new__``, and after initial attribute population has - occurred. - - This typically occurs when the instance is created based on - incoming result rows, and is only called once for that - instance's lifetime. - - Note that during a result-row load, this method is called upon - the first row received for this instance. Note that some - attributes and collections may or may not be loaded or even - initialized, depending on what's present in the result rows. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param context: the :class:`.QueryContext` corresponding to the - current :class:`.Query` in progress. This argument may be - ``None`` if the load does not correspond to a :class:`.Query`, - such as during :meth:`.Session.merge`. - - .. seealso:: - - :meth:`.InstanceEvents.init` - - :meth:`.InstanceEvents.refresh` - - """ - - def refresh(self, target, context, attrs): - """Receive an object instance after one or more attributes have - been refreshed from a query. - - Contrast this to the :meth:`.InstanceEvents.load` method, which - is invoked when the object is first loaded from a query. - - :param target: the mapped instance. 
If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param context: the :class:`.QueryContext` corresponding to the - current :class:`.Query` in progress. - :param attrs: sequence of attribute names which - were populated, or None if all column-mapped, non-deferred - attributes were populated. - - .. seealso:: - - :meth:`.InstanceEvents.load` - - """ - - def refresh_flush(self, target, flush_context, attrs): - """Receive an object instance after one or more attributes have - been refreshed within the persistence of the object. - - This event is the same as :meth:`.InstanceEvents.refresh` except - it is invoked within the unit of work flush process, and the values - here typically come from the process of handling an INSERT or - UPDATE, such as via the RETURNING clause or from Python-side default - values. - - .. versionadded:: 1.0.5 - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param flush_context: Internal :class:`.UOWTransaction` object - which handles the details of the flush. - :param attrs: sequence of attribute names which - were populated. - - """ - - def expire(self, target, attrs): - """Receive an object instance after its attributes or some subset - have been expired. - - 'keys' is a list of attribute names. If None, the entire - state was expired. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param attrs: sequence of attribute - names which were expired, or None if all attributes were - expired. - - """ - - def pickle(self, target, state_dict): - """Receive an object instance when its associated state is - being pickled. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param state_dict: the dictionary returned by - :class:`.InstanceState.__getstate__`, containing the state - to be pickled. - - """ - - def unpickle(self, target, state_dict): - """Receive an object instance after its associated state has - been unpickled. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param state_dict: the dictionary sent to - :class:`.InstanceState.__setstate__`, containing the state - dictionary which was pickled. - - """ - - -class _EventsHold(event.RefCollection): - """Hold onto listeners against unmapped, uninstrumented classes. - - Establish _listen() for that class' mapper/instrumentation when - those objects are created for that class. 
- - """ - - def __init__(self, class_): - self.class_ = class_ - - @classmethod - def _clear(cls): - cls.all_holds.clear() - - class HoldEvents(object): - _dispatch_target = None - - @classmethod - def _listen(cls, event_key, raw=False, propagate=False, **kw): - target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, event_key.fn - - if target.class_ in target.all_holds: - collection = target.all_holds[target.class_] - else: - collection = target.all_holds[target.class_] = {} - - event.registry._stored_in_collection(event_key, target) - collection[event_key._key] = (event_key, raw, propagate) - - if propagate: - stack = list(target.class_.__subclasses__()) - while stack: - subclass = stack.pop(0) - stack.extend(subclass.__subclasses__()) - subject = target.resolve(subclass) - if subject is not None: - # we are already going through __subclasses__() - # so leave generic propagate flag False - event_key.with_dispatch_target(subject).\ - listen(raw=raw, propagate=False, **kw) - - def remove(self, event_key): - target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, event_key.fn - - if isinstance(target, _EventsHold): - collection = target.all_holds[target.class_] - del collection[event_key._key] - - @classmethod - def populate(cls, class_, subject): - for subclass in class_.__mro__: - if subclass in cls.all_holds: - collection = cls.all_holds[subclass] - for event_key, raw, propagate in collection.values(): - if propagate or subclass is class_: - # since we can't be sure in what order different - # classes in a hierarchy are triggered with - # populate(), we rely upon _EventsHold for all event - # assignment, instead of using the generic propagate - # flag. - event_key.with_dispatch_target(subject).\ - listen(raw=raw, propagate=False) - - -class _InstanceEventsHold(_EventsHold): - all_holds = weakref.WeakKeyDictionary() - - def resolve(self, class_): - return instrumentation.manager_of_class(class_) - - class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents): - pass - - dispatch = event.dispatcher(HoldInstanceEvents) - - -class MapperEvents(event.Events): - """Define events specific to mappings. - - e.g.:: - - from sqlalchemy import event - - def my_before_insert_listener(mapper, connection, target): - # execute a stored procedure upon INSERT, - # apply the value to the row to be inserted - target.calculated_value = connection.scalar( - "select my_special_function(%d)" - % target.special_number) - - # associate the listener function with SomeClass, - # to execute during the "before_insert" hook - event.listen( - SomeClass, 'before_insert', my_before_insert_listener) - - Available targets include: - - * mapped classes - * unmapped superclasses of mapped or to-be-mapped classes - (using the ``propagate=True`` flag) - * :class:`.Mapper` objects - * the :class:`.Mapper` class itself and the :func:`.mapper` - function indicate listening for all mappers. - - .. versionchanged:: 0.8.0 mapper events can be associated with - unmapped superclasses of mapped classes. - - Mapper events provide hooks into critical sections of the - mapper, including those related to object instrumentation, - object loading, and object persistence. In particular, the - persistence methods :meth:`~.MapperEvents.before_insert`, - and :meth:`~.MapperEvents.before_update` are popular - places to augment the state being persisted - however, these - methods operate with several significant restrictions. 
The - user is encouraged to evaluate the - :meth:`.SessionEvents.before_flush` and - :meth:`.SessionEvents.after_flush` methods as more - flexible and user-friendly hooks in which to apply - additional database state during a flush. - - When using :class:`.MapperEvents`, several modifiers are - available to the :func:`.event.listen` function. - - :param propagate=False: When True, the event listener should - be applied to all inheriting mappers and/or the mappers of - inheriting classes, as well as any - mapper which is the target of this listener. - :param raw=False: When True, the "target" argument passed - to applicable event listener functions will be the - instance's :class:`.InstanceState` management - object, rather than the mapped instance itself. - :param retval=False: when True, the user-defined event function - must have a return value, the purpose of which is either to - control subsequent event propagation, or to otherwise alter - the operation in progress by the mapper. Possible return - values are: - - * ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event - processing normally. - * ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent - event handlers in the chain. - * other values - the return value specified by specific listeners. - - """ - - _target_class_doc = "SomeClass" - _dispatch_target = mapperlib.Mapper - - @classmethod - def _new_mapper_instance(cls, class_, mapper): - _MapperEventsHold.populate(class_, mapper) - - @classmethod - @util.dependencies("sqlalchemy.orm") - def _accept_with(cls, orm, target): - if target is orm.mapper: - return mapperlib.Mapper - elif isinstance(target, type): - if issubclass(target, mapperlib.Mapper): - return target - else: - mapper = _mapper_or_none(target) - if mapper is not None: - return mapper - else: - return _MapperEventsHold(target) - else: - return target - - @classmethod - def _listen( - cls, event_key, raw=False, retval=False, propagate=False, **kw): - target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, \ - event_key._listen_fn - - if identifier in ("before_configured", "after_configured") and \ - target is not mapperlib.Mapper: - util.warn( - "'before_configured' and 'after_configured' ORM events " - "only invoke with the mapper() function or Mapper class " - "as the target.") - - if not raw or not retval: - if not raw: - meth = getattr(cls, identifier) - try: - target_index = \ - inspect.getargspec(meth)[0].index('target') - 1 - except ValueError: - target_index = None - - def wrap(*arg, **kw): - if not raw and target_index is not None: - arg = list(arg) - arg[target_index] = arg[target_index].obj() - if not retval: - fn(*arg, **kw) - return interfaces.EXT_CONTINUE - else: - return fn(*arg, **kw) - event_key = event_key.with_wrapper(wrap) - - if propagate: - for mapper in target.self_and_descendants: - event_key.with_dispatch_target(mapper).base_listen( - propagate=True, **kw) - else: - event_key.base_listen(**kw) - - @classmethod - def _clear(cls): - super(MapperEvents, cls)._clear() - _MapperEventsHold._clear() - - def instrument_class(self, mapper, class_): - """Receive a class when the mapper is first constructed, - before instrumentation is applied to the mapped class. - - This event is the earliest phase of mapper construction. - Most attributes of the mapper are not yet initialized. 
- - This listener can either be applied to the :class:`.Mapper` - class overall, or to any un-mapped class which serves as a base - for classes that will be mapped (using the ``propagate=True`` flag):: - - Base = declarative_base() - - @event.listens_for(Base, "instrument_class", propagate=True) - def on_new_class(mapper, cls_): - " ... " - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param class\_: the mapped class. - - """ - - def mapper_configured(self, mapper, class_): - """Called when a specific mapper has completed its own configuration - within the scope of the :func:`.configure_mappers` call. - - The :meth:`.MapperEvents.mapper_configured` event is invoked - for each mapper that is encountered when the - :func:`.orm.configure_mappers` function proceeds through the current - list of not-yet-configured mappers. - :func:`.orm.configure_mappers` is typically invoked - automatically as mappings are first used, as well as each time - new mappers have been made available and new mapper use is - detected. - - When the event is called, the mapper should be in its final - state, but **not including backrefs** that may be invoked from - other mappers; they might still be pending within the - configuration operation. Bidirectional relationships that - are instead configured via the - :paramref:`.orm.relationship.back_populates` argument - *will* be fully available, since this style of relationship does not - rely upon other possibly-not-configured mappers to know that they - exist. - - For an event that is guaranteed to have **all** mappers ready - to go including backrefs that are defined only on other - mappings, use the :meth:`.MapperEvents.after_configured` - event; this event invokes only after all known mappings have been - fully configured. - - The :meth:`.MapperEvents.mapper_configured` event, unlike - :meth:`.MapperEvents.before_configured` or - :meth:`.MapperEvents.after_configured`, - is called for each mapper/class individually, and the mapper is - passed to the event itself. It also is called exactly once for - a particular mapper. The event is therefore useful for - configurational steps that benefit from being invoked just once - on a specific mapper basis, which don't require that "backref" - configurations are necessarily ready yet. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param class\_: the mapped class. - - .. seealso:: - - :meth:`.MapperEvents.before_configured` - - :meth:`.MapperEvents.after_configured` - - """ - # TODO: need coverage for this event - - def before_configured(self): - """Called before a series of mappers have been configured. - - The :meth:`.MapperEvents.before_configured` event is invoked - each time the :func:`.orm.configure_mappers` function is - invoked, before the function has done any of its work. - :func:`.orm.configure_mappers` is typically invoked - automatically as mappings are first used, as well as each time - new mappers have been made available and new mapper use is - detected. - - This event can **only** be applied to the :class:`.Mapper` class - or :func:`.mapper` function, and not to individual mappings or - mapped classes. It is only invoked for all mappings as a whole:: - - from sqlalchemy.orm import mapper - - @event.listens_for(mapper, "before_configured") - def go(): - # ... 
-
-        Contrast this event to :meth:`.MapperEvents.after_configured`,
-        which is invoked after the series of mappers has been configured,
-        as well as :meth:`.MapperEvents.mapper_configured`, which is invoked
-        on a per-mapper basis as each one is configured to the extent possible.
-
-        Theoretically this event is called once per
-        application, but is actually called any time new mappers
-        are to be affected by a :func:`.orm.configure_mappers`
-        call.   If new mappings are constructed after existing ones have
-        already been used, this event will likely be called again.  To ensure
-        that a particular event is only called once and no further, the
-        ``once=True`` argument (new in 0.9.4) can be applied::
-
-            from sqlalchemy.orm import mapper
-
-            @event.listens_for(mapper, "before_configured", once=True)
-            def go():
-                # ...
-
-
-        .. versionadded:: 0.9.3
-
-
-        .. seealso::
-
-            :meth:`.MapperEvents.mapper_configured`
-
-            :meth:`.MapperEvents.after_configured`
-
-        """
-
-    def after_configured(self):
-        """Called after a series of mappers have been configured.
-
-        The :meth:`.MapperEvents.after_configured` event is invoked
-        each time the :func:`.orm.configure_mappers` function is
-        invoked, after the function has completed its work.
-        :func:`.orm.configure_mappers` is typically invoked
-        automatically as mappings are first used, as well as each time
-        new mappers have been made available and new mapper use is
-        detected.
-
-        Contrast this event to the :meth:`.MapperEvents.mapper_configured`
-        event, which is called on a per-mapper basis while the configuration
-        operation proceeds; unlike that event, when this event is invoked,
-        all cross-configurations (e.g. backrefs) will also have been made
-        available for any mappers that were pending.
-        Also contrast to :meth:`.MapperEvents.before_configured`,
-        which is invoked before the series of mappers has been configured.
-
-        This event can **only** be applied to the :class:`.Mapper` class
-        or :func:`.mapper` function, and not to individual mappings or
-        mapped classes.  It is only invoked for all mappings as a whole::
-
-            from sqlalchemy.orm import mapper
-
-            @event.listens_for(mapper, "after_configured")
-            def go():
-                # ...
-
-        Theoretically this event is called once per
-        application, but is actually called any time new mappers
-        have been affected by a :func:`.orm.configure_mappers`
-        call.   If new mappings are constructed after existing ones have
-        already been used, this event will likely be called again.  To ensure
-        that a particular event is only called once and no further, the
-        ``once=True`` argument (new in 0.9.4) can be applied::
-
-            from sqlalchemy.orm import mapper
-
-            @event.listens_for(mapper, "after_configured", once=True)
-            def go():
-                # ...
-
-        .. seealso::
-
-            :meth:`.MapperEvents.mapper_configured`
-
-            :meth:`.MapperEvents.before_configured`
-
-        """
-
-    def before_insert(self, mapper, connection, target):
-        """Receive an object instance before an INSERT statement
-        is emitted corresponding to that instance.
-
-        This event is used to modify local, non-object related
-        attributes on the instance before an INSERT occurs, as well
-        as to emit additional SQL statements on the given
-        connection.
-
-        The event is often called for a batch of objects of the
-        same class before their INSERT statements are emitted at
-        once in a later step.
In the extremely rare case that - this is not desirable, the :func:`.mapper` can be - configured with ``batch=False``, which will cause - batches of instances to be broken up into individual - (and more poorly performing) event->persist->event - steps. - - .. warning:: - - Mapper-level flush events only allow **very limited operations**, - on attributes local to the row being operated upon only, - as well as allowing any SQL to be emitted on the given - :class:`.Connection`. **Please read fully** the notes - at :ref:`session_persistence_mapper` for guidelines on using - these methods; generally, the :meth:`.SessionEvents.before_flush` - method should be preferred for general on-flush changes. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit INSERT statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being persisted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - .. seealso:: - - :ref:`session_persistence_events` - - """ - - def after_insert(self, mapper, connection, target): - """Receive an object instance after an INSERT statement - is emitted corresponding to that instance. - - This event is used to modify in-Python-only - state on the instance after an INSERT occurs, as well - as to emit additional SQL statements on the given - connection. - - The event is often called for a batch of objects of the - same class after their INSERT statements have been - emitted at once in a previous step. In the extremely - rare case that this is not desirable, the - :func:`.mapper` can be configured with ``batch=False``, - which will cause batches of instances to be broken up - into individual (and more poorly performing) - event->persist->event steps. - - .. warning:: - - Mapper-level flush events only allow **very limited operations**, - on attributes local to the row being operated upon only, - as well as allowing any SQL to be emitted on the given - :class:`.Connection`. **Please read fully** the notes - at :ref:`session_persistence_mapper` for guidelines on using - these methods; generally, the :meth:`.SessionEvents.before_flush` - method should be preferred for general on-flush changes. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit INSERT statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being persisted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - .. seealso:: - - :ref:`session_persistence_events` - - """ - - def before_update(self, mapper, connection, target): - """Receive an object instance before an UPDATE statement - is emitted corresponding to that instance. - - This event is used to modify local, non-object related - attributes on the instance before an UPDATE occurs, as well - as to emit additional SQL statements on the given - connection. 
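A common use of :meth:`.MapperEvents.before_update` consistent with the
constraints above is to stamp an audit column just before the row is
written.  A minimal sketch, assuming a hypothetical ``Account`` mapping
with an ``updated_at`` column (both names are illustrative)::

    import datetime

    from sqlalchemy import Column, DateTime, Integer, event
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Account(Base):
        __tablename__ = 'account'
        id = Column(Integer, primary_key=True)
        updated_at = Column(DateTime)

    @event.listens_for(Account, "before_update")
    def stamp_updated_at(mapper, connection, target):
        # only local, column-based attributes may be modified here
        target.updated_at = datetime.datetime.utcnow()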
- - This method is called for all instances that are - marked as "dirty", *even those which have no net changes - to their column-based attributes*. An object is marked - as dirty when any of its column-based attributes have a - "set attribute" operation called or when any of its - collections are modified. If, at update time, no - column-based attributes have any net changes, no UPDATE - statement will be issued. This means that an instance - being sent to :meth:`~.MapperEvents.before_update` is - *not* a guarantee that an UPDATE statement will be - issued, although you can affect the outcome here by - modifying attributes so that a net change in value does - exist. - - To detect if the column-based attributes on the object have net - changes, and will therefore generate an UPDATE statement, use - ``object_session(instance).is_modified(instance, - include_collections=False)``. - - The event is often called for a batch of objects of the - same class before their UPDATE statements are emitted at - once in a later step. In the extremely rare case that - this is not desirable, the :func:`.mapper` can be - configured with ``batch=False``, which will cause - batches of instances to be broken up into individual - (and more poorly performing) event->persist->event - steps. - - .. warning:: - - Mapper-level flush events only allow **very limited operations**, - on attributes local to the row being operated upon only, - as well as allowing any SQL to be emitted on the given - :class:`.Connection`. **Please read fully** the notes - at :ref:`session_persistence_mapper` for guidelines on using - these methods; generally, the :meth:`.SessionEvents.before_flush` - method should be preferred for general on-flush changes. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit UPDATE statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being persisted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - .. seealso:: - - :ref:`session_persistence_events` - - """ - - def after_update(self, mapper, connection, target): - """Receive an object instance after an UPDATE statement - is emitted corresponding to that instance. - - This event is used to modify in-Python-only - state on the instance after an UPDATE occurs, as well - as to emit additional SQL statements on the given - connection. - - This method is called for all instances that are - marked as "dirty", *even those which have no net changes - to their column-based attributes*, and for which - no UPDATE statement has proceeded. An object is marked - as dirty when any of its column-based attributes have a - "set attribute" operation called or when any of its - collections are modified. If, at update time, no - column-based attributes have any net changes, no UPDATE - statement will be issued. This means that an instance - being sent to :meth:`~.MapperEvents.after_update` is - *not* a guarantee that an UPDATE statement has been - issued. - - To detect if the column-based attributes on the object have net - changes, and therefore resulted in an UPDATE statement, use - ``object_session(instance).is_modified(instance, - include_collections=False)``. 
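As a concrete illustration of the ``is_modified()`` check just described,
a listener can use it to act only when an UPDATE statement was actually
emitted.  A minimal sketch, reusing the hypothetical ``Account`` mapping
from the previous example and assuming an ``account_audit`` table exists::

    from sqlalchemy import event, text
    from sqlalchemy.orm import object_session

    @event.listens_for(Account, "after_update")
    def record_real_updates(mapper, connection, target):
        # True only if column-based attributes had net changes, i.e.
        # an UPDATE statement was actually emitted for this instance
        if object_session(target).is_modified(
                target, include_collections=False):
            connection.execute(
                text("INSERT INTO account_audit (account_id) "
                     "VALUES (:id)"),
                id=target.id)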
- - The event is often called for a batch of objects of the - same class after their UPDATE statements have been emitted at - once in a previous step. In the extremely rare case that - this is not desirable, the :func:`.mapper` can be - configured with ``batch=False``, which will cause - batches of instances to be broken up into individual - (and more poorly performing) event->persist->event - steps. - - .. warning:: - - Mapper-level flush events only allow **very limited operations**, - on attributes local to the row being operated upon only, - as well as allowing any SQL to be emitted on the given - :class:`.Connection`. **Please read fully** the notes - at :ref:`session_persistence_mapper` for guidelines on using - these methods; generally, the :meth:`.SessionEvents.before_flush` - method should be preferred for general on-flush changes. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit UPDATE statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being persisted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - .. seealso:: - - :ref:`session_persistence_events` - - """ - - def before_delete(self, mapper, connection, target): - """Receive an object instance before a DELETE statement - is emitted corresponding to that instance. - - This event is used to emit additional SQL statements on - the given connection as well as to perform application - specific bookkeeping related to a deletion event. - - The event is often called for a batch of objects of the - same class before their DELETE statements are emitted at - once in a later step. - - .. warning:: - - Mapper-level flush events only allow **very limited operations**, - on attributes local to the row being operated upon only, - as well as allowing any SQL to be emitted on the given - :class:`.Connection`. **Please read fully** the notes - at :ref:`session_persistence_mapper` for guidelines on using - these methods; generally, the :meth:`.SessionEvents.before_flush` - method should be preferred for general on-flush changes. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit DELETE statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being deleted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - .. seealso:: - - :ref:`session_persistence_events` - - """ - - def after_delete(self, mapper, connection, target): - """Receive an object instance after a DELETE statement - has been emitted corresponding to that instance. - - This event is used to emit additional SQL statements on - the given connection as well as to perform application - specific bookkeeping related to a deletion event. - - The event is often called for a batch of objects of the - same class after their DELETE statements have been emitted at - once in a previous step. - - .. 
warning:: - - Mapper-level flush events only allow **very limited operations**, - on attributes local to the row being operated upon only, - as well as allowing any SQL to be emitted on the given - :class:`.Connection`. **Please read fully** the notes - at :ref:`session_persistence_mapper` for guidelines on using - these methods; generally, the :meth:`.SessionEvents.before_flush` - method should be preferred for general on-flush changes. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit DELETE statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being deleted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - .. seealso:: - - :ref:`session_persistence_events` - - """ - - -class _MapperEventsHold(_EventsHold): - all_holds = weakref.WeakKeyDictionary() - - def resolve(self, class_): - return _mapper_or_none(class_) - - class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents): - pass - - dispatch = event.dispatcher(HoldMapperEvents) - - -class SessionEvents(event.Events): - """Define events specific to :class:`.Session` lifecycle. - - e.g.:: - - from sqlalchemy import event - from sqlalchemy.orm import sessionmaker - - def my_before_commit(session): - print "before commit!" - - Session = sessionmaker() - - event.listen(Session, "before_commit", my_before_commit) - - The :func:`~.event.listen` function will accept - :class:`.Session` objects as well as the return result - of :class:`~.sessionmaker()` and :class:`~.scoped_session()`. - - Additionally, it accepts the :class:`.Session` class which - will apply listeners to all :class:`.Session` instances - globally. - - """ - - _target_class_doc = "SomeSessionOrFactory" - - _dispatch_target = Session - - @classmethod - def _accept_with(cls, target): - if isinstance(target, scoped_session): - - target = target.session_factory - if not isinstance(target, sessionmaker) and \ - ( - not isinstance(target, type) or - not issubclass(target, Session) - ): - raise exc.ArgumentError( - "Session event listen on a scoped_session " - "requires that its creation callable " - "is associated with the Session class.") - - if isinstance(target, sessionmaker): - return target.class_ - elif isinstance(target, type): - if issubclass(target, scoped_session): - return Session - elif issubclass(target, Session): - return target - elif isinstance(target, Session): - return target - else: - return None - - def after_transaction_create(self, session, transaction): - """Execute when a new :class:`.SessionTransaction` is created. - - This event differs from :meth:`~.SessionEvents.after_begin` - in that it occurs for each :class:`.SessionTransaction` - overall, as opposed to when transactions are begun - on individual database connections. It is also invoked - for nested transactions and subtransactions, and is always - matched by a corresponding - :meth:`~.SessionEvents.after_transaction_end` event - (assuming normal operation of the :class:`.Session`). - - :param session: the target :class:`.Session`. - :param transaction: the target :class:`.SessionTransaction`. - - .. versionadded:: 0.8 - - .. 
seealso:: - - :meth:`~.SessionEvents.after_transaction_end` - - """ - - def after_transaction_end(self, session, transaction): - """Execute when the span of a :class:`.SessionTransaction` ends. - - This event differs from :meth:`~.SessionEvents.after_commit` - in that it corresponds to all :class:`.SessionTransaction` - objects in use, including those for nested transactions - and subtransactions, and is always matched by a corresponding - :meth:`~.SessionEvents.after_transaction_create` event. - - :param session: the target :class:`.Session`. - :param transaction: the target :class:`.SessionTransaction`. - - .. versionadded:: 0.8 - - .. seealso:: - - :meth:`~.SessionEvents.after_transaction_create` - - """ - - def before_commit(self, session): - """Execute before commit is called. - - .. note:: - - The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush, - that is, the :class:`.Session` can emit SQL to the database - many times within the scope of a transaction. - For interception of these events, use the - :meth:`~.SessionEvents.before_flush`, - :meth:`~.SessionEvents.after_flush`, or - :meth:`~.SessionEvents.after_flush_postexec` - events. - - :param session: The target :class:`.Session`. - - .. seealso:: - - :meth:`~.SessionEvents.after_commit` - - :meth:`~.SessionEvents.after_begin` - - :meth:`~.SessionEvents.after_transaction_create` - - :meth:`~.SessionEvents.after_transaction_end` - - """ - - def after_commit(self, session): - """Execute after a commit has occurred. - - .. note:: - - The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush, - that is, the :class:`.Session` can emit SQL to the database - many times within the scope of a transaction. - For interception of these events, use the - :meth:`~.SessionEvents.before_flush`, - :meth:`~.SessionEvents.after_flush`, or - :meth:`~.SessionEvents.after_flush_postexec` - events. - - .. note:: - - The :class:`.Session` is not in an active transaction - when the :meth:`~.SessionEvents.after_commit` event is invoked, - and therefore can not emit SQL. To emit SQL corresponding to - every transaction, use the :meth:`~.SessionEvents.before_commit` - event. - - :param session: The target :class:`.Session`. - - .. seealso:: - - :meth:`~.SessionEvents.before_commit` - - :meth:`~.SessionEvents.after_begin` - - :meth:`~.SessionEvents.after_transaction_create` - - :meth:`~.SessionEvents.after_transaction_end` - - """ - - def after_rollback(self, session): - """Execute after a real DBAPI rollback has occurred. - - Note that this event only fires when the *actual* rollback against - the database occurs - it does *not* fire each time the - :meth:`.Session.rollback` method is called, if the underlying - DBAPI transaction has already been rolled back. In many - cases, the :class:`.Session` will not be in - an "active" state during this event, as the current - transaction is not valid. To acquire a :class:`.Session` - which is active after the outermost rollback has proceeded, - use the :meth:`.SessionEvents.after_soft_rollback` event, checking the - :attr:`.Session.is_active` flag. - - :param session: The target :class:`.Session`. - - """ - - def after_soft_rollback(self, session, previous_transaction): - """Execute after any rollback has occurred, including "soft" - rollbacks that don't actually emit at the DBAPI level. - - This corresponds to both nested and outer rollbacks, i.e. 
- the innermost rollback that calls the DBAPI's - rollback() method, as well as the enclosing rollback - calls that only pop themselves from the transaction stack. - - The given :class:`.Session` can be used to invoke SQL and - :meth:`.Session.query` operations after an outermost rollback - by first checking the :attr:`.Session.is_active` flag:: - - @event.listens_for(Session, "after_soft_rollback") - def do_something(session, previous_transaction): - if session.is_active: - session.execute("select * from some_table") - - :param session: The target :class:`.Session`. - :param previous_transaction: The :class:`.SessionTransaction` - transactional marker object which was just closed. The current - :class:`.SessionTransaction` for the given :class:`.Session` is - available via the :attr:`.Session.transaction` attribute. - - .. versionadded:: 0.7.3 - - """ - - def before_flush(self, session, flush_context, instances): - """Execute before flush process has started. - - :param session: The target :class:`.Session`. - :param flush_context: Internal :class:`.UOWTransaction` object - which handles the details of the flush. - :param instances: Usually ``None``, this is the collection of - objects which can be passed to the :meth:`.Session.flush` method - (note this usage is deprecated). - - .. seealso:: - - :meth:`~.SessionEvents.after_flush` - - :meth:`~.SessionEvents.after_flush_postexec` - - :ref:`session_persistence_events` - - """ - - def after_flush(self, session, flush_context): - """Execute after flush has completed, but before commit has been - called. - - Note that the session's state is still in pre-flush, i.e. 'new', - 'dirty', and 'deleted' lists still show pre-flush state as well - as the history settings on instance attributes. - - :param session: The target :class:`.Session`. - :param flush_context: Internal :class:`.UOWTransaction` object - which handles the details of the flush. - - .. seealso:: - - :meth:`~.SessionEvents.before_flush` - - :meth:`~.SessionEvents.after_flush_postexec` - - :ref:`session_persistence_events` - - """ - - def after_flush_postexec(self, session, flush_context): - """Execute after flush has completed, and after the post-exec - state occurs. - - This will be when the 'new', 'dirty', and 'deleted' lists are in - their final state. An actual commit() may or may not have - occurred, depending on whether or not the flush started its own - transaction or participated in a larger transaction. - - :param session: The target :class:`.Session`. - :param flush_context: Internal :class:`.UOWTransaction` object - which handles the details of the flush. - - - .. seealso:: - - :meth:`~.SessionEvents.before_flush` - - :meth:`~.SessionEvents.after_flush` - - :ref:`session_persistence_events` - - """ - - def after_begin(self, session, transaction, connection): - """Execute after a transaction is begun on a connection - - :param session: The target :class:`.Session`. - :param transaction: The :class:`.SessionTransaction`. - :param connection: The :class:`~.engine.Connection` object - which will be used for SQL statements. - - .. seealso:: - - :meth:`~.SessionEvents.before_commit` - - :meth:`~.SessionEvents.after_commit` - - :meth:`~.SessionEvents.after_transaction_create` - - :meth:`~.SessionEvents.after_transaction_end` - - """ - - def before_attach(self, session, instance): - """Execute before an instance is attached to a session. - - This is called before an add, delete or merge causes - the object to be part of the session. - - .. versionadded:: 0.8. 
Note that :meth:`~.SessionEvents.after_attach`
-           now fires off after the item is part of the session.
-           :meth:`.before_attach` is provided for those cases where
-           the item should not yet be part of the session state.
-
-        .. seealso::
-
-            :meth:`~.SessionEvents.after_attach`
-
-            :ref:`session_lifecycle_events`
-
-        """
-
-    def after_attach(self, session, instance):
-        """Execute after an instance is attached to a session.
-
-        This is called after an add, delete or merge.
-
-        .. note::
-
-           As of 0.8, this event fires off *after* the item
-           has been fully associated with the session, which is
-           different than previous releases.  For event
-           handlers that require the object not yet
-           be part of session state (such as handlers which
-           may autoflush while the target object is not
-           yet complete) consider the
-           new :meth:`.before_attach` event.
-
-        .. seealso::
-
-            :meth:`~.SessionEvents.before_attach`
-
-            :ref:`session_lifecycle_events`
-
-        """
-
-    @event._legacy_signature("0.9",
-                             ["session", "query", "query_context", "result"],
-                             lambda update_context: (
-                                 update_context.session,
-                                 update_context.query,
-                                 update_context.context,
-                                 update_context.result))
-    def after_bulk_update(self, update_context):
-        """Execute after a bulk update operation to the session.
-
-        This is called as a result of the :meth:`.Query.update` method.
-
-        :param update_context: an "update context" object which contains
-         details about the update, including these attributes:
-
-         * ``session`` - the :class:`.Session` involved
-         * ``query`` - the :class:`.Query` object that this update operation
-           was called upon.
-         * ``context`` The :class:`.QueryContext` object, corresponding
-           to the invocation of an ORM query.
-         * ``result`` the :class:`.ResultProxy` returned as a result of the
-           bulk UPDATE operation.
-
-
-        """
-
-    @event._legacy_signature("0.9",
-                             ["session", "query", "query_context", "result"],
-                             lambda delete_context: (
-                                 delete_context.session,
-                                 delete_context.query,
-                                 delete_context.context,
-                                 delete_context.result))
-    def after_bulk_delete(self, delete_context):
-        """Execute after a bulk delete operation to the session.
-
-        This is called as a result of the :meth:`.Query.delete` method.
-
-        :param delete_context: a "delete context" object which contains
-         details about the delete, including these attributes:
-
-         * ``session`` - the :class:`.Session` involved
-         * ``query`` - the :class:`.Query` object that this delete operation
-           was called upon.
-         * ``context`` The :class:`.QueryContext` object, corresponding
-           to the invocation of an ORM query.
-         * ``result`` the :class:`.ResultProxy` returned as a result of the
-           bulk DELETE operation.
-
-
-        """
-
-
-class AttributeEvents(event.Events):
-    """Define events for object attributes.
-
-    These are typically defined on the class-bound descriptor for the
-    target class.
- - e.g.:: - - from sqlalchemy import event - - def my_append_listener(target, value, initiator): - print "received append event for target: %s" % target - - event.listen(MyClass.collection, 'append', my_append_listener) - - Listeners have the option to return a possibly modified version - of the value, when the ``retval=True`` flag is passed - to :func:`~.event.listen`:: - - def validate_phone(target, value, oldvalue, initiator): - "Strip non-numeric characters from a phone number" - - return re.sub(r'(?![0-9])', '', value) - - # setup listener on UserContact.phone attribute, instructing - # it to use the return value - listen(UserContact.phone, 'set', validate_phone, retval=True) - - A validation function like the above can also raise an exception - such as :exc:`ValueError` to halt the operation. - - Several modifiers are available to the :func:`~.event.listen` function. - - :param active_history=False: When True, indicates that the - "set" event would like to receive the "old" value being - replaced unconditionally, even if this requires firing off - database loads. Note that ``active_history`` can also be - set directly via :func:`.column_property` and - :func:`.relationship`. - - :param propagate=False: When True, the listener function will - be established not just for the class attribute given, but - for attributes of the same name on all current subclasses - of that class, as well as all future subclasses of that - class, using an additional listener that listens for - instrumentation events. - :param raw=False: When True, the "target" argument to the - event will be the :class:`.InstanceState` management - object, rather than the mapped instance itself. - :param retval=False: when True, the user-defined event - listening must return the "value" argument from the - function. This gives the listening function the opportunity - to change the value that is ultimately used for a "set" - or "append" event. - - """ - - _target_class_doc = "SomeClass.some_attribute" - _dispatch_target = QueryableAttribute - - @staticmethod - def _set_dispatch(cls, dispatch_cls): - dispatch = event.Events._set_dispatch(cls, dispatch_cls) - dispatch_cls._active_history = False - return dispatch - - @classmethod - def _accept_with(cls, target): - # TODO: coverage - if isinstance(target, interfaces.MapperProperty): - return getattr(target.parent.class_, target.key) - else: - return target - - @classmethod - def _listen(cls, event_key, active_history=False, - raw=False, retval=False, - propagate=False): - - target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, \ - event_key._listen_fn - - if active_history: - target.dispatch._active_history = True - - if not raw or not retval: - def wrap(target, value, *arg): - if not raw: - target = target.obj() - if not retval: - fn(target, value, *arg) - return value - else: - return fn(target, value, *arg) - event_key = event_key.with_wrapper(wrap) - - event_key.base_listen(propagate=propagate) - - if propagate: - manager = instrumentation.manager_of_class(target.class_) - - for mgr in manager.subclass_managers(True): - event_key.with_dispatch_target( - mgr[target.key]).base_listen(propagate=True) - - def append(self, target, value, initiator): - """Receive a collection append event. - - :param target: the object instance receiving the event. - If the listener is registered with ``raw=True``, this will - be the :class:`.InstanceState` object. - :param value: the value being appended. 
If this listener
-          is registered with ``retval=True``, the listener
-          function must return this value, or a new value which
-          replaces it.
-        :param initiator: An instance of :class:`.attributes.Event`
-          representing the initiation of the event.  May be modified
-          from its original value by backref handlers in order to control
-          chained event propagation.
-
-          .. versionchanged:: 0.9.0 the ``initiator`` argument is now
-             passed as a :class:`.attributes.Event` object, and may be
-             modified by backref handlers within a chain of backref-linked
-             events.
-
-        :return: if the event was registered with ``retval=True``,
-         the given value, or a new effective value, should be returned.
-
-        """
-
-    def remove(self, target, value, initiator):
-        """Receive a collection remove event.
-
-        :param target: the object instance receiving the event.
-          If the listener is registered with ``raw=True``, this will
-          be the :class:`.InstanceState` object.
-        :param value: the value being removed.
-        :param initiator: An instance of :class:`.attributes.Event`
-          representing the initiation of the event.  May be modified
-          from its original value by backref handlers in order to control
-          chained event propagation.
-
-          .. versionchanged:: 0.9.0 the ``initiator`` argument is now
-             passed as a :class:`.attributes.Event` object, and may be
-             modified by backref handlers within a chain of backref-linked
-             events.
-
-        :return: No return value is defined for this event.
-        """
-
-    def set(self, target, value, oldvalue, initiator):
-        """Receive a scalar set event.
-
-        :param target: the object instance receiving the event.
-          If the listener is registered with ``raw=True``, this will
-          be the :class:`.InstanceState` object.
-        :param value: the value being set.  If this listener
-          is registered with ``retval=True``, the listener
-          function must return this value, or a new value which
-          replaces it.
-        :param oldvalue: the previous value being replaced.  This
-          may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
-          If the listener is registered with ``active_history=True``,
-          the previous value of the attribute will be loaded from
-          the database if the existing value is currently unloaded
-          or expired.
-        :param initiator: An instance of :class:`.attributes.Event`
-          representing the initiation of the event.  May be modified
-          from its original value by backref handlers in order to control
-          chained event propagation.
-
-          .. versionchanged:: 0.9.0 the ``initiator`` argument is now
-             passed as a :class:`.attributes.Event` object, and may be
-             modified by backref handlers within a chain of backref-linked
-             events.
-
-        :return: if the event was registered with ``retval=True``,
-         the given value, or a new effective value, should be returned.
-
-        """
-
-    def init_collection(self, target, collection, collection_adapter):
-        """Receive a 'collection init' event.
-
-        This event is triggered for a collection-based attribute, when
-        the initial "empty collection" is first generated for a blank
-        attribute, as well as for when the collection is replaced with
-        a new one, such as via a set event.
-
-        E.g., given that ``User.addresses`` is a relationship-based
-        collection, the event is triggered here::
-
-            u1 = User()
-            u1.addresses.append(a1)  # <- new collection
-
-        and also during replace operations::
-
-            u1.addresses = [a2, a3]  # <- new collection
-
-        :param target: the object instance receiving the event.
-         If the listener is registered with ``raw=True``, this will
-         be the :class:`.InstanceState` object.
-        :param collection: the new collection.  This will always be generated
-         from what was specified as
-         :paramref:`.RelationshipProperty.collection_class`, and will always
-         be empty.
-        :param collection_adapter: the :class:`.CollectionAdapter` that will
-         mediate internal access to the collection.
-
-        .. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
-           and :meth:`.AttributeEvents.dispose_collection` events supersede
-           the :class:`.collection.linker` hook.
-
-        """
-
-    def dispose_collection(self, target, collection, collection_adapter):
-        """Receive a 'collection dispose' event.
-
-        This event is triggered for a collection-based attribute when
-        a collection is replaced, that is::
-
-            u1.addresses.append(a1)
-
-            u1.addresses = [a2, a3]  # <- old collection is disposed
-
-        The mechanics of the event will typically include that the given
-        collection is empty, even if it stored objects while being replaced.
-
-        .. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
-           and :meth:`.AttributeEvents.dispose_collection` events supersede
-           the :class:`.collection.linker` hook.
-
-        """
-
-
-class QueryEvents(event.Events):
-    """Represent events within the construction of a :class:`.Query` object.
-
-    The events here are intended to be used with an as-yet-unreleased
-    inspection system for :class:`.Query`.   Some very basic operations
-    are possible now, however the inspection system is intended to allow
-    complex query manipulations to be automated.
-
-    .. versionadded:: 1.0.0
-
-    """
-
-    _target_class_doc = "SomeQuery"
-    _dispatch_target = Query
-
-    def before_compile(self, query):
-        """Receive the :class:`.Query` object before it is composed into a
-        core :class:`.Select` object.
-
-        This event is intended to allow changes to the query given::
-
-            @event.listens_for(Query, "before_compile", retval=True)
-            def no_deleted(query):
-                for desc in query.column_descriptions:
-                    if desc['type'] is User:
-                        entity = desc['expr']
-                        query = query.filter(entity.deleted == False)
-                return query
-
-        The event should normally be listened with the ``retval=True``
-        parameter set, so that the modified query may be returned.
-
-
-        """
-
-    @classmethod
-    def _listen(
-            cls, event_key, retval=False, **kw):
-        fn = event_key._listen_fn
-
-        if not retval:
-            def wrap(*arg, **kw):
-                if not retval:
-                    query = arg[0]
-                    fn(*arg, **kw)
-                    return query
-                else:
-                    return fn(*arg, **kw)
-            event_key = event_key.with_wrapper(wrap)
-
-        event_key.base_listen(**kw)
diff --git a/python/sqlalchemy/orm/exc.py b/python/sqlalchemy/orm/exc.py
deleted file mode 100644
index e010a295..00000000
--- a/python/sqlalchemy/orm/exc.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# orm/exc.py
-# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""SQLAlchemy ORM exceptions."""
-from .. import exc as sa_exc, util
-
-NO_STATE = (AttributeError, KeyError)
-"""Exception types that may be raised by instrumentation implementations."""
-
-
-class StaleDataError(sa_exc.SQLAlchemyError):
-    """An operation encountered database state that is unaccounted for.
-
-    Conditions which cause this to happen include:
-
-    * A flush may have attempted to update or delete rows
-      and an unexpected number of rows were matched during
-      the UPDATE or DELETE statement.   Note that when
-      version_id_col is used, rows in UPDATE or DELETE statements
-      are also matched against the current known version
-      identifier.
-
-    * A mapped object with version_id_col was refreshed,
-      and the version number coming back from the database does
-      not match that of the object itself.
-
-    * An object is detached from its parent object, however
-      the object was previously attached to a different parent
-      identity which was garbage collected, and a decision
-      cannot be made if the new parent was really the most
-      recent "parent".
-
-      .. versionadded:: 0.7.4
-
-    """
-
-ConcurrentModificationError = StaleDataError
-
-
-class FlushError(sa_exc.SQLAlchemyError):
-    """An invalid condition was detected during flush()."""
-
-
-class UnmappedError(sa_exc.InvalidRequestError):
-    """Base for exceptions that involve expected mappings not present."""
-
-
-class ObjectDereferencedError(sa_exc.SQLAlchemyError):
-    """An operation cannot complete due to an object being garbage
-    collected.
-
-    """
-
-
-class DetachedInstanceError(sa_exc.SQLAlchemyError):
-    """An attempt to access unloaded attributes on a
-    mapped instance that is detached."""
-
-
-class UnmappedInstanceError(UnmappedError):
-    """A mapping operation was requested for an unknown instance."""
-
-    @util.dependencies("sqlalchemy.orm.base")
-    def __init__(self, base, obj, msg=None):
-        if not msg:
-            try:
-                base.class_mapper(type(obj))
-                name = _safe_cls_name(type(obj))
-                msg = ("Class %r is mapped, but this instance lacks "
-                       "instrumentation.  This occurs when the instance "
-                       "is created before sqlalchemy.orm.mapper(%s) "
-                       "was called." % (name, name))
-            except UnmappedClassError:
-                msg = _default_unmapped(type(obj))
-                if isinstance(obj, type):
-                    msg += (
-                        '; was a class (%s) supplied where an instance was '
-                        'required?' % _safe_cls_name(obj))
-        UnmappedError.__init__(self, msg)
-
-    def __reduce__(self):
-        return self.__class__, (None, self.args[0])
-
-
-class UnmappedClassError(UnmappedError):
-    """A mapping operation was requested for an unknown class."""
-
-    def __init__(self, cls, msg=None):
-        if not msg:
-            msg = _default_unmapped(cls)
-        UnmappedError.__init__(self, msg)
-
-    def __reduce__(self):
-        return self.__class__, (None, self.args[0])
-
-
-class ObjectDeletedError(sa_exc.InvalidRequestError):
-    """A refresh operation failed to retrieve the database
-    row corresponding to an object's known primary key identity.
-
-    A refresh operation proceeds when an expired attribute is
-    accessed on an object, or when :meth:`.Query.get` is
-    used to retrieve an object which is, upon retrieval, detected
-    as expired.  A SELECT is emitted for the target row
-    based on primary key; if no row is returned, this
-    exception is raised.
-
-    The true meaning of this exception is simply that
-    no row exists for the primary key identifier associated
-    with a persistent object.   The row may have been
-    deleted, or in some cases the primary key updated
-    to a new value, outside of the ORM's management of the target
-    object.
-
-    """
-    @util.dependencies("sqlalchemy.orm.base")
-    def __init__(self, base, state, msg=None):
-        if not msg:
-            msg = "Instance '%s' has been deleted, or its "\
-                "row is otherwise not present."
% base.state_str(state) - - sa_exc.InvalidRequestError.__init__(self, msg) - - def __reduce__(self): - return self.__class__, (None, self.args[0]) - - -class UnmappedColumnError(sa_exc.InvalidRequestError): - """Mapping operation was requested on an unknown column.""" - - -class NoResultFound(sa_exc.InvalidRequestError): - """A database result was required but none was found.""" - - -class MultipleResultsFound(sa_exc.InvalidRequestError): - """A single database result was required but more than one were found.""" - - -def _safe_cls_name(cls): - try: - cls_name = '.'.join((cls.__module__, cls.__name__)) - except AttributeError: - cls_name = getattr(cls, '__name__', None) - if cls_name is None: - cls_name = repr(cls) - return cls_name - - -@util.dependencies("sqlalchemy.orm.base") -def _default_unmapped(base, cls): - try: - mappers = base.manager_of_class(cls).mappers - except NO_STATE: - mappers = {} - except TypeError: - mappers = {} - name = _safe_cls_name(cls) - - if not mappers: - return "Class '%s' is not mapped" % name diff --git a/python/sqlalchemy/orm/identity.py b/python/sqlalchemy/orm/identity.py deleted file mode 100644 index b4270385..00000000 --- a/python/sqlalchemy/orm/identity.py +++ /dev/null @@ -1,314 +0,0 @@ -# orm/identity.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import weakref -from . import attributes -from .. import util - - -class IdentityMap(object): - def __init__(self): - self._dict = {} - self._modified = set() - self._wr = weakref.ref(self) - - def keys(self): - return self._dict.keys() - - def replace(self, state): - raise NotImplementedError() - - def add(self, state): - raise NotImplementedError() - - def _add_unpresent(self, state, key): - """optional inlined form of add() which can assume item isn't present - in the map""" - self.add(state) - - def update(self, dict): - raise NotImplementedError("IdentityMap uses add() to insert data") - - def clear(self): - raise NotImplementedError("IdentityMap uses remove() to remove data") - - def _manage_incoming_state(self, state): - state._instance_dict = self._wr - - if state.modified: - self._modified.add(state) - - def _manage_removed_state(self, state): - del state._instance_dict - if state.modified: - self._modified.discard(state) - - def _dirty_states(self): - return self._modified - - def check_modified(self): - """return True if any InstanceStates present have been marked - as 'modified'. 
- - """ - return bool(self._modified) - - def has_key(self, key): - return key in self - - def popitem(self): - raise NotImplementedError("IdentityMap uses remove() to remove data") - - def pop(self, key, *args): - raise NotImplementedError("IdentityMap uses remove() to remove data") - - def setdefault(self, key, default=None): - raise NotImplementedError("IdentityMap uses add() to insert data") - - def __len__(self): - return len(self._dict) - - def copy(self): - raise NotImplementedError() - - def __setitem__(self, key, value): - raise NotImplementedError("IdentityMap uses add() to insert data") - - def __delitem__(self, key): - raise NotImplementedError("IdentityMap uses remove() to remove data") - - -class WeakInstanceDict(IdentityMap): - - def __getitem__(self, key): - state = self._dict[key] - o = state.obj() - if o is None: - raise KeyError(key) - return o - - def __contains__(self, key): - try: - if key in self._dict: - state = self._dict[key] - o = state.obj() - else: - return False - except KeyError: - return False - else: - return o is not None - - def contains_state(self, state): - return state.key in self._dict and self._dict[state.key] is state - - def replace(self, state): - if state.key in self._dict: - existing = self._dict[state.key] - if existing is not state: - self._manage_removed_state(existing) - else: - return - - self._dict[state.key] = state - self._manage_incoming_state(state) - - def add(self, state): - key = state.key - # inline of self.__contains__ - if key in self._dict: - try: - existing_state = self._dict[key] - if existing_state is not state: - o = existing_state.obj() - if o is not None: - raise AssertionError( - "A conflicting state is already " - "present in the identity map for key %r" - % (key, )) - else: - return - except KeyError: - pass - self._dict[key] = state - self._manage_incoming_state(state) - - def _add_unpresent(self, state, key): - # inlined form of add() called by loading.py - self._dict[key] = state - state._instance_dict = self._wr - - def get(self, key, default=None): - if key not in self._dict: - return default - state = self._dict[key] - o = state.obj() - if o is None: - return default - return o - - def items(self): - values = self.all_states() - result = [] - for state in values: - value = state.obj() - if value is not None: - result.append((state.key, value)) - return result - - def values(self): - values = self.all_states() - result = [] - for state in values: - value = state.obj() - if value is not None: - result.append(value) - - return result - - def __iter__(self): - return iter(self.keys()) - - if util.py2k: - - def iteritems(self): - return iter(self.items()) - - def itervalues(self): - return iter(self.values()) - - def all_states(self): - if util.py2k: - return self._dict.values() - else: - return list(self._dict.values()) - - def _fast_discard(self, state): - self._dict.pop(state.key, None) - - def discard(self, state): - st = self._dict.pop(state.key, None) - if st: - assert st is state - self._manage_removed_state(state) - - def safe_discard(self, state): - if state.key in self._dict: - st = self._dict[state.key] - if st is state: - self._dict.pop(state.key, None) - self._manage_removed_state(state) - - def prune(self): - return 0 - - -class StrongInstanceDict(IdentityMap): - """A 'strong-referencing' version of the identity map. - - .. deprecated:: this object is present in order to fulfill - the ``weak_identity_map=False`` option of the Session. 
-       This option is present to allow compatibility with older applications,
-       but it is recommended that strong references to objects
-       be maintained by the calling application
-       externally to the :class:`.Session` itself, to the degree
-       that is needed by the application.
-
-    """
-
-    if util.py2k:
-        def itervalues(self):
-            return self._dict.itervalues()
-
-        def iteritems(self):
-            return self._dict.iteritems()
-
-    def __iter__(self):
-        return iter(self._dict)
-
-    def __getitem__(self, key):
-        return self._dict[key]
-
-    def __contains__(self, key):
-        return key in self._dict
-
-    def get(self, key, default=None):
-        return self._dict.get(key, default)
-
-    def values(self):
-        return self._dict.values()
-
-    def items(self):
-        return self._dict.items()
-
-    def all_states(self):
-        return [attributes.instance_state(o) for o in self.values()]
-
-    def contains_state(self, state):
-        return (
-            state.key in self and
-            attributes.instance_state(self[state.key]) is state)
-
-    def replace(self, state):
-        if state.key in self._dict:
-            existing = self._dict[state.key]
-            existing = attributes.instance_state(existing)
-            if existing is not state:
-                self._manage_removed_state(existing)
-            else:
-                return
-
-        self._dict[state.key] = state.obj()
-        self._manage_incoming_state(state)
-
-    def add(self, state):
-        if state.key in self:
-            if attributes.instance_state(self._dict[state.key]) is not state:
-                raise AssertionError('A conflicting state is already '
-                                     'present in the identity map for key %r'
-                                     % (state.key, ))
-        else:
-            self._dict[state.key] = state.obj()
-            self._manage_incoming_state(state)
-
-    def _add_unpresent(self, state, key):
-        # inlined form of add() called by loading.py
-        self._dict[key] = state.obj()
-        state._instance_dict = self._wr
-
-    def _fast_discard(self, state):
-        self._dict.pop(state.key, None)
-
-    def discard(self, state):
-        obj = self._dict.pop(state.key, None)
-        if obj is not None:
-            self._manage_removed_state(state)
-            st = attributes.instance_state(obj)
-            assert st is state
-
-    def safe_discard(self, state):
-        if state.key in self._dict:
-            obj = self._dict[state.key]
-            st = attributes.instance_state(obj)
-            if st is state:
-                self._dict.pop(state.key, None)
-                self._manage_removed_state(state)
-
-    def prune(self):
-        """prune unreferenced, non-dirty states."""
-
-        ref_count = len(self)
-        dirty = [s.obj() for s in self.all_states() if s.modified]
-
-        # work around http://bugs.python.org/issue6149
-        keepers = weakref.WeakValueDictionary()
-        keepers.update(self)
-
-        self._dict.clear()
-        self._dict.update(keepers)
-        self.modified = bool(dirty)
-        return ref_count - len(self)
diff --git a/python/sqlalchemy/orm/instrumentation.py b/python/sqlalchemy/orm/instrumentation.py
deleted file mode 100644
index be2fe91c..00000000
--- a/python/sqlalchemy/orm/instrumentation.py
+++ /dev/null
@@ -1,528 +0,0 @@
-# orm/instrumentation.py
-# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Defines SQLAlchemy's system of class instrumentation.
-
-This module is usually not directly visible to user applications, but
-defines a large part of the ORM's interactivity.
-
-instrumentation.py deals with registration of end-user classes
-for state tracking.   It interacts closely with state.py
-and attributes.py which establish per-instance and per-class-attribute
-instrumentation, respectively.
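As context for the state tracking just described, the per-instance and
per-class handles can be inspected directly; a minimal sketch, using the
internal accessors from ``sqlalchemy.orm.base`` and a hypothetical
declaratively mapped ``User`` class (both the class and its table name are
illustrative assumptions)::

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm.base import instance_state, manager_of_class

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)

    u = User()
    state = instance_state(u)          # per-instance InstanceState
    manager = manager_of_class(User)   # per-class ClassManager
    assert state.manager is manager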
-
-The class instrumentation system can be customized on a per-class
-or global basis using the :mod:`sqlalchemy.ext.instrumentation`
-module, which provides the means to build and specify
-alternate instrumentation forms.
-
-.. versionchanged:: 0.8
-   The instrumentation extension system was moved out of the
-   ORM and into the external :mod:`sqlalchemy.ext.instrumentation`
-   package.  When that package is imported, it installs
-   itself within sqlalchemy.orm so that its more comprehensive
-   resolution mechanics take effect.
-
-"""
-
-
-from . import exc, collections, interfaces, state
-from .. import util
-from . import base
-
-
-_memoized_key_collection = util.group_expirable_memoized_property()
-
-
-class ClassManager(dict):
-    """tracks state information at the class level."""
-
-    MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
-    STATE_ATTR = base.DEFAULT_STATE_ATTR
-
-    _state_setter = staticmethod(util.attrsetter(STATE_ATTR))
-
-    deferred_scalar_loader = None
-
-    original_init = object.__init__
-
-    factory = None
-
-    def __init__(self, class_):
-        self.class_ = class_
-        self.info = {}
-        self.new_init = None
-        self.local_attrs = {}
-        self.originals = {}
-
-        self._bases = [mgr for mgr in [
-            manager_of_class(base)
-            for base in self.class_.__bases__
-            if isinstance(base, type)
-        ] if mgr is not None]
-
-        for base in self._bases:
-            self.update(base)
-
-        self.dispatch._events._new_classmanager_instance(class_, self)
-        # events._InstanceEventsHold.populate(class_, self)
-
-        for basecls in class_.__mro__:
-            mgr = manager_of_class(basecls)
-            if mgr is not None:
-                self.dispatch._update(mgr.dispatch)
-        self.manage()
-        self._instrument_init()
-
-        if '__del__' in class_.__dict__:
-            util.warn("__del__() method on class %s will "
-                      "cause unreachable cycles and memory leaks, "
-                      "as SQLAlchemy instrumentation often creates "
-                      "reference cycles.  Please remove this method." %
-                      class_)
-
-    def __hash__(self):
-        return id(self)
-
-    def __eq__(self, other):
-        return other is self
-
-    @property
-    def is_mapped(self):
-        return 'mapper' in self.__dict__
-
-    @_memoized_key_collection
-    def _all_key_set(self):
-        return frozenset(self)
-
-    @_memoized_key_collection
-    def _collection_impl_keys(self):
-        return frozenset([
-            attr.key for attr in self.values() if attr.impl.collection])
-
-    @_memoized_key_collection
-    def _scalar_loader_impls(self):
-        return frozenset([
-            attr.impl for attr in
-            self.values() if attr.impl.accepts_scalar_loader])
-
-    @util.memoized_property
-    def mapper(self):
-        # raises unless self.mapper has been assigned
-        raise exc.UnmappedClassError(self.class_)
-
-    def _all_sqla_attributes(self, exclude=None):
-        """return an iterator of all classbound attributes that
-        implement :class:`.InspectionAttr`.
-
-        This includes :class:`.QueryableAttribute` as well as extension
-        types such as :class:`.hybrid_property` and
-        :class:`.AssociationProxy`.
-
-        """
-        if exclude is None:
-            exclude = set()
-        for supercls in self.class_.__mro__:
-            for key in set(supercls.__dict__).difference(exclude):
-                exclude.add(key)
-                val = supercls.__dict__[key]
-                if isinstance(val, interfaces.InspectionAttr):
-                    yield key, val
-
-    def _attr_has_impl(self, key):
-        """Return True if the given attribute is fully initialized.
-
-        i.e. has an impl.
-        """
-
-        return key in self and self[key].impl is not None
-
-    def _subclass_manager(self, cls):
-        """Create a new ClassManager for a subclass of this ClassManager's
-        class.
-
-        This is called automatically when attributes are instrumented so that
-        the attributes can be propagated to subclasses against their own
-        class-local manager, without the need for mappers etc. to have already
-        pre-configured managers for the full class hierarchy.   Mappers
-        can post-configure the auto-generated ClassManager when needed.
-
-        """
-        manager = manager_of_class(cls)
-        if manager is None:
-            manager = _instrumentation_factory.create_manager_for_cls(cls)
-        return manager
-
-    def _instrument_init(self):
-        # TODO: self.class_.__init__ is often the already-instrumented
-        # __init__ from an instrumented superclass.  We still need to make
-        # our own wrapper, but it would
-        # be nice to wrap the original __init__ and not our existing wrapper
-        # of such, since this adds method overhead.
-        self.original_init = self.class_.__init__
-        self.new_init = _generate_init(self.class_, self)
-        self.install_member('__init__', self.new_init)
-
-    def _uninstrument_init(self):
-        if self.new_init:
-            self.uninstall_member('__init__')
-            self.new_init = None
-
-    @util.memoized_property
-    def _state_constructor(self):
-        self.dispatch.first_init(self, self.class_)
-        return state.InstanceState
-
-    def manage(self):
-        """Mark this instance as the manager for its class."""
-
-        setattr(self.class_, self.MANAGER_ATTR, self)
-
-    def dispose(self):
-        """Disassociate this manager from its class."""
-
-        delattr(self.class_, self.MANAGER_ATTR)
-
-    @util.hybridmethod
-    def manager_getter(self):
-        return _default_manager_getter
-
-    @util.hybridmethod
-    def state_getter(self):
-        """Return a (instance) -> InstanceState callable.
-
-        "state getter" callables should raise either KeyError or
-        AttributeError if no InstanceState could be found for the
-        instance.
-        """
-
-        return _default_state_getter
-
-    @util.hybridmethod
-    def dict_getter(self):
-        return _default_dict_getter
-
-    def instrument_attribute(self, key, inst, propagated=False):
-        if propagated:
-            if key in self.local_attrs:
-                return  # don't override local attr with inherited attr
-        else:
-            self.local_attrs[key] = inst
-            self.install_descriptor(key, inst)
-        _memoized_key_collection.expire_instance(self)
-        self[key] = inst
-
-        for cls in self.class_.__subclasses__():
-            manager = self._subclass_manager(cls)
-            manager.instrument_attribute(key, inst, True)
-
-    def subclass_managers(self, recursive):
-        for cls in self.class_.__subclasses__():
-            mgr = manager_of_class(cls)
-            if mgr is not None and mgr is not self:
-                yield mgr
-                if recursive:
-                    for m in mgr.subclass_managers(True):
-                        yield m
-
-    def post_configure_attribute(self, key):
-        _instrumentation_factory.dispatch.\
-            attribute_instrument(self.class_, key, self[key])
-
-    def uninstrument_attribute(self, key, propagated=False):
-        if key not in self:
-            return
-        if propagated:
-            if key in self.local_attrs:
-                return  # don't get rid of local attr
-        else:
-            del self.local_attrs[key]
-            self.uninstall_descriptor(key)
-        _memoized_key_collection.expire_instance(self)
-        del self[key]
-        for cls in self.class_.__subclasses__():
-            manager = manager_of_class(cls)
-            if manager:
-                manager.uninstrument_attribute(key, True)
-
-    def unregister(self):
-        """remove all instrumentation established by this ClassManager."""
-
-        self._uninstrument_init()
-
-        self.mapper = self.dispatch = None
-        self.info.clear()
-
-        for key in list(self):
-            if key in self.local_attrs:
-                self.uninstrument_attribute(key)
-
-    def install_descriptor(self, key, inst):
-        if key in (self.STATE_ATTR, self.MANAGER_ATTR):
-            raise KeyError("%r: requested
attribute name conflicts with " - "instrumentation attribute of the same name." % - key) - setattr(self.class_, key, inst) - - def uninstall_descriptor(self, key): - delattr(self.class_, key) - - def install_member(self, key, implementation): - if key in (self.STATE_ATTR, self.MANAGER_ATTR): - raise KeyError("%r: requested attribute name conflicts with " - "instrumentation attribute of the same name." % - key) - self.originals.setdefault(key, getattr(self.class_, key, None)) - setattr(self.class_, key, implementation) - - def uninstall_member(self, key): - original = self.originals.pop(key, None) - if original is not None: - setattr(self.class_, key, original) - - def instrument_collection_class(self, key, collection_class): - return collections.prepare_instrumentation(collection_class) - - def initialize_collection(self, key, state, factory): - user_data = factory() - adapter = collections.CollectionAdapter( - self.get_impl(key), state, user_data) - return adapter, user_data - - def is_instrumented(self, key, search=False): - if search: - return key in self - else: - return key in self.local_attrs - - def get_impl(self, key): - return self[key].impl - - @property - def attributes(self): - return iter(self.values()) - - # InstanceState management - - def new_instance(self, state=None): - instance = self.class_.__new__(self.class_) - if state is None: - state = self._state_constructor(instance, self) - self._state_setter(instance, state) - return instance - - def setup_instance(self, instance, state=None): - if state is None: - state = self._state_constructor(instance, self) - self._state_setter(instance, state) - - def teardown_instance(self, instance): - delattr(instance, self.STATE_ATTR) - - def _serialize(self, state, state_dict): - return _SerializeManager(state, state_dict) - - def _new_state_if_none(self, instance): - """Install a default InstanceState if none is present. - - A private convenience method used by the __init__ decorator. - - """ - if hasattr(instance, self.STATE_ATTR): - return False - elif self.class_ is not instance.__class__ and \ - self.is_mapped: - # this will create a new ClassManager for the - # subclass, without a mapper. This is likely a - # user error situation but allow the object - # to be constructed, so that it is usable - # in a non-ORM context at least. - return self._subclass_manager(instance.__class__).\ - _new_state_if_none(instance) - else: - state = self._state_constructor(instance, self) - self._state_setter(instance, state) - return state - - def has_state(self, instance): - return hasattr(instance, self.STATE_ATTR) - - def has_parent(self, state, key, optimistic=False): - """TODO""" - return self.get_impl(key).hasparent(state, optimistic=optimistic) - - def __bool__(self): - """All ClassManagers are non-zero regardless of attribute state.""" - return True - - __nonzero__ = __bool__ - - def __repr__(self): - return '<%s of %r at %x>' % ( - self.__class__.__name__, self.class_, id(self)) - - -class _SerializeManager(object): - """Provide serialization of a :class:`.ClassManager`. - - The :class:`.InstanceState` uses ``__init__()`` on serialize - and ``__call__()`` on deserialize. 
- - """ - - def __init__(self, state, d): - self.class_ = state.class_ - manager = state.manager - manager.dispatch.pickle(state, d) - - def __call__(self, state, inst, state_dict): - state.manager = manager = manager_of_class(self.class_) - if manager is None: - raise exc.UnmappedInstanceError( - inst, - "Cannot deserialize object of type %r - " - "no mapper() has " - "been configured for this class within the current " - "Python process!" % - self.class_) - elif manager.is_mapped and not manager.mapper.configured: - manager.mapper._configure_all() - - # setup _sa_instance_state ahead of time so that - # unpickle events can access the object normally. - # see [ticket:2362] - if inst is not None: - manager.setup_instance(inst, state) - manager.dispatch.unpickle(state, state_dict) - - -class InstrumentationFactory(object): - """Factory for new ClassManager instances.""" - - def create_manager_for_cls(self, class_): - assert class_ is not None - assert manager_of_class(class_) is None - - # give a more complicated subclass - # a chance to do what it wants here - manager, factory = self._locate_extended_factory(class_) - - if factory is None: - factory = ClassManager - manager = factory(class_) - - self._check_conflicts(class_, factory) - - manager.factory = factory - - self.dispatch.class_instrument(class_) - return manager - - def _locate_extended_factory(self, class_): - """Overridden by a subclass to do an extended lookup.""" - return None, None - - def _check_conflicts(self, class_, factory): - """Overridden by a subclass to test for conflicting factories.""" - return - - def unregister(self, class_): - manager = manager_of_class(class_) - manager.unregister() - manager.dispose() - self.dispatch.class_uninstrument(class_) - if ClassManager.MANAGER_ATTR in class_.__dict__: - delattr(class_, ClassManager.MANAGER_ATTR) - -# this attribute is replaced by sqlalchemy.ext.instrumentation -# when importred. -_instrumentation_factory = InstrumentationFactory() - -# these attributes are replaced by sqlalchemy.ext.instrumentation -# when a non-standard InstrumentationManager class is first -# used to instrument a class. -instance_state = _default_state_getter = base.instance_state - -instance_dict = _default_dict_getter = base.instance_dict - -manager_of_class = _default_manager_getter = base.manager_of_class - - -def register_class(class_): - """Register class instrumentation. - - Returns the existing or newly created class manager. - - """ - - manager = manager_of_class(class_) - if manager is None: - manager = _instrumentation_factory.create_manager_for_cls(class_) - return manager - - -def unregister_class(class_): - """Unregister class instrumentation.""" - - _instrumentation_factory.unregister(class_) - - -def is_instrumented(instance, key): - """Return True if the given attribute on the given instance is - instrumented by the attributes package. - - This function may be used regardless of instrumentation - applied directly to the class, i.e. no descriptors are required. - - """ - return manager_of_class(instance.__class__).\ - is_instrumented(key, search=True) - - -def _generate_init(class_, class_manager): - """Build an __init__ decorator that triggers ClassManager events.""" - - # TODO: we should use the ClassManager's notion of the - # original '__init__' method, once ClassManager is fixed - # to always reference that. 
- original__init__ = class_.__init__ - assert original__init__ - - # Go through some effort here and don't change the user's __init__ - # calling signature, including the unlikely case that it has - # a return value. - # FIXME: need to juggle local names to avoid constructor argument - # clashes. - func_body = """\ -def __init__(%(apply_pos)s): - new_state = class_manager._new_state_if_none(%(self_arg)s) - if new_state: - return new_state._initialize_instance(%(apply_kw)s) - else: - return original__init__(%(apply_kw)s) -""" - func_vars = util.format_argspec_init(original__init__, grouped=False) - func_text = func_body % func_vars - - if util.py2k: - func = getattr(original__init__, 'im_func', original__init__) - func_defaults = getattr(func, 'func_defaults', None) - else: - func_defaults = getattr(original__init__, '__defaults__', None) - func_kw_defaults = getattr(original__init__, '__kwdefaults__', None) - - env = locals().copy() - exec(func_text, env) - __init__ = env['__init__'] - __init__.__doc__ = original__init__.__doc__ - - if func_defaults: - __init__.__defaults__ = func_defaults - if not util.py2k and func_kw_defaults: - __init__.__kwdefaults__ = func_kw_defaults - - return __init__ diff --git a/python/sqlalchemy/orm/interfaces.py b/python/sqlalchemy/orm/interfaces.py deleted file mode 100644 index cd4a0116..00000000 --- a/python/sqlalchemy/orm/interfaces.py +++ /dev/null @@ -1,640 +0,0 @@ -# orm/interfaces.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -Contains various base classes used throughout the ORM. - -Defines some key base classes prominent within the internals, -as well as the now-deprecated ORM extension classes. - -Other than the deprecated extensions, this module and the -classes within are mostly private, though some attributes -are exposed when inspecting mappings. - -""" - -from __future__ import absolute_import - -from .. import util -from ..sql import operators -from .base import (ONETOMANY, MANYTOONE, MANYTOMANY, - EXT_CONTINUE, EXT_STOP, NOT_EXTENSION) -from .base import (InspectionAttr, InspectionAttr, - InspectionAttrInfo, _MappedAttribute) -import collections -from .. import inspect - -# imported later -MapperExtension = SessionExtension = AttributeExtension = None - -__all__ = ( - 'AttributeExtension', - 'EXT_CONTINUE', - 'EXT_STOP', - 'ONETOMANY', - 'MANYTOMANY', - 'MANYTOONE', - 'NOT_EXTENSION', - 'LoaderStrategy', - 'MapperExtension', - 'MapperOption', - 'MapperProperty', - 'PropComparator', - 'SessionExtension', - 'StrategizedProperty', -) - - -class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots): - """Represent a particular class attribute mapped by :class:`.Mapper`. - - The most common occurrences of :class:`.MapperProperty` are the - mapped :class:`.Column`, which is represented in a mapping as - an instance of :class:`.ColumnProperty`, - and a reference to another class produced by :func:`.relationship`, - represented in the mapping as an instance of - :class:`.RelationshipProperty`. - - """ - - __slots__ = ( - '_configure_started', '_configure_finished', 'parent', 'key', - 'info' - ) - - cascade = frozenset() - """The set of 'cascade' attribute names. - - This collection is checked before the 'cascade_iterator' method is called. - - The collection typically only applies to a RelationshipProperty. 
- - """ - - is_property = True - """Part of the InspectionAttr interface; states this object is a - mapper property. - - """ - - def _memoized_attr_info(self): - """Info dictionary associated with the object, allowing user-defined - data to be associated with this :class:`.InspectionAttr`. - - The dictionary is generated when first accessed. Alternatively, - it can be specified as a constructor argument to the - :func:`.column_property`, :func:`.relationship`, or :func:`.composite` - functions. - - .. versionadded:: 0.8 Added support for .info to all - :class:`.MapperProperty` subclasses. - - .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also - available on extension types via the - :attr:`.InspectionAttrInfo.info` attribute, so that it can apply - to a wider variety of ORM and extension constructs. - - .. seealso:: - - :attr:`.QueryableAttribute.info` - - :attr:`.SchemaItem.info` - - """ - return {} - - def setup(self, context, entity, path, adapter, **kwargs): - """Called by Query for the purposes of constructing a SQL statement. - - Each MapperProperty associated with the target mapper processes the - statement referenced by the query context, adding columns and/or - criterion as appropriate. - - """ - - def create_row_processor(self, context, path, - mapper, result, adapter, populators): - """Produce row processing functions and append to the given - set of populators lists. - - """ - - def cascade_iterator(self, type_, state, visited_instances=None, - halt_on=None): - """Iterate through instances related to the given instance for - a particular 'cascade', starting with this MapperProperty. - - Return an iterator3-tuples (instance, mapper, state). - - Note that the 'cascade' collection on this MapperProperty is - checked first for the given type before cascade_iterator is called. - - This method typically only applies to RelationshipProperty. - - """ - - return iter(()) - - def set_parent(self, parent, init): - """Set the parent mapper that references this MapperProperty. - - This method is overridden by some subclasses to perform extra - setup when the mapper is first known. - - """ - self.parent = parent - - def instrument_class(self, mapper): - """Hook called by the Mapper to the property to initiate - instrumentation of the class attribute managed by this - MapperProperty. - - The MapperProperty here will typically call out to the - attributes module to set up an InstrumentedAttribute. - - This step is the first of two steps to set up an InstrumentedAttribute, - and is called early in the mapper setup process. - - The second step is typically the init_class_attribute step, - called from StrategizedProperty via the post_instrument_class() - hook. This step assigns additional state to the InstrumentedAttribute - (specifically the "impl") which has been determined after the - MapperProperty has determined what kind of persistence - management it needs to do (e.g. scalar, object, collection, etc). - - """ - - def __init__(self): - self._configure_started = False - self._configure_finished = False - - def init(self): - """Called after all mappers are created to assemble - relationships between mappers and perform other post-mapper-creation - initialization steps. - - """ - self._configure_started = True - self.do_init() - self._configure_finished = True - - @property - def class_attribute(self): - """Return the class-bound descriptor corresponding to this - :class:`.MapperProperty`. 
- - This is basically a ``getattr()`` call:: - - return getattr(self.parent.class_, self.key) - - I.e. if this :class:`.MapperProperty` were named ``addresses``, - and the class to which it is mapped is ``User``, this sequence - is possible:: - - >>> from sqlalchemy import inspect - >>> mapper = inspect(User) - >>> addresses_property = mapper.attrs.addresses - >>> addresses_property.class_attribute is User.addresses - True - >>> User.addresses.property is addresses_property - True - - - """ - - return getattr(self.parent.class_, self.key) - - def do_init(self): - """Perform subclass-specific initialization post-mapper-creation - steps. - - This is a template method called by the ``MapperProperty`` - object's init() method. - - """ - - def post_instrument_class(self, mapper): - """Perform instrumentation adjustments that need to occur - after init() has completed. - - The given Mapper is the Mapper invoking the operation, which - may not be the same Mapper as self.parent in an inheritance - scenario; however, Mapper will always at least be a sub-mapper of - self.parent. - - This method is typically used by StrategizedProperty, which delegates - it to LoaderStrategy.init_class_attribute() to perform final setup - on the class-bound InstrumentedAttribute. - - """ - - def merge(self, session, source_state, source_dict, dest_state, - dest_dict, load, _recursive): - """Merge the attribute represented by this ``MapperProperty`` - from source to destination object. - - """ - - def __repr__(self): - return '<%s at 0x%x; %s>' % ( - self.__class__.__name__, - id(self), getattr(self, 'key', 'no key')) - - -class PropComparator(operators.ColumnOperators): - """Defines SQL operators for :class:`.MapperProperty` objects. - - SQLAlchemy allows for operators to - be redefined at both the Core and ORM level. :class:`.PropComparator` - is the base class of operator redefinition for ORM-level operations, - including those of :class:`.ColumnProperty`, - :class:`.RelationshipProperty`, and :class:`.CompositeProperty`. - - .. note:: With the advent of Hybrid properties introduced in SQLAlchemy - 0.7, as well as Core-level operator redefinition in - SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator` - instances is extremely rare. See :ref:`hybrids_toplevel` as well - as :ref:`types_operators`. - - User-defined subclasses of :class:`.PropComparator` may be created. The - built-in Python comparison and math operator methods, such as - :meth:`.operators.ColumnOperators.__eq__`, - :meth:`.operators.ColumnOperators.__lt__`, and - :meth:`.operators.ColumnOperators.__add__`, can be overridden to provide - new operator behavior. The custom :class:`.PropComparator` is passed to - the :class:`.MapperProperty` instance via the ``comparator_factory`` - argument. In each case, - the appropriate subclass of :class:`.PropComparator` should be used:: - - # definition of custom PropComparator subclasses - - from sqlalchemy.orm.properties import \\ - ColumnProperty,\\ - CompositeProperty,\\ - RelationshipProperty - - class MyColumnComparator(ColumnProperty.Comparator): - def __eq__(self, other): - return self.__clause_element__() == other - - class MyRelationshipComparator(RelationshipProperty.Comparator): - def any(self, expression): - "define the 'any' operation" - # ... 
- - class MyCompositeComparator(CompositeProperty.Comparator): - def __gt__(self, other): - "redefine the 'greater than' operation" - - return sql.and_(*[a>b for a, b in - zip(self.__clause_element__().clauses, - other.__composite_values__())]) - - - # application of custom PropComparator subclasses - - from sqlalchemy.orm import column_property, relationship, composite - from sqlalchemy import Column, String - - class SomeMappedClass(Base): - some_column = column_property(Column("some_column", String), - comparator_factory=MyColumnComparator) - - some_relationship = relationship(SomeOtherClass, - comparator_factory=MyRelationshipComparator) - - some_composite = composite( - Column("a", String), Column("b", String), - comparator_factory=MyCompositeComparator - ) - - Note that for column-level operator redefinition, it's usually - simpler to define the operators at the Core level, using the - :attr:`.TypeEngine.comparator_factory` attribute. See - :ref:`types_operators` for more detail. - - See also: - - :class:`.ColumnProperty.Comparator` - - :class:`.RelationshipProperty.Comparator` - - :class:`.CompositeProperty.Comparator` - - :class:`.ColumnOperators` - - :ref:`types_operators` - - :attr:`.TypeEngine.comparator_factory` - - """ - - __slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity' - - def __init__(self, prop, parentmapper, adapt_to_entity=None): - self.prop = self.property = prop - self._parententity = adapt_to_entity or parentmapper - self._adapt_to_entity = adapt_to_entity - - def __clause_element__(self): - raise NotImplementedError("%r" % self) - - def _query_clause_element(self): - return self.__clause_element__() - - def adapt_to_entity(self, adapt_to_entity): - """Return a copy of this PropComparator which will use the given - :class:`.AliasedInsp` to produce corresponding expressions. - """ - return self.__class__(self.prop, self._parententity, adapt_to_entity) - - @property - def _parentmapper(self): - """legacy; this is renamed to _parententity to be - compatible with QueryableAttribute.""" - return inspect(self._parententity).mapper - - @property - def adapter(self): - """Produce a callable that adapts column expressions - to suit an aliased version of this comparator. - - """ - if self._adapt_to_entity is None: - return None - else: - return self._adapt_to_entity._adapt_element - - @property - def info(self): - return self.property.info - - @staticmethod - def any_op(a, b, **kwargs): - return a.any(b, **kwargs) - - @staticmethod - def has_op(a, b, **kwargs): - return a.has(b, **kwargs) - - @staticmethod - def of_type_op(a, class_): - return a.of_type(class_) - - def of_type(self, class_): - """Redefine this object in terms of a polymorphic subclass. - - Returns a new PropComparator from which further criterion can be - evaluated. - - e.g.:: - - query.join(Company.employees.of_type(Engineer)).\\ - filter(Engineer.name=='foo') - - :param \class_: a class or mapper indicating that criterion will be - against this specific subclass. - - - """ - - return self.operate(PropComparator.of_type_op, class_) - - def any(self, criterion=None, **kwargs): - """Return true if this collection contains any member that meets the - given criterion. - - The usual implementation of ``any()`` is - :meth:`.RelationshipProperty.Comparator.any`. - - :param criterion: an optional ClauseElement formulated against the - member class' table or attributes. 
- - :param \**kwargs: key/value pairs corresponding to member class - attribute names which will be compared via equality to the - corresponding values. - - """ - - return self.operate(PropComparator.any_op, criterion, **kwargs) - - def has(self, criterion=None, **kwargs): - """Return true if this element references a member which meets the - given criterion. - - The usual implementation of ``has()`` is - :meth:`.RelationshipProperty.Comparator.has`. - - :param criterion: an optional ClauseElement formulated against the - member class' table or attributes. - - :param \**kwargs: key/value pairs corresponding to member class - attribute names which will be compared via equality to the - corresponding values. - - """ - - return self.operate(PropComparator.has_op, criterion, **kwargs) - - -class StrategizedProperty(MapperProperty): - """A MapperProperty which uses selectable strategies to affect - loading behavior. - - There is a single strategy selected by default. Alternate - strategies can be selected at Query time through the usage of - ``StrategizedOption`` objects via the Query.options() method. - - The mechanics of StrategizedProperty are used for every Query - invocation for every mapped attribute participating in that Query, - to determine first how the attribute will be rendered in SQL - and secondly how the attribute will retrieve a value from a result - row and apply it to a mapped object. The routines here are very - performance-critical. - - """ - - __slots__ = '_strategies', 'strategy' - - strategy_wildcard_key = None - - def _get_context_loader(self, context, path): - load = None - - # use EntityRegistry.__getitem__()->PropRegistry here so - # that the path is stated in terms of our base - search_path = dict.__getitem__(path, self) - - # search among: exact match, "attr.*", "default" strategy - # if any. 
- for path_key in ( - search_path._loader_key, - search_path._wildcard_path_loader_key, - search_path._default_path_loader_key - ): - if path_key in context.attributes: - load = context.attributes[path_key] - break - - return load - - def _get_strategy(self, key): - try: - return self._strategies[key] - except KeyError: - cls = self._strategy_lookup(*key) - self._strategies[key] = self._strategies[ - cls] = strategy = cls(self) - return strategy - - def _get_strategy_by_cls(self, cls): - return self._get_strategy(cls._strategy_keys[0]) - - def setup( - self, context, entity, path, adapter, **kwargs): - loader = self._get_context_loader(context, path) - if loader and loader.strategy: - strat = self._get_strategy(loader.strategy) - else: - strat = self.strategy - strat.setup_query(context, entity, path, loader, adapter, **kwargs) - - def create_row_processor( - self, context, path, mapper, - result, adapter, populators): - loader = self._get_context_loader(context, path) - if loader and loader.strategy: - strat = self._get_strategy(loader.strategy) - else: - strat = self.strategy - strat.create_row_processor( - context, path, loader, - mapper, result, adapter, populators) - - def do_init(self): - self._strategies = {} - self.strategy = self._get_strategy_by_cls(self.strategy_class) - - def post_instrument_class(self, mapper): - if not self.parent.non_primary and \ - not mapper.class_manager._attr_has_impl(self.key): - self.strategy.init_class_attribute(mapper) - - _all_strategies = collections.defaultdict(dict) - - @classmethod - def strategy_for(cls, **kw): - def decorate(dec_cls): - # ensure each subclass of the strategy has its - # own _strategy_keys collection - if '_strategy_keys' not in dec_cls.__dict__: - dec_cls._strategy_keys = [] - key = tuple(sorted(kw.items())) - cls._all_strategies[cls][key] = dec_cls - dec_cls._strategy_keys.append(key) - return dec_cls - return decorate - - @classmethod - def _strategy_lookup(cls, *key): - for prop_cls in cls.__mro__: - if prop_cls in cls._all_strategies: - strategies = cls._all_strategies[prop_cls] - try: - return strategies[key] - except KeyError: - pass - raise Exception("can't locate strategy for %s %s" % (cls, key)) - - -class MapperOption(object): - """Describe a modification to a Query.""" - - propagate_to_loaders = False - """if True, indicate this option should be carried along - to "secondary" Query objects produced during lazy loads - or refresh operations. - - """ - - def process_query(self, query): - """Apply a modification to the given :class:`.Query`.""" - - def process_query_conditionally(self, query): - """same as process_query(), except that this option may not - apply to the given query. - - This is typically used during a lazy load or scalar refresh - operation to propagate options stated in the original Query to the - new Query being used for the load. It occurs for those options that - specify propagate_to_loaders=True. - - """ - - self.process_query(query) - - -class LoaderStrategy(object): - """Describe the loading behavior of a StrategizedProperty object. - - The ``LoaderStrategy`` interacts with the querying process in three - ways: - - * it controls the configuration of the ``InstrumentedAttribute`` - placed on a class to handle the behavior of the attribute. this - may involve setting up class-level callable functions to fire - off a select operation when the attribute is first accessed - (i.e. 
a lazy load) - - * it processes the ``QueryContext`` at statement construction time, - where it can modify the SQL statement that is being produced. - For example, simple column attributes will add their represented - column to the list of selected columns, a joined eager loader - may establish join clauses to add to the statement. - - * It produces "row processor" functions at result fetching time. - These "row processor" functions populate a particular attribute - on a particular mapped instance. - - """ - - __slots__ = 'parent_property', 'is_class_level', 'parent', 'key' - - def __init__(self, parent): - self.parent_property = parent - self.is_class_level = False - self.parent = self.parent_property.parent - self.key = self.parent_property.key - - def init_class_attribute(self, mapper): - pass - - def setup_query(self, context, entity, path, loadopt, adapter, **kwargs): - """Establish column and other state for a given QueryContext. - - This method fulfills the contract specified by MapperProperty.setup(). - - StrategizedProperty delegates its setup() method - directly to this method. - - """ - - def create_row_processor(self, context, path, loadopt, mapper, - result, adapter, populators): - """Establish row processing functions for a given QueryContext. - - This method fulfills the contract specified by - MapperProperty.create_row_processor(). - - StrategizedProperty delegates its create_row_processor() method - directly to this method. - - """ - - def __str__(self): - return str(self.parent_property) diff --git a/python/sqlalchemy/orm/loading.py b/python/sqlalchemy/orm/loading.py deleted file mode 100644 index b81e98a5..00000000 --- a/python/sqlalchemy/orm/loading.py +++ /dev/null @@ -1,669 +0,0 @@ -# orm/loading.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""private module containing functions used to convert database -rows into object instances and associated state. - -the functions here are called primarily by Query, Mapper, -as well as some of the attribute loading strategies. - -""" -from __future__ import absolute_import - -from .. import util -from . import attributes, exc as orm_exc -from ..sql import util as sql_util -from . import strategy_options - -from .util import _none_set, state_str -from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE -from .. 
import exc as sa_exc -import collections - -_new_runid = util.counter() - - -def instances(query, cursor, context): - """Return an ORM result as an iterator.""" - - context.runid = _new_runid() - - filter_fns = [ent.filter_fn for ent in query._entities] - filtered = id in filter_fns - - single_entity = len(query._entities) == 1 and \ - query._entities[0].supports_single_entity - - if filtered: - if single_entity: - filter_fn = id - else: - def filter_fn(row): - return tuple(fn(x) for x, fn in zip(row, filter_fns)) - - try: - (process, labels) = \ - list(zip(*[ - query_entity.row_processor(query, - context, cursor) - for query_entity in query._entities - ])) - - if not single_entity: - keyed_tuple = util.lightweight_named_tuple('result', labels) - - while True: - context.partials = {} - - if query._yield_per: - fetch = cursor.fetchmany(query._yield_per) - if not fetch: - break - else: - fetch = cursor.fetchall() - - if single_entity: - proc = process[0] - rows = [proc(row) for row in fetch] - else: - rows = [keyed_tuple([proc(row) for proc in process]) - for row in fetch] - - if filtered: - rows = util.unique_list(rows, filter_fn) - - for row in rows: - yield row - - if not query._yield_per: - break - except Exception as err: - cursor.close() - util.raise_from_cause(err) - - -@util.dependencies("sqlalchemy.orm.query") -def merge_result(querylib, query, iterator, load=True): - """Merge a result into this :class:`.Query` object's Session.""" - - session = query.session - if load: - # flush current contents if we expect to load data - session._autoflush() - - autoflush = session.autoflush - try: - session.autoflush = False - single_entity = len(query._entities) == 1 - if single_entity: - if isinstance(query._entities[0], querylib._MapperEntity): - result = [session._merge( - attributes.instance_state(instance), - attributes.instance_dict(instance), - load=load, _recursive={}) - for instance in iterator] - else: - result = list(iterator) - else: - mapped_entities = [i for i, e in enumerate(query._entities) - if isinstance(e, querylib._MapperEntity)] - result = [] - keys = [ent._label_name for ent in query._entities] - keyed_tuple = util.lightweight_named_tuple('result', keys) - for row in iterator: - newrow = list(row) - for i in mapped_entities: - if newrow[i] is not None: - newrow[i] = session._merge( - attributes.instance_state(newrow[i]), - attributes.instance_dict(newrow[i]), - load=load, _recursive={}) - result.append(keyed_tuple(newrow)) - - return iter(result) - finally: - session.autoflush = autoflush - - -def get_from_identity(session, key, passive): - """Look up the given key in the given session's identity map, - check the object for expired state if found. 
- - """ - instance = session.identity_map.get(key) - if instance is not None: - - state = attributes.instance_state(instance) - - # expired - ensure it still exists - if state.expired: - if not passive & attributes.SQL_OK: - # TODO: no coverage here - return attributes.PASSIVE_NO_RESULT - elif not passive & attributes.RELATED_OBJECT_OK: - # this mode is used within a flush and the instance's - # expired state will be checked soon enough, if necessary - return instance - try: - state._load_expired(state, passive) - except orm_exc.ObjectDeletedError: - session._remove_newly_deleted([state]) - return None - return instance - else: - return None - - -def load_on_ident(query, key, - refresh_state=None, lockmode=None, - only_load_props=None): - """Load the given identity key from the database.""" - - if key is not None: - ident = key[1] - else: - ident = None - - if refresh_state is None: - q = query._clone() - q._get_condition() - else: - q = query._clone() - - if ident is not None: - mapper = query._mapper_zero() - - (_get_clause, _get_params) = mapper._get_clause - - # None present in ident - turn those comparisons - # into "IS NULL" - if None in ident: - nones = set([ - _get_params[col].key for col, value in - zip(mapper.primary_key, ident) if value is None - ]) - _get_clause = sql_util.adapt_criterion_to_null( - _get_clause, nones) - - _get_clause = q._adapt_clause(_get_clause, True, False) - q._criterion = _get_clause - - params = dict([ - (_get_params[primary_key].key, id_val) - for id_val, primary_key in zip(ident, mapper.primary_key) - ]) - - q._params = params - - if lockmode is not None: - version_check = True - q = q.with_lockmode(lockmode) - elif query._for_update_arg is not None: - version_check = True - q._for_update_arg = query._for_update_arg - else: - version_check = False - - q._get_options( - populate_existing=bool(refresh_state), - version_check=version_check, - only_load_props=only_load_props, - refresh_state=refresh_state) - q._order_by = None - - try: - return q.one() - except orm_exc.NoResultFound: - return None - - -def _setup_entity_query( - context, mapper, query_entity, - path, adapter, column_collection, - with_polymorphic=None, only_load_props=None, - polymorphic_discriminator=None, **kw): - - if with_polymorphic: - poly_properties = mapper._iterate_polymorphic_properties( - with_polymorphic) - else: - poly_properties = mapper._polymorphic_properties - - quick_populators = {} - - path.set( - context.attributes, - "memoized_setups", - quick_populators) - - for value in poly_properties: - if only_load_props and \ - value.key not in only_load_props: - continue - value.setup( - context, - query_entity, - path, - adapter, - only_load_props=only_load_props, - column_collection=column_collection, - memoized_populators=quick_populators, - **kw - ) - - if polymorphic_discriminator is not None and \ - polymorphic_discriminator \ - is not mapper.polymorphic_on: - - if adapter: - pd = adapter.columns[polymorphic_discriminator] - else: - pd = polymorphic_discriminator - column_collection.append(pd) - - -def _instance_processor( - mapper, context, result, path, adapter, - only_load_props=None, refresh_state=None, - polymorphic_discriminator=None, - _polymorphic_from=None): - """Produce a mapper level row processor callable - which processes rows into mapped instances.""" - - # note that this method, most of which exists in a closure - # called _instance(), resists being broken out, as - # attempts to do so tend to add significant function - # call overhead. 
_instance() is the most - # performance-critical section in the whole ORM. - - pk_cols = mapper.primary_key - - if adapter: - pk_cols = [adapter.columns[c] for c in pk_cols] - - identity_class = mapper._identity_class - - populators = collections.defaultdict(list) - - props = mapper._prop_set - if only_load_props is not None: - props = props.intersection( - mapper._props[k] for k in only_load_props) - - quick_populators = path.get( - context.attributes, "memoized_setups", _none_set) - - for prop in props: - if prop in quick_populators: - # this is an inlined path just for column-based attributes. - col = quick_populators[prop] - if col is _DEFER_FOR_STATE: - populators["new"].append( - (prop.key, prop._deferred_column_loader)) - elif col is _SET_DEFERRED_EXPIRED: - # note that in this path, we are no longer - # searching in the result to see if the column might - # be present in some unexpected way. - populators["expire"].append((prop.key, False)) - else: - if adapter: - col = adapter.columns[col] - getter = result._getter(col) - if getter: - populators["quick"].append((prop.key, getter)) - else: - # fall back to the ColumnProperty itself, which - # will iterate through all of its columns - # to see if one fits - prop.create_row_processor( - context, path, mapper, result, adapter, populators) - else: - prop.create_row_processor( - context, path, mapper, result, adapter, populators) - - propagate_options = context.propagate_options - if propagate_options: - load_path = context.query._current_path + path \ - if context.query._current_path.path else path - - session_identity_map = context.session.identity_map - - populate_existing = context.populate_existing or mapper.always_refresh - load_evt = bool(mapper.class_manager.dispatch.load) - refresh_evt = bool(mapper.class_manager.dispatch.refresh) - instance_state = attributes.instance_state - instance_dict = attributes.instance_dict - session_id = context.session.hash_key - version_check = context.version_check - runid = context.runid - - if refresh_state: - refresh_identity_key = refresh_state.key - if refresh_identity_key is None: - # super-rare condition; a refresh is being called - # on a non-instance-key instance; this is meant to only - # occur within a flush() - refresh_identity_key = \ - mapper._identity_key_from_state(refresh_state) - else: - refresh_identity_key = None - - if mapper.allow_partial_pks: - is_not_primary_key = _none_set.issuperset - else: - is_not_primary_key = _none_set.intersection - - def _instance(row): - - # determine the state that we'll be populating - if refresh_identity_key: - # fixed state that we're refreshing - state = refresh_state - instance = state.obj() - dict_ = instance_dict(instance) - isnew = state.runid != runid - currentload = True - loaded_instance = False - else: - # look at the row, see if that identity is in the - # session, or we have to create a new one - identitykey = ( - identity_class, - tuple([row[column] for column in pk_cols]) - ) - - instance = session_identity_map.get(identitykey) - - if instance is not None: - # existing instance - state = instance_state(instance) - dict_ = instance_dict(instance) - - isnew = state.runid != runid - currentload = not isnew - loaded_instance = False - - if version_check and not currentload: - _validate_version_id(mapper, state, dict_, row, adapter) - - else: - # create a new instance - - # check for non-NULL values in the primary key columns, - # else no entity is returned for the row - if is_not_primary_key(identitykey[1]): - return None - - isnew = 
True - currentload = True - loaded_instance = True - - instance = mapper.class_manager.new_instance() - - dict_ = instance_dict(instance) - state = instance_state(instance) - state.key = identitykey - - # attach instance to session. - state.session_id = session_id - session_identity_map._add_unpresent(state, identitykey) - - # populate. this looks at whether this state is new - # for this load or was existing, and whether or not this - # row is the first row with this identity. - if currentload or populate_existing: - # full population routines. Objects here are either - # just created, or we are doing a populate_existing - - if isnew and propagate_options: - state.load_options = propagate_options - state.load_path = load_path - - _populate_full( - context, row, state, dict_, isnew, - loaded_instance, populate_existing, populators) - - if isnew: - if loaded_instance and load_evt: - state.manager.dispatch.load(state, context) - elif refresh_evt: - state.manager.dispatch.refresh( - state, context, only_load_props) - - if populate_existing or state.modified: - if refresh_state and only_load_props: - state._commit(dict_, only_load_props) - else: - state._commit_all(dict_, session_identity_map) - - else: - # partial population routines, for objects that were already - # in the Session, but a row matches them; apply eager loaders - # on existing objects, etc. - unloaded = state.unloaded - isnew = state not in context.partials - - if not isnew or unloaded or populators["eager"]: - # state is having a partial set of its attributes - # refreshed. Populate those attributes, - # and add to the "context.partials" collection. - - to_load = _populate_partial( - context, row, state, dict_, isnew, - unloaded, populators) - - if isnew: - if refresh_evt: - state.manager.dispatch.refresh( - state, context, to_load) - - state._commit(dict_, to_load) - - return instance - - if mapper.polymorphic_map and not _polymorphic_from and not refresh_state: - # if we are doing polymorphic, dispatch to a different _instance() - # method specific to the subclass mapper - _instance = _decorate_polymorphic_switch( - _instance, context, mapper, result, path, - polymorphic_discriminator, adapter) - - return _instance - - -def _populate_full( - context, row, state, dict_, isnew, - loaded_instance, populate_existing, populators): - if isnew: - # first time we are seeing a row with this identity. - state.runid = context.runid - - for key, getter in populators["quick"]: - dict_[key] = getter(row) - if populate_existing: - for key, set_callable in populators["expire"]: - dict_.pop(key, None) - if set_callable: - state.expired_attributes.add(key) - else: - for key, set_callable in populators["expire"]: - if set_callable: - state.expired_attributes.add(key) - for key, populator in populators["new"]: - populator(state, dict_, row) - for key, populator in populators["delayed"]: - populator(state, dict_, row) - else: - # have already seen rows with this identity. 
- for key, populator in populators["existing"]: - populator(state, dict_, row) - - -def _populate_partial( - context, row, state, dict_, isnew, - unloaded, populators): - if not isnew: - to_load = context.partials[state] - for key, populator in populators["existing"]: - if key in to_load: - populator(state, dict_, row) - else: - to_load = unloaded - context.partials[state] = to_load - - for key, getter in populators["quick"]: - if key in to_load: - dict_[key] = getter(row) - for key, set_callable in populators["expire"]: - if key in to_load: - dict_.pop(key, None) - if set_callable: - state.expired_attributes.add(key) - for key, populator in populators["new"]: - if key in to_load: - populator(state, dict_, row) - for key, populator in populators["delayed"]: - if key in to_load: - populator(state, dict_, row) - for key, populator in populators["eager"]: - if key not in unloaded: - populator(state, dict_, row) - - return to_load - - -def _validate_version_id(mapper, state, dict_, row, adapter): - - version_id_col = mapper.version_id_col - - if version_id_col is None: - return - - if adapter: - version_id_col = adapter.columns[version_id_col] - - if mapper._get_state_attr_by_column( - state, dict_, mapper.version_id_col) != row[version_id_col]: - raise orm_exc.StaleDataError( - "Instance '%s' has version id '%s' which " - "does not match database-loaded version id '%s'." - % (state_str(state), mapper._get_state_attr_by_column( - state, dict_, mapper.version_id_col), - row[version_id_col])) - - -def _decorate_polymorphic_switch( - instance_fn, context, mapper, result, path, - polymorphic_discriminator, adapter): - if polymorphic_discriminator is not None: - polymorphic_on = polymorphic_discriminator - else: - polymorphic_on = mapper.polymorphic_on - if polymorphic_on is None: - return instance_fn - - if adapter: - polymorphic_on = adapter.columns[polymorphic_on] - - def configure_subclass_mapper(discriminator): - try: - sub_mapper = mapper.polymorphic_map[discriminator] - except KeyError: - raise AssertionError( - "No such polymorphic_identity %r is defined" % - discriminator) - else: - if sub_mapper is mapper: - return None - - return _instance_processor( - sub_mapper, context, result, - path, adapter, _polymorphic_from=mapper) - - polymorphic_instances = util.PopulateDict( - configure_subclass_mapper - ) - - def polymorphic_instance(row): - discriminator = row[polymorphic_on] - if discriminator is not None: - _instance = polymorphic_instances[discriminator] - if _instance: - return _instance(row) - return instance_fn(row) - return polymorphic_instance - - -def load_scalar_attributes(mapper, state, attribute_names): - """initiate a column-based attribute refresh operation.""" - - # assert mapper is _state_mapper(state) - session = state.session - if not session: - raise orm_exc.DetachedInstanceError( - "Instance %s is not bound to a Session; " - "attribute refresh operation cannot proceed" % - (state_str(state))) - - has_key = bool(state.key) - - result = False - - if mapper.inherits and not mapper.concrete: - # because we are using Core to produce a select() that we - # pass to the Query, we aren't calling setup() for mapped - # attributes; in 1.0 this means deferred attrs won't get loaded - # by default - statement = mapper._optimized_get_statement(state, attribute_names) - if statement is not None: - result = load_on_ident( - session.query(mapper). 
- options( - strategy_options.Load(mapper).undefer("*") - ).from_statement(statement), - None, - only_load_props=attribute_names, - refresh_state=state - ) - - if result is False: - if has_key: - identity_key = state.key - else: - # this codepath is rare - only valid when inside a flush, and the - # object is becoming persistent but hasn't yet been assigned - # an identity_key. - # check here to ensure we have the attrs we need. - pk_attrs = [mapper._columntoproperty[col].key - for col in mapper.primary_key] - if state.expired_attributes.intersection(pk_attrs): - raise sa_exc.InvalidRequestError( - "Instance %s cannot be refreshed - it's not " - " persistent and does not " - "contain a full primary key." % state_str(state)) - identity_key = mapper._identity_key_from_state(state) - - if (_none_set.issubset(identity_key) and - not mapper.allow_partial_pks) or \ - _none_set.issuperset(identity_key): - util.warn_limited( - "Instance %s to be refreshed doesn't " - "contain a full primary key - can't be refreshed " - "(and shouldn't be expired, either).", - state_str(state)) - return - - result = load_on_ident( - session.query(mapper), - identity_key, - refresh_state=state, - only_load_props=attribute_names) - - # if instance is pending, a refresh operation - # may not complete (even if PK attributes are assigned) - if has_key and result is None: - raise orm_exc.ObjectDeletedError(state) diff --git a/python/sqlalchemy/orm/mapper.py b/python/sqlalchemy/orm/mapper.py deleted file mode 100644 index 5da69445..00000000 --- a/python/sqlalchemy/orm/mapper.py +++ /dev/null @@ -1,2897 +0,0 @@ -# orm/mapper.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Logic to map Python classes to and from selectables. - -Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central -configurational unit which associates a class with a database table. - -This is a semi-private module; the main configurational API of the ORM is -available in :class:`~sqlalchemy.orm.`. - -""" -from __future__ import absolute_import - -import types -import weakref -from itertools import chain -from collections import deque - -from .. import sql, util, log, exc as sa_exc, event, schema, inspection -from ..sql import expression, visitors, operators, util as sql_util -from . import instrumentation, attributes, exc as orm_exc, loading -from . import properties -from . import util as orm_util -from .interfaces import MapperProperty, InspectionAttr, _MappedAttribute - -from .base import _class_to_mapper, _state_mapper, class_mapper, \ - state_str, _INSTRUMENTOR -from .path_registry import PathRegistry - -import sys - - -_mapper_registry = weakref.WeakKeyDictionary() -_already_compiling = False - -_memoized_configured_property = util.group_expirable_memoized_property() - - -# a constant returned by _get_attr_by_column to indicate -# this mapper is not handling an attribute for a particular -# column -NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE') - -# lock used to synchronize the "mapper configure" step -_CONFIGURE_MUTEX = util.threading.RLock() - - -@inspection._self_inspects -@log.class_logger -class Mapper(InspectionAttr): - """Define the correlation of class attributes to database table - columns. - - The :class:`.Mapper` object is instantiated using the - :func:`~sqlalchemy.orm.mapper` function. 
For information - about instantiating new :class:`.Mapper` objects, see - that function's documentation. - - - When :func:`.mapper` is used - explicitly to link a user defined class with table - metadata, this is referred to as *classical mapping*. - Modern SQLAlchemy usage tends to favor the - :mod:`sqlalchemy.ext.declarative` extension for class - configuration, which - makes usage of :func:`.mapper` behind the scenes. - - Given a particular class known to be mapped by the ORM, - the :class:`.Mapper` which maintains it can be acquired - using the :func:`.inspect` function:: - - from sqlalchemy import inspect - - mapper = inspect(MyClass) - - A class which was mapped by the :mod:`sqlalchemy.ext.declarative` - extension will also have its mapper available via the ``__mapper__`` - attribute. - - - """ - - _new_mappers = False - - def __init__(self, - class_, - local_table=None, - properties=None, - primary_key=None, - non_primary=False, - inherits=None, - inherit_condition=None, - inherit_foreign_keys=None, - extension=None, - order_by=False, - always_refresh=False, - version_id_col=None, - version_id_generator=None, - polymorphic_on=None, - _polymorphic_map=None, - polymorphic_identity=None, - concrete=False, - with_polymorphic=None, - allow_partial_pks=True, - batch=True, - column_prefix=None, - include_properties=None, - exclude_properties=None, - passive_updates=True, - confirm_deleted_rows=True, - eager_defaults=False, - legacy_is_orphan=False, - _compiled_cache_size=100, - ): - """Return a new :class:`~.Mapper` object. - - This function is typically used behind the scenes - via the Declarative extension. When using Declarative, - many of the usual :func:`.mapper` arguments are handled - by the Declarative extension itself, including ``class_``, - ``local_table``, ``properties``, and ``inherits``. - Other options are passed to :func:`.mapper` using - the ``__mapper_args__`` class variable:: - - class MyClass(Base): - __tablename__ = 'my_table' - id = Column(Integer, primary_key=True) - type = Column(String(50)) - alt = Column("some_alt", Integer) - - __mapper_args__ = { - 'polymorphic_on' : type - } - - - Explicit use of :func:`.mapper` - is often referred to as *classical mapping*. The above - declarative example is equivalent in classical form to:: - - my_table = Table("my_table", metadata, - Column('id', Integer, primary_key=True), - Column('type', String(50)), - Column("some_alt", Integer) - ) - - class MyClass(object): - pass - - mapper(MyClass, my_table, - polymorphic_on=my_table.c.type, - properties={ - 'alt':my_table.c.some_alt - }) - - .. seealso:: - - :ref:`classical_mapping` - discussion of direct usage of - :func:`.mapper` - - :param class\_: The class to be mapped. When using Declarative, - this argument is automatically passed as the declared class - itself. - - :param local_table: The :class:`.Table` or other selectable - to which the class is mapped. May be ``None`` if - this mapper inherits from another mapper using single-table - inheritance. When using Declarative, this argument is - automatically passed by the extension, based on what - is configured via the ``__table__`` argument or via the - :class:`.Table` produced as a result of the ``__tablename__`` - and :class:`.Column` arguments present. - - :param always_refresh: If True, all query operations for this mapped - class will overwrite all data within object instances that already - exist within the session, erasing any in-memory changes with - whatever information was loaded from the database. 
Usage of this - flag is highly discouraged; as an alternative, see the method - :meth:`.Query.populate_existing`. - - :param allow_partial_pks: Defaults to True. Indicates that a - composite primary key with some NULL values should be considered as - possibly existing within the database. This affects whether a - mapper will assign an incoming row to an existing identity, as well - as if :meth:`.Session.merge` will check the database first for a - particular primary key value. A "partial primary key" can occur if - one has mapped to an OUTER JOIN, for example. - - :param batch: Defaults to ``True``, indicating that save operations - of multiple entities can be batched together for efficiency. - Setting to False indicates - that an instance will be fully saved before saving the next - instance. This is used in the extremely rare case that a - :class:`.MapperEvents` listener requires being called - in between individual row persistence operations. - - :param column_prefix: A string which will be prepended - to the mapped attribute name when :class:`.Column` - objects are automatically assigned as attributes to the - mapped class. Does not affect explicitly specified - column-based properties. - - See the section :ref:`column_prefix` for an example. - - :param concrete: If True, indicates this mapper should use concrete - table inheritance with its parent mapper. - - See the section :ref:`concrete_inheritance` for an example. - - :param confirm_deleted_rows: defaults to True; when a DELETE occurs - of one more rows based on specific primary keys, a warning is - emitted when the number of rows matched does not equal the number - of rows expected. This parameter may be set to False to handle the - case where database ON DELETE CASCADE rules may be deleting some of - those rows automatically. The warning may be changed to an - exception in a future release. - - .. versionadded:: 0.9.4 - added - :paramref:`.mapper.confirm_deleted_rows` as well as conditional - matched row checking on delete. - - :param eager_defaults: if True, the ORM will immediately fetch the - value of server-generated default values after an INSERT or UPDATE, - rather than leaving them as expired to be fetched on next access. - This can be used for event schemes where the server-generated values - are needed immediately before the flush completes. By default, - this scheme will emit an individual ``SELECT`` statement per row - inserted or updated, which note can add significant performance - overhead. However, if the - target database supports :term:`RETURNING`, the default values will - be returned inline with the INSERT or UPDATE statement, which can - greatly enhance performance for an application that needs frequent - access to just-generated server defaults. - - .. versionchanged:: 0.9.0 The ``eager_defaults`` option can now - make use of :term:`RETURNING` for backends which support it. - - :param exclude_properties: A list or set of string column names to - be excluded from mapping. - - See :ref:`include_exclude_cols` for an example. - - :param extension: A :class:`.MapperExtension` instance or - list of :class:`.MapperExtension` instances which will be applied - to all operations by this :class:`.Mapper`. **Deprecated.** - Please see :class:`.MapperEvents`. - - :param include_properties: An inclusive list or set of string column - names to map. - - See :ref:`include_exclude_cols` for an example. 
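To make the ``include_properties`` / ``exclude_properties`` options above concrete, here is a minimal sketch against the public ``mapper()`` API of this vintage; the ``User`` class and ``user`` table are illustrative assumptions, not part of the deleted module::

    from sqlalchemy import Table, Column, Integer, String, MetaData
    from sqlalchemy.orm import mapper

    metadata = MetaData()

    # hypothetical table; 'audit_info' exists only for the example
    user_table = Table('user', metadata,
                       Column('id', Integer, primary_key=True),
                       Column('name', String(50)),
                       Column('audit_info', String(200)))

    class User(object):
        pass

    # only 'id' and 'name' become mapped attributes;
    # 'audit_info' is left unmapped entirely
    mapper(User, user_table, include_properties=['id', 'name'])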
- - :param inherits: A mapped class or the corresponding :class:`.Mapper` - of one indicating a superclass to which this :class:`.Mapper` - should *inherit* from. The mapped class here must be a subclass - of the other mapper's class. When using Declarative, this argument - is passed automatically as a result of the natural class - hierarchy of the declared classes. - - .. seealso:: - - :ref:`inheritance_toplevel` - - :param inherit_condition: For joined table inheritance, a SQL - expression which will - define how the two tables are joined; defaults to a natural join - between the two tables. - - :param inherit_foreign_keys: When ``inherit_condition`` is used and - the columns present are missing a :class:`.ForeignKey` - configuration, this parameter can be used to specify which columns - are "foreign". In most cases can be left as ``None``. - - :param legacy_is_orphan: Boolean, defaults to ``False``. - When ``True``, specifies that "legacy" orphan consideration - is to be applied to objects mapped by this mapper, which means - that a pending (that is, not persistent) object is auto-expunged - from an owning :class:`.Session` only when it is de-associated - from *all* parents that specify a ``delete-orphan`` cascade towards - this mapper. The new default behavior is that the object is - auto-expunged when it is de-associated with *any* of its parents - that specify ``delete-orphan`` cascade. This behavior is more - consistent with that of a persistent object, and allows behavior to - be consistent in more scenarios independently of whether or not an - orphanable object has been flushed yet or not. - - See the change note and example at :ref:`legacy_is_orphan_addition` - for more detail on this change. - - .. versionadded:: 0.8 - the consideration of a pending object as - an "orphan" has been modified to more closely match the - behavior as that of persistent objects, which is that the object - is expunged from the :class:`.Session` as soon as it is - de-associated from any of its orphan-enabled parents. Previously, - the pending object would be expunged only if de-associated - from all of its orphan-enabled parents. The new flag - ``legacy_is_orphan`` is added to :func:`.orm.mapper` which - re-establishes the legacy behavior. - - :param non_primary: Specify that this :class:`.Mapper` is in addition - to the "primary" mapper, that is, the one used for persistence. - The :class:`.Mapper` created here may be used for ad-hoc - mapping of the class to an alternate selectable, for loading - only. - - :paramref:`.Mapper.non_primary` is not an often used option, but - is useful in some specific :func:`.relationship` cases. - - .. seealso:: - - :ref:`relationship_non_primary_mapper` - - :param order_by: A single :class:`.Column` or list of :class:`.Column` - objects for which selection operations should use as the default - ordering for entities. By default mappers have no pre-defined - ordering. - - :param passive_updates: Indicates UPDATE behavior of foreign key - columns when a primary key column changes on a joined-table - inheritance mapping. Defaults to ``True``. - - When True, it is assumed that ON UPDATE CASCADE is configured on - the foreign key in the database, and that the database will handle - propagation of an UPDATE from a source column to dependent columns - on joined-table rows. - - When False, it is assumed that the database does not enforce - referential integrity and will not be issuing its own CASCADE - operation for an update. 
-           emit an UPDATE statement for the dependent columns during a
-           primary key change.
-
-           .. seealso::
-
-              :ref:`passive_updates` - description of a similar feature as
-              used with :func:`.relationship`
-
-        :param polymorphic_on: Specifies the column, attribute, or
-           SQL expression used to determine the target class for an
-           incoming row, when inheriting classes are present.
-
-           This value is commonly a :class:`.Column` object that's
-           present in the mapped :class:`.Table`::
-
-             class Employee(Base):
-                 __tablename__ = 'employee'
-
-                 id = Column(Integer, primary_key=True)
-                 discriminator = Column(String(50))
-
-                 __mapper_args__ = {
-                     "polymorphic_on":discriminator,
-                     "polymorphic_identity":"employee"
-                 }
-
-           It may also be specified
-           as a SQL expression, as in this example where we
-           use the :func:`.case` construct to provide a conditional
-           approach::
-
-             class Employee(Base):
-                 __tablename__ = 'employee'
-
-                 id = Column(Integer, primary_key=True)
-                 discriminator = Column(String(50))
-
-                 __mapper_args__ = {
-                     "polymorphic_on":case([
-                         (discriminator == "EN", "engineer"),
-                         (discriminator == "MA", "manager"),
-                     ], else_="employee"),
-                     "polymorphic_identity":"employee"
-                 }
-
-           It may also refer to any attribute
-           configured with :func:`.column_property`, or to the
-           string name of one::
-
-             class Employee(Base):
-                 __tablename__ = 'employee'
-
-                 id = Column(Integer, primary_key=True)
-                 discriminator = Column(String(50))
-                 employee_type = column_property(
-                     case([
-                         (discriminator == "EN", "engineer"),
-                         (discriminator == "MA", "manager"),
-                     ], else_="employee")
-                 )
-
-                 __mapper_args__ = {
-                     "polymorphic_on":employee_type,
-                     "polymorphic_identity":"employee"
-                 }
-
-           .. versionchanged:: 0.7.4
-              ``polymorphic_on`` may be specified as a SQL expression,
-              or refer to any attribute configured with
-              :func:`.column_property`, or to the string name of one.
-
-           When setting ``polymorphic_on`` to reference an
-           attribute or expression that's not present in the
-           locally mapped :class:`.Table`, yet the value
-           of the discriminator should be persisted to the database,
-           the value of the
-           discriminator is not automatically set on new
-           instances; this must be handled by the user,
-           either through manual means or via event listeners.
-           A typical approach to establishing such a listener
-           looks like::
-
-               from sqlalchemy import event
-               from sqlalchemy.orm import object_mapper
-
-               @event.listens_for(Employee, "init", propagate=True)
-               def set_identity(instance, *arg, **kw):
-                   mapper = object_mapper(instance)
-                   instance.discriminator = mapper.polymorphic_identity
-
-           Where above, we assign the value of ``polymorphic_identity``
-           for the mapped class to the ``discriminator`` attribute,
-           thus persisting the value to the ``discriminator`` column
-           in the database.
-
-           .. warning::
-
-              Currently, **only one discriminator column may be set**, typically
-              on the base-most class in the hierarchy.  "Cascading" polymorphic
-              columns are not yet supported.
-
-           .. seealso::
-
-              :ref:`inheritance_toplevel`
-
-        :param polymorphic_identity: Specifies the value which
-           identifies this particular class as returned by the
-           column expression referred to by the ``polymorphic_on``
-           setting.  As rows are received, the value corresponding
-           to the ``polymorphic_on`` column expression is compared
-           to this value, indicating which subclass should
-           be used for the newly reconstructed object.
-
-        :param properties: A dictionary mapping the string names of object
-           attributes to :class:`.MapperProperty` instances, which define the
-           persistence behavior of that attribute.  Note that :class:`.Column`
-           objects present in
-           the mapped :class:`.Table` are automatically placed into
-           ``ColumnProperty`` instances upon mapping, unless overridden.
-           When using Declarative, this argument is passed automatically,
-           based on all those :class:`.MapperProperty` instances declared
-           in the declared class body.
-
-        :param primary_key: A list of :class:`.Column` objects which define
-           the primary key to be used against this mapper's selectable unit.
-           This is normally simply the primary key of the ``local_table``, but
-           can be overridden here.
-
-        :param version_id_col: A :class:`.Column`
-           that will be used to keep a running version id of rows
-           in the table.  This is used to detect concurrent updates or
-           the presence of stale data in a flush.  The methodology is to
-           detect an UPDATE statement that does not match the last known
-           version id; when this occurs, a
-           :class:`~sqlalchemy.orm.exc.StaleDataError` exception is
-           thrown.
-           By default, the column must be of :class:`.Integer` type,
-           unless ``version_id_generator`` specifies an alternative version
-           generator.
-
-           .. seealso::
-
-              :ref:`mapper_version_counter` - discussion of version counting
-              and rationale.
-
-        :param version_id_generator: Define how new version ids should
-           be generated.  Defaults to ``None``, which indicates that
-           a simple integer counting scheme be employed.  To provide a custom
-           versioning scheme, provide a callable function of the form::
-
-               def generate_version(version):
-                   return next_version
-
-           Alternatively, server-side versioning functions such as triggers,
-           or programmatic versioning schemes outside of the version id
-           generator may be used, by specifying the value ``False``.
-           Please see :ref:`server_side_version_counter` for a discussion
-           of important points when using this option.
-
-           .. versionadded:: 0.9.0 ``version_id_generator`` supports
-              server-side version number generation.
-
-           .. seealso::
-
-              :ref:`custom_version_counter`
-
-              :ref:`server_side_version_counter`
-
-
-        :param with_polymorphic: A tuple in the form ``(<classes>,
-           <selectable>)`` indicating the default style of "polymorphic"
-           loading, that is, which tables are queried at once.  ``<classes>`` is
-           any single or list of mappers and/or classes indicating the
-           inherited classes that should be loaded at once.  The special value
-           ``'*'`` may be used to indicate all descending classes should be
-           loaded immediately.  The second tuple argument ``<selectable>``
-           indicates a selectable that will be used to query for multiple
-           classes.
-
-           .. seealso::
-
-              :ref:`with_polymorphic` - discussion of polymorphic querying
-              techniques.
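-
-        As a combined, minimal sketch of several of the parameters above
-        (the ``Person``/``Engineer`` names and table layout here are
-        illustrative assumptions, not part of this module), a joined-table
-        inheritance mapping configured through Declarative might look like::
-
-            from sqlalchemy import Column, ForeignKey, Integer, String
-            from sqlalchemy.ext.declarative import declarative_base
-
-            Base = declarative_base()
-
-            class Person(Base):
-                __tablename__ = 'person'
-                id = Column(Integer, primary_key=True)
-                type = Column(String(20))
-                __mapper_args__ = {
-                    'polymorphic_on': type,
-                    'polymorphic_identity': 'person',
-                }
-
-            class Engineer(Person):
-                __tablename__ = 'engineer'
-                # joined-table inheritance; "inherits" and a natural
-                # inherit_condition are derived from this foreign key
-                id = Column(Integer, ForeignKey('person.id'),
-                            primary_key=True)
-                __mapper_args__ = {'polymorphic_identity': 'engineer'}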
- - """ - - self.class_ = util.assert_arg_type(class_, type, 'class_') - - self.class_manager = None - - self._primary_key_argument = util.to_list(primary_key) - self.non_primary = non_primary - - if order_by is not False: - self.order_by = util.to_list(order_by) - else: - self.order_by = order_by - - self.always_refresh = always_refresh - - if isinstance(version_id_col, MapperProperty): - self.version_id_prop = version_id_col - self.version_id_col = None - else: - self.version_id_col = version_id_col - if version_id_generator is False: - self.version_id_generator = False - elif version_id_generator is None: - self.version_id_generator = lambda x: (x or 0) + 1 - else: - self.version_id_generator = version_id_generator - - self.concrete = concrete - self.single = False - self.inherits = inherits - self.local_table = local_table - self.inherit_condition = inherit_condition - self.inherit_foreign_keys = inherit_foreign_keys - self._init_properties = properties or {} - self._delete_orphans = [] - self.batch = batch - self.eager_defaults = eager_defaults - self.column_prefix = column_prefix - self.polymorphic_on = expression._clause_element_as_expr( - polymorphic_on) - self._dependency_processors = [] - self.validators = util.immutabledict() - self.passive_updates = passive_updates - self.legacy_is_orphan = legacy_is_orphan - self._clause_adapter = None - self._requires_row_aliasing = False - self._inherits_equated_pairs = None - self._memoized_values = {} - self._compiled_cache_size = _compiled_cache_size - self._reconstructor = None - self._deprecated_extensions = util.to_list(extension or []) - self.allow_partial_pks = allow_partial_pks - - if self.inherits and not self.concrete: - self.confirm_deleted_rows = False - else: - self.confirm_deleted_rows = confirm_deleted_rows - - self._set_with_polymorphic(with_polymorphic) - - if isinstance(self.local_table, expression.SelectBase): - raise sa_exc.InvalidRequestError( - "When mapping against a select() construct, map against " - "an alias() of the construct instead." - "This because several databases don't allow a " - "SELECT from a subquery that does not have an alias." - ) - - if self.with_polymorphic and \ - isinstance(self.with_polymorphic[1], - expression.SelectBase): - self.with_polymorphic = (self.with_polymorphic[0], - self.with_polymorphic[1].alias()) - - # our 'polymorphic identity', a string name that when located in a - # result set row indicates this Mapper should be used to construct - # the object instance for that row. - self.polymorphic_identity = polymorphic_identity - - # a dictionary of 'polymorphic identity' names, associating those - # names with Mappers that will be used to construct object instances - # upon a select operation. 
-        if _polymorphic_map is None:
-            self.polymorphic_map = {}
-        else:
-            self.polymorphic_map = _polymorphic_map
-
-        if include_properties is not None:
-            self.include_properties = util.to_set(include_properties)
-        else:
-            self.include_properties = None
-        if exclude_properties:
-            self.exclude_properties = util.to_set(exclude_properties)
-        else:
-            self.exclude_properties = None
-
-        self.configured = False
-
-        # prevent this mapper from being constructed
-        # while a configure_mappers() is occurring (and defer a
-        # configure_mappers() until construction succeeds)
-        _CONFIGURE_MUTEX.acquire()
-        try:
-            self.dispatch._events._new_mapper_instance(class_, self)
-            self._configure_inheritance()
-            self._configure_legacy_instrument_class()
-            self._configure_class_instrumentation()
-            self._configure_listeners()
-            self._configure_properties()
-            self._configure_polymorphic_setter()
-            self._configure_pks()
-            Mapper._new_mappers = True
-            self._log("constructed")
-            self._expire_memoizations()
-        finally:
-            _CONFIGURE_MUTEX.release()
-
-    # major attributes initialized at the class level so that
-    # they can be Sphinx-documented.
-
-    is_mapper = True
-    """Part of the inspection API."""
-
-    @property
-    def mapper(self):
-        """Part of the inspection API.
-
-        Returns self.
-
-        """
-        return self
-
-    @property
-    def entity(self):
-        """Part of the inspection API.
-
-        Returns self.class\_.
-
-        """
-        return self.class_
-
-    local_table = None
-    """The :class:`.Selectable` which this :class:`.Mapper` manages.
-
-    Typically is an instance of :class:`.Table` or :class:`.Alias`.
-    May also be ``None``.
-
-    The "local" table is the
-    selectable that the :class:`.Mapper` is directly responsible for
-    managing from an attribute access and flush perspective.  For
-    non-inheriting mappers, the local table is the same as the
-    "mapped" table.  For joined-table inheritance mappers, local_table
-    will be the particular sub-table of the overall "join" which
-    this :class:`.Mapper` represents.  If this mapper is a
-    single-table inheriting mapper, local_table will be ``None``.
-
-    .. seealso::
-
-        :attr:`~.Mapper.mapped_table`.
-
-    """
-
-    mapped_table = None
-    """The :class:`.Selectable` to which this :class:`.Mapper` is mapped.
-
-    Typically an instance of :class:`.Table`, :class:`.Join`, or
-    :class:`.Alias`.
-
-    The "mapped" table is the selectable that
-    the mapper selects from during queries.  For non-inheriting
-    mappers, the mapped table is the same as the "local" table.
-    For joined-table inheritance mappers, mapped_table references the
-    full :class:`.Join` representing full rows for this particular
-    subclass.  For single-table inheritance mappers, mapped_table
-    references the base table.
-
-    .. seealso::
-
-        :attr:`~.Mapper.local_table`.
-
-    """
-
-    inherits = None
-    """References the :class:`.Mapper` which this :class:`.Mapper`
-    inherits from, if any.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    configured = None
-    """Represent ``True`` if this :class:`.Mapper` has been configured.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    .. seealso::
-
-        :func:`.configure_mappers`.
-
-    """
-
-    concrete = None
-    """Represent ``True`` if this :class:`.Mapper` is a concrete
-    inheritance mapper.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
- - """ - - tables = None - """An iterable containing the collection of :class:`.Table` objects - which this :class:`.Mapper` is aware of. - - If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias` - representing a :class:`.Select`, the individual :class:`.Table` - objects that comprise the full construct will be represented here. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - primary_key = None - """An iterable containing the collection of :class:`.Column` objects - which comprise the 'primary key' of the mapped table, from the - perspective of this :class:`.Mapper`. - - This list is against the selectable in :attr:`~.Mapper.mapped_table`. In - the case of inheriting mappers, some columns may be managed by a - superclass mapper. For example, in the case of a :class:`.Join`, the - primary key is determined by all of the primary key columns across all - tables referenced by the :class:`.Join`. - - The list is also not necessarily the same as the primary key column - collection associated with the underlying tables; the :class:`.Mapper` - features a ``primary_key`` argument that can override what the - :class:`.Mapper` considers as primary key columns. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - class_ = None - """The Python class which this :class:`.Mapper` maps. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - class_manager = None - """The :class:`.ClassManager` which maintains event listeners - and class-bound descriptors for this :class:`.Mapper`. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - single = None - """Represent ``True`` if this :class:`.Mapper` is a single table - inheritance mapper. - - :attr:`~.Mapper.local_table` will be ``None`` if this flag is set. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - non_primary = None - """Represent ``True`` if this :class:`.Mapper` is a "non-primary" - mapper, e.g. a mapper that is used only to selet rows but not for - persistence management. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - polymorphic_on = None - """The :class:`.Column` or SQL expression specified as the - ``polymorphic_on`` argument - for this :class:`.Mapper`, within an inheritance scenario. - - This attribute is normally a :class:`.Column` instance but - may also be an expression, such as one derived from - :func:`.cast`. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - polymorphic_map = None - """A mapping of "polymorphic identity" identifiers mapped to - :class:`.Mapper` instances, within an inheritance scenario. - - The identifiers can be of any type which is comparable to the - type of column represented by :attr:`~.Mapper.polymorphic_on`. - - An inheritance chain of mappers will all reference the same - polymorphic map object. The object is used to correlate incoming - result rows to target mappers. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. 
- - """ - - polymorphic_identity = None - """Represent an identifier which is matched against the - :attr:`~.Mapper.polymorphic_on` column during result row loading. - - Used only with inheritance, this object can be of any type which is - comparable to the type of column represented by - :attr:`~.Mapper.polymorphic_on`. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - base_mapper = None - """The base-most :class:`.Mapper` in an inheritance chain. - - In a non-inheriting scenario, this attribute will always be this - :class:`.Mapper`. In an inheritance scenario, it references - the :class:`.Mapper` which is parent to all other :class:`.Mapper` - objects in the inheritance chain. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - columns = None - """A collection of :class:`.Column` or other scalar expression - objects maintained by this :class:`.Mapper`. - - The collection behaves the same as that of the ``c`` attribute on - any :class:`.Table` object, except that only those columns included in - this mapping are present, and are keyed based on the attribute name - defined in the mapping, not necessarily the ``key`` attribute of the - :class:`.Column` itself. Additionally, scalar expressions mapped - by :func:`.column_property` are also present here. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - validators = None - """An immutable dictionary of attributes which have been decorated - using the :func:`~.orm.validates` decorator. - - The dictionary contains string attribute names as keys - mapped to the actual validation method. - - """ - - c = None - """A synonym for :attr:`~.Mapper.columns`.""" - - @util.memoized_property - def _path_registry(self): - return PathRegistry.per_mapper(self) - - def _configure_inheritance(self): - """Configure settings related to inherting and/or inherited mappers - being present.""" - - # a set of all mappers which inherit from this one. - self._inheriting_mappers = util.WeakSequence() - - if self.inherits: - if isinstance(self.inherits, type): - self.inherits = class_mapper(self.inherits, configure=False) - if not issubclass(self.class_, self.inherits.class_): - raise sa_exc.ArgumentError( - "Class '%s' does not inherit from '%s'" % - (self.class_.__name__, self.inherits.class_.__name__)) - if self.non_primary != self.inherits.non_primary: - np = not self.non_primary and "primary" or "non-primary" - raise sa_exc.ArgumentError( - "Inheritance of %s mapper for class '%s' is " - "only allowed from a %s mapper" % - (np, self.class_.__name__, np)) - # inherit_condition is optional. 
-            if self.local_table is None:
-                self.local_table = self.inherits.local_table
-                self.mapped_table = self.inherits.mapped_table
-                self.single = True
-            elif self.local_table is not self.inherits.local_table:
-                if self.concrete:
-                    self.mapped_table = self.local_table
-                    for mapper in self.iterate_to_root():
-                        if mapper.polymorphic_on is not None:
-                            mapper._requires_row_aliasing = True
-                else:
-                    if self.inherit_condition is None:
-                        # figure out inherit condition from our table to the
-                        # immediate table of the inherited mapper, not its
-                        # full table which could pull in other stuff we don't
-                        # want (allows test/inheritance.InheritTest4 to pass)
-                        self.inherit_condition = sql_util.join_condition(
-                            self.inherits.local_table,
-                            self.local_table)
-                    self.mapped_table = sql.join(
-                        self.inherits.mapped_table,
-                        self.local_table,
-                        self.inherit_condition)
-
-                    fks = util.to_set(self.inherit_foreign_keys)
-                    self._inherits_equated_pairs = \
-                        sql_util.criterion_as_pairs(
-                            self.mapped_table.onclause,
-                            consider_as_foreign_keys=fks)
-            else:
-                self.mapped_table = self.local_table
-
-            if self.polymorphic_identity is not None and not self.concrete:
-                self._identity_class = self.inherits._identity_class
-            else:
-                self._identity_class = self.class_
-
-            if self.version_id_col is None:
-                self.version_id_col = self.inherits.version_id_col
-                self.version_id_generator = self.inherits.version_id_generator
-            elif self.inherits.version_id_col is not None and \
-                    self.version_id_col is not self.inherits.version_id_col:
-                util.warn(
-                    "Inheriting version_id_col '%s' does not match inherited "
-                    "version_id_col '%s' and will not automatically populate "
-                    "the inherited versioning column. "
-                    "version_id_col should only be specified on "
-                    "the base-most mapper that includes versioning." %
-                    (self.version_id_col.description,
-                     self.inherits.version_id_col.description)
-                )
-
-            if self.order_by is False and \
-                    not self.concrete and \
-                    self.inherits.order_by is not False:
-                self.order_by = self.inherits.order_by
-
-            self.polymorphic_map = self.inherits.polymorphic_map
-            self.batch = self.inherits.batch
-            self.inherits._inheriting_mappers.append(self)
-            self.base_mapper = self.inherits.base_mapper
-            self.passive_updates = self.inherits.passive_updates
-            self._all_tables = self.inherits._all_tables
-
-            if self.polymorphic_identity is not None:
-                if self.polymorphic_identity in self.polymorphic_map:
-                    util.warn(
-                        "Reassigning polymorphic association for identity %r "
-                        "from %r to %r: Check for duplicate use of %r as "
-                        "value for polymorphic_identity." %
-                        (self.polymorphic_identity,
-                         self.polymorphic_map[self.polymorphic_identity],
-                         self, self.polymorphic_identity)
-                    )
-                self.polymorphic_map[self.polymorphic_identity] = self
-
-        else:
-            self._all_tables = set()
-            self.base_mapper = self
-            self.mapped_table = self.local_table
-            if self.polymorphic_identity is not None:
-                self.polymorphic_map[self.polymorphic_identity] = self
-            self._identity_class = self.class_
-
-        if self.mapped_table is None:
-            raise sa_exc.ArgumentError(
-                "Mapper '%s' does not have a mapped_table specified."
-                % self)
-
-    def _set_with_polymorphic(self, with_polymorphic):
-        if with_polymorphic == '*':
-            self.with_polymorphic = ('*', None)
-        elif isinstance(with_polymorphic, (tuple, list)):
-            if isinstance(
-                    with_polymorphic[0], util.string_types + (tuple, list)):
-                self.with_polymorphic = with_polymorphic
-            else:
-                self.with_polymorphic = (with_polymorphic, None)
-        elif with_polymorphic is not None:
-            raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
-        else:
-            self.with_polymorphic = None
-
-        if isinstance(self.local_table, expression.SelectBase):
-            raise sa_exc.InvalidRequestError(
-                "When mapping against a select() construct, map against "
-                "an alias() of the construct instead. "
-                "This is because several databases don't allow a "
-                "SELECT from a subquery that does not have an alias."
-            )
-
-        if self.with_polymorphic and \
-                isinstance(self.with_polymorphic[1],
-                           expression.SelectBase):
-            self.with_polymorphic = (self.with_polymorphic[0],
-                                     self.with_polymorphic[1].alias())
-        if self.configured:
-            self._expire_memoizations()
-
-    def _set_concrete_base(self, mapper):
-        """Set the given :class:`.Mapper` as the 'inherits' for this
-        :class:`.Mapper`, assuming this :class:`.Mapper` is concrete
-        and does not already have an inherits."""
-
-        assert self.concrete
-        assert not self.inherits
-        assert isinstance(mapper, Mapper)
-        self.inherits = mapper
-        self.inherits.polymorphic_map.update(self.polymorphic_map)
-        self.polymorphic_map = self.inherits.polymorphic_map
-        for mapper in self.iterate_to_root():
-            if mapper.polymorphic_on is not None:
-                mapper._requires_row_aliasing = True
-        self.batch = self.inherits.batch
-        for mp in self.self_and_descendants:
-            mp.base_mapper = self.inherits.base_mapper
-        self.inherits._inheriting_mappers.append(self)
-        self.passive_updates = self.inherits.passive_updates
-        self._all_tables = self.inherits._all_tables
-        for key, prop in mapper._props.items():
-            if key not in self._props and \
-                    not self._should_exclude(key, key, local=False,
-                                             column=None):
-                self._adapt_inherited_property(key, prop, False)
-
-    def _set_polymorphic_on(self, polymorphic_on):
-        self.polymorphic_on = polymorphic_on
-        self._configure_polymorphic_setter(True)
-
-    def _configure_legacy_instrument_class(self):
-
-        if self.inherits:
-            self.dispatch._update(self.inherits.dispatch)
-            super_extensions = set(
-                chain(*[m._deprecated_extensions
-                        for m in self.inherits.iterate_to_root()]))
-        else:
-            super_extensions = set()
-
-        for ext in self._deprecated_extensions:
-            if ext not in super_extensions:
-                ext._adapt_instrument_class(self, ext)
-
-    def _configure_listeners(self):
-        if self.inherits:
-            super_extensions = set(
-                chain(*[m._deprecated_extensions
-                        for m in self.inherits.iterate_to_root()]))
-        else:
-            super_extensions = set()
-
-        for ext in self._deprecated_extensions:
-            if ext not in super_extensions:
-                ext._adapt_listener(self, ext)
-
-    def _configure_class_instrumentation(self):
-        """If this mapper is to be a primary mapper (i.e. the
-        non_primary flag is not set), associate this Mapper with the
-        given class_ and entity name.
-
-        Subsequent calls to ``class_mapper()`` for the class_/entity
-        name combination will return this mapper.  Also decorate the
-        `__init__` method on the mapped class to include optional
-        auto-session attachment logic.
-
-        """
-
-        manager = attributes.manager_of_class(self.class_)
-
-        if self.non_primary:
-            if not manager or not manager.is_mapped:
-                raise sa_exc.InvalidRequestError(
-                    "Class %s has no primary mapper configured.  Configure "
-                    "a primary mapper first before setting up a non primary "
-                    "Mapper." % self.class_)
-            self.class_manager = manager
-            self._identity_class = manager.mapper._identity_class
-            _mapper_registry[self] = True
-            return
-
-        if manager is not None:
-            assert manager.class_ is self.class_
-            if manager.is_mapped:
-                raise sa_exc.ArgumentError(
-                    "Class '%s' already has a primary mapper defined. "
-                    "Use non_primary=True to "
-                    "create a non primary Mapper.  clear_mappers() will "
-                    "remove *all* current mappers from all classes." %
-                    self.class_)
-            # else:
-            #     a ClassManager may already exist as
-            #     ClassManager.instrument_attribute() creates
-            #     new managers for each subclass if they don't yet exist.
-
-        _mapper_registry[self] = True
-
-        # note: this *must be called before instrumentation.register_class*
-        # to maintain the documented behavior of instrument_class
-        self.dispatch.instrument_class(self, self.class_)
-
-        if manager is None:
-            manager = instrumentation.register_class(self.class_)
-
-        self.class_manager = manager
-
-        manager.mapper = self
-        manager.deferred_scalar_loader = util.partial(
-            loading.load_scalar_attributes, self)
-
-        # The remaining members can be added by any mapper,
-        # e_name None or not.
-        if manager.info.get(_INSTRUMENTOR, False):
-            return
-
-        event.listen(manager, 'first_init', _event_on_first_init, raw=True)
-        event.listen(manager, 'init', _event_on_init, raw=True)
-
-        for key, method in util.iterate_attributes(self.class_):
-            if isinstance(method, types.FunctionType):
-                if hasattr(method, '__sa_reconstructor__'):
-                    self._reconstructor = method
-                    event.listen(manager, 'load', _event_on_load, raw=True)
-                elif hasattr(method, '__sa_validators__'):
-                    validation_opts = method.__sa_validation_opts__
-                    for name in method.__sa_validators__:
-                        self.validators = self.validators.union(
-                            {name: (method, validation_opts)}
-                        )
-
-        manager.info[_INSTRUMENTOR] = self
-
-    @classmethod
-    def _configure_all(cls):
-        """Class-level path to the :func:`.configure_mappers` call.
-        """
-        configure_mappers()
-
-    def dispose(self):
-        # Disable any attribute-based compilation.
-        self.configured = True
-
-        if hasattr(self, '_configure_failed'):
-            del self._configure_failed
-
-        if not self.non_primary and \
-                self.class_manager is not None and \
-                self.class_manager.is_mapped and \
-                self.class_manager.mapper is self:
-            instrumentation.unregister_class(self.class_)
-
-    def _configure_pks(self):
-
-        self.tables = sql_util.find_tables(self.mapped_table)
-
-        self._pks_by_table = {}
-        self._cols_by_table = {}
-
-        all_cols = util.column_set(chain(*[
-            col.proxy_set for col in
-            self._columntoproperty]))
-
-        pk_cols = util.column_set(c for c in all_cols if c.primary_key)
-
-        # identify primary key columns which are also mapped by this mapper.
-        tables = set(self.tables + [self.mapped_table])
-        self._all_tables.update(tables)
-        for t in tables:
-            if t.primary_key and pk_cols.issuperset(t.primary_key):
-                # ordering is important since it determines the ordering of
-                # mapper.primary_key (and therefore query.get())
-                self._pks_by_table[t] = \
-                    util.ordered_column_set(t.primary_key).\
-                    intersection(pk_cols)
-            self._cols_by_table[t] = \
-                util.ordered_column_set(t.c).\
-                intersection(all_cols)
-
-        # if explicit PK argument sent, add those columns to the
-        # primary key mappings
-        if self._primary_key_argument:
-            for k in self._primary_key_argument:
-                if k.table not in self._pks_by_table:
-                    self._pks_by_table[k.table] = util.OrderedSet()
-                self._pks_by_table[k.table].add(k)
-
-        # otherwise, see that we got a full PK for the mapped table
-        elif self.mapped_table not in self._pks_by_table or \
-                len(self._pks_by_table[self.mapped_table]) == 0:
-            raise sa_exc.ArgumentError(
-                "Mapper %s could not assemble any primary "
-                "key columns for mapped table '%s'" %
-                (self, self.mapped_table.description))
-        elif self.local_table not in self._pks_by_table and \
-                isinstance(self.local_table, schema.Table):
-            util.warn("Could not assemble any primary "
-                      "keys for locally mapped table '%s' - "
-                      "no rows will be persisted in this Table."
-                      % self.local_table.description)
-
-        if self.inherits and \
-                not self.concrete and \
-                not self._primary_key_argument:
-            # if inheriting, the "primary key" for this mapper is
-            # that of the inheriting (unless concrete or explicit)
-            self.primary_key = self.inherits.primary_key
-        else:
-            # determine primary key from argument or mapped_table pks -
-            # reduce to the minimal set of columns
-            if self._primary_key_argument:
-                primary_key = sql_util.reduce_columns(
-                    [self.mapped_table.corresponding_column(c) for c in
-                     self._primary_key_argument],
-                    ignore_nonexistent_tables=True)
-            else:
-                primary_key = sql_util.reduce_columns(
-                    self._pks_by_table[self.mapped_table],
-                    ignore_nonexistent_tables=True)
-
-            if len(primary_key) == 0:
-                raise sa_exc.ArgumentError(
-                    "Mapper %s could not assemble any primary "
-                    "key columns for mapped table '%s'" %
-                    (self, self.mapped_table.description))
-
-            self.primary_key = tuple(primary_key)
-            self._log("Identified primary key columns: %s", primary_key)
-
-        # determine cols that aren't expressed within our tables; mark these
-        # as "read only" properties which are refreshed upon INSERT/UPDATE
-        self._readonly_props = set(
-            self._columntoproperty[col]
-            for col in self._columntoproperty
-            if self._columntoproperty[col] not in self._identity_key_props and
-            (not hasattr(col, 'table') or
-             col.table not in self._cols_by_table))
-
-    def _configure_properties(self):
-
-        # Column and other ClauseElement objects which are mapped
-        self.columns = self.c = util.OrderedProperties()
-
-        # object attribute names mapped to MapperProperty objects
-        self._props = util.OrderedDict()
-
-        # table columns mapped to lists of MapperProperty objects
-        # using a list allows a single column to be defined as
-        # populating multiple object attributes
-        self._columntoproperty = _ColumnMapping(self)
-
-        # load custom properties
-        if self._init_properties:
-            for key, prop in self._init_properties.items():
-                self._configure_property(key, prop, False)
-
-        # pull properties from the inherited mapper if any.
-        if self.inherits:
-            for key, prop in self.inherits._props.items():
-                if key not in self._props and \
-                        not self._should_exclude(key, key, local=False,
-                                                 column=None):
-                    self._adapt_inherited_property(key, prop, False)
-
-        # create properties for each column in the mapped table,
-        # for those columns which don't already map to a property
-        for column in self.mapped_table.columns:
-            if column in self._columntoproperty:
-                continue
-
-            column_key = (self.column_prefix or '') + column.key
-
-            if self._should_exclude(
-                column.key, column_key,
-                local=self.local_table.c.contains_column(column),
-                column=column
-            ):
-                continue
-
-            # adjust the "key" used for this column to that
-            # of the inheriting mapper
-            for mapper in self.iterate_to_root():
-                if column in mapper._columntoproperty:
-                    column_key = mapper._columntoproperty[column].key
-
-            self._configure_property(column_key,
-                                     column,
-                                     init=False,
-                                     setparent=True)
-
-    def _configure_polymorphic_setter(self, init=False):
-        """Configure an attribute on the mapper representing the
-        'polymorphic_on' column, if applicable, and not
-        already generated by _configure_properties (which is typical).
-
-        Also create a setter function which will assign this
-        attribute to the value of the 'polymorphic_identity'
-        upon instance construction, also if applicable.  This
-        routine will run when an instance is created.
-
-        """
-        setter = False
-
-        if self.polymorphic_on is not None:
-            setter = True
-
-            if isinstance(self.polymorphic_on, util.string_types):
-                # polymorphic_on specified as a string - link
-                # it to mapped ColumnProperty
-                try:
-                    self.polymorphic_on = self._props[self.polymorphic_on]
-                except KeyError:
-                    raise sa_exc.ArgumentError(
-                        "Can't determine polymorphic_on "
-                        "value '%s' - no attribute is "
-                        "mapped to this name." % self.polymorphic_on)
-
-            if self.polymorphic_on in self._columntoproperty:
-                # polymorphic_on is a column that is already mapped
-                # to a ColumnProperty
-                prop = self._columntoproperty[self.polymorphic_on]
-                polymorphic_key = prop.key
-                self.polymorphic_on = prop.columns[0]
-                polymorphic_key = prop.key
-            elif isinstance(self.polymorphic_on, MapperProperty):
-                # polymorphic_on is directly a MapperProperty,
-                # ensure it's a ColumnProperty
-                if not isinstance(self.polymorphic_on,
-                                  properties.ColumnProperty):
-                    raise sa_exc.ArgumentError(
-                        "Only direct column-mapped "
-                        "property or SQL expression "
-                        "can be passed for polymorphic_on")
-                prop = self.polymorphic_on
-                self.polymorphic_on = prop.columns[0]
-                polymorphic_key = prop.key
-            elif not expression._is_column(self.polymorphic_on):
-                # polymorphic_on is not a Column and not a ColumnProperty;
-                # not supported right now.
-                raise sa_exc.ArgumentError(
-                    "Only direct column-mapped "
-                    "property or SQL expression "
-                    "can be passed for polymorphic_on"
-                )
-            else:
-                # polymorphic_on is a Column or SQL expression and
-                # doesn't appear to be mapped.  this means it can be 1.
-                # only present in the with_polymorphic selectable or
-                # 2. a totally standalone SQL expression which we'd
-                # hope is compatible with this mapper's mapped_table
-                col = self.mapped_table.corresponding_column(
-                    self.polymorphic_on)
-                if col is None:
-                    # polymorphic_on doesn't derive from any
-                    # column/expression that's present in the mapped
-                    # table.  we will make a "hidden" ColumnProperty
-                    # for it.  Just check that if it's directly a
-                    # schema.Column and we have with_polymorphic, it's
-                    # likely a user error if the schema.Column isn't
-                    # represented somehow in either mapped_table or
-                    # with_polymorphic.  Otherwise as of 0.7.4 we
-                    # just go with it and assume the user wants it
-                    # that way (i.e. a CASE statement)
-                    setter = False
-                    instrument = False
-                    col = self.polymorphic_on
-                    if isinstance(col, schema.Column) and (
-                            self.with_polymorphic is None or
-                            self.with_polymorphic[1].
-                            corresponding_column(col) is None):
-                        raise sa_exc.InvalidRequestError(
-                            "Could not map polymorphic_on column "
-                            "'%s' to the mapped table - polymorphic "
-                            "loads will not function properly"
-                            % col.description)
-                else:
-                    # column/expression that polymorphic_on derives from
-                    # is present in our mapped table
-                    # and is probably mapped, but polymorphic_on itself
-                    # is not.  This happens when
-                    # the polymorphic_on is only directly present in the
-                    # with_polymorphic selectable, as when using
-                    # polymorphic_union.
-                    # we'll make a separate ColumnProperty for it.
-                    instrument = True
-                key = getattr(col, 'key', None)
-                if key:
-                    if self._should_exclude(col.key, col.key, False, col):
-                        raise sa_exc.InvalidRequestError(
-                            "Cannot exclude or override the "
-                            "discriminator column %r" %
-                            col.key)
-                else:
-                    self.polymorphic_on = col = \
-                        col.label("_sa_polymorphic_on")
-                    key = col.key
-
-                self._configure_property(
-                    key,
-                    properties.ColumnProperty(col,
-                                              _instrument=instrument),
-                    init=init, setparent=True)
-                polymorphic_key = key
-        else:
-            # no polymorphic_on was set.
-            # check inheriting mappers for one.
-            for mapper in self.iterate_to_root():
-                # determine if polymorphic_on of the parent
-                # should be propagated here.  If the col
-                # is present in our mapped table, or if our mapped
-                # table is the same as the parent (i.e. single table
-                # inheritance), we can use it
-                if mapper.polymorphic_on is not None:
-                    if self.mapped_table is mapper.mapped_table:
-                        self.polymorphic_on = mapper.polymorphic_on
-                    else:
-                        self.polymorphic_on = \
-                            self.mapped_table.corresponding_column(
-                                mapper.polymorphic_on)
-                    # we can use the parent mapper's _set_polymorphic_identity
-                    # directly; it ensures the polymorphic_identity of the
-                    # instance's mapper is used so is portable to subclasses.
-                    if self.polymorphic_on is not None:
-                        self._set_polymorphic_identity = \
-                            mapper._set_polymorphic_identity
-                        self._validate_polymorphic_identity = \
-                            mapper._validate_polymorphic_identity
-                    else:
-                        self._set_polymorphic_identity = None
-                    return
-
-        if setter:
-            def _set_polymorphic_identity(state):
-                dict_ = state.dict
-                state.get_impl(polymorphic_key).set(
-                    state, dict_,
-                    state.manager.mapper.polymorphic_identity,
-                    None)
-
-            def _validate_polymorphic_identity(mapper, state, dict_):
-                if polymorphic_key in dict_ and \
-                        dict_[polymorphic_key] not in \
-                        mapper._acceptable_polymorphic_identities:
-                    util.warn_limited(
-                        "Flushing object %s with "
-                        "incompatible polymorphic identity %r; the "
-                        "object may not refresh and/or load correctly",
-                        (state_str(state), dict_[polymorphic_key])
-                    )
-
-            self._set_polymorphic_identity = _set_polymorphic_identity
-            self._validate_polymorphic_identity = \
-                _validate_polymorphic_identity
-        else:
-            self._set_polymorphic_identity = None
-
-    _validate_polymorphic_identity = None
-
-    @_memoized_configured_property
-    def _version_id_prop(self):
-        if self.version_id_col is not None:
-            return self._columntoproperty[self.version_id_col]
-        else:
-            return None
-
-    @_memoized_configured_property
-    def _acceptable_polymorphic_identities(self):
-        identities = set()
-
-        stack = deque([self])
-        while stack:
-            item = stack.popleft()
-            if item.mapped_table is self.mapped_table:
-                identities.add(item.polymorphic_identity)
-                stack.extend(item._inheriting_mappers)
-
-        return identities
-
-    @_memoized_configured_property
-    def _prop_set(self):
-        return frozenset(self._props.values())
-
-    def _adapt_inherited_property(self, key, prop, init):
-        if not self.concrete:
-            self._configure_property(key, prop, init=False, setparent=False)
-        elif key not in self._props:
-            self._configure_property(
-                key,
-                properties.ConcreteInheritedProperty(),
-                init=init, setparent=True)
-
-    def _configure_property(self, key, prop, init=True, setparent=True):
-        self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
-
-        if not isinstance(prop, MapperProperty):
-            prop = self._property_from_column(key, prop)
-
-        if isinstance(prop, properties.ColumnProperty):
-            col = self.mapped_table.corresponding_column(prop.columns[0])
-
-            # if the column is not present in the mapped table,
-            # test if a column has been added after the fact to the
-            # parent table (or their parent, etc.) [ticket:1570]
-            if col is None and self.inherits:
-                path = [self]
-                for m in self.inherits.iterate_to_root():
-                    col = m.local_table.corresponding_column(prop.columns[0])
-                    if col is not None:
-                        for m2 in path:
-                            m2.mapped_table._reset_exported()
-                        col = self.mapped_table.corresponding_column(
-                            prop.columns[0])
-                        break
-                    path.append(m)
-
-            # subquery expression, column not present in the mapped
-            # selectable.
-            if col is None:
-                col = prop.columns[0]
-
-                # column is coming in after _readonly_props was
-                # initialized; check for 'readonly'
-                if hasattr(self, '_readonly_props') and \
-                        (not hasattr(col, 'table') or
-                         col.table not in self._cols_by_table):
-                    self._readonly_props.add(prop)
-
-            else:
-                # if column is coming in after _cols_by_table was
-                # initialized, ensure the col is in the right set
-                if hasattr(self, '_cols_by_table') and \
-                        col.table in self._cols_by_table and \
-                        col not in self._cols_by_table[col.table]:
-                    self._cols_by_table[col.table].add(col)
-
-            # if this properties.ColumnProperty represents the "polymorphic
-            # discriminator" column, mark it.  We'll need this when rendering
-            # columns in SELECT statements.
-            if not hasattr(prop, '_is_polymorphic_discriminator'):
-                prop._is_polymorphic_discriminator = \
-                    (col is self.polymorphic_on or
-                     prop.columns[0] is self.polymorphic_on)
-
-            self.columns[key] = col
-            for col in prop.columns + prop._orig_columns:
-                for col in col.proxy_set:
-                    self._columntoproperty[col] = prop
-
-        prop.key = key
-
-        if setparent:
-            prop.set_parent(self, init)
-
-        if key in self._props and \
-                getattr(self._props[key], '_mapped_by_synonym', False):
-            syn = self._props[key]._mapped_by_synonym
-            raise sa_exc.ArgumentError(
-                "Can't call map_column=True for synonym %r=%r, "
-                "a ColumnProperty already exists keyed to the name "
-                "%r for column %r" % (syn, key, key, syn)
-            )
-
-        if key in self._props and \
-                not isinstance(prop, properties.ColumnProperty) and \
-                not isinstance(self._props[key], properties.ColumnProperty):
-            util.warn("Property %s on %s being replaced with new "
-                      "property %s; the old property will be discarded" % (
-                          self._props[key],
-                          self,
-                          prop,
-                      ))
-            oldprop = self._props[key]
-            self._path_registry.pop(oldprop, None)
-
-        self._props[key] = prop
-
-        if not self.non_primary:
-            prop.instrument_class(self)
-
-        for mapper in self._inheriting_mappers:
-            mapper._adapt_inherited_property(key, prop, init)
-
-        if init:
-            prop.init()
-            prop.post_instrument_class(self)
-
-        if self.configured:
-            self._expire_memoizations()
-
-    def _property_from_column(self, key, prop):
-        """generate/update a :class:`.ColumnProperty` given a
-        :class:`.Column` object. """
-
-        # we were passed a Column or a list of Columns;
-        # generate a properties.ColumnProperty
-        columns = util.to_list(prop)
-        column = columns[0]
-        if not expression._is_column(column):
-            raise sa_exc.ArgumentError(
-                "%s=%r is not an instance of MapperProperty or Column"
-                % (key, prop))
-
-        prop = self._props.get(key, None)
-
-        if isinstance(prop, properties.ColumnProperty):
-            if (
-                not self._inherits_equated_pairs or
-                (prop.columns[0], column) not in self._inherits_equated_pairs
-            ) and \
-                    not prop.columns[0].shares_lineage(column) and \
-                    prop.columns[0] is not self.version_id_col and \
-                    column is not self.version_id_col:
-                warn_only = prop.parent is not self
-                msg = ("Implicitly combining column %s with column "
-                       "%s under attribute '%s'.  Please configure one "
-                       "or more attributes for these same-named columns "
-                       "explicitly." % (prop.columns[-1], column, key))
-                if warn_only:
-                    util.warn(msg)
-                else:
-                    raise sa_exc.InvalidRequestError(msg)
-
-            # existing properties.ColumnProperty from an inheriting
-            # mapper. make a copy and append our column to it
-            prop = prop.copy()
-            prop.columns.insert(0, column)
-            self._log("inserting column to existing list "
-                      "in properties.ColumnProperty %s" % (key))
-            return prop
-        elif prop is None or isinstance(prop,
-                                        properties.ConcreteInheritedProperty):
-            mapped_column = []
-            for c in columns:
-                mc = self.mapped_table.corresponding_column(c)
-                if mc is None:
-                    mc = self.local_table.corresponding_column(c)
-                    if mc is not None:
-                        # if the column is in the local table but not the
-                        # mapped table, this corresponds to adding a
-                        # column after the fact to the local table.
-                        # [ticket:1523]
-                        self.mapped_table._reset_exported()
-                    mc = self.mapped_table.corresponding_column(c)
-                    if mc is None:
-                        raise sa_exc.ArgumentError(
-                            "When configuring property '%s' on %s, "
-                            "column '%s' is not represented in the mapper's "
-                            "table.  Use the `column_property()` function to "
-                            "force this column to be mapped as a read-only "
-                            "attribute." % (key, self, c))
-                mapped_column.append(mc)
-            return properties.ColumnProperty(*mapped_column)
-        else:
-            raise sa_exc.ArgumentError(
-                "WARNING: when configuring property '%s' on %s, "
-                "column '%s' conflicts with property '%r'. "
-                "To resolve this, map the column to the class under a "
-                "different name in the 'properties' dictionary.  Or, "
-                "to remove all awareness of the column entirely "
-                "(including its availability as a foreign key), "
-                "use the 'include_properties' or 'exclude_properties' "
-                "mapper arguments to control specifically which table "
-                "columns get mapped." %
-                (key, self, column.key, prop))
-
-    def _post_configure_properties(self):
-        """Call the ``init()`` method on all ``MapperProperties``
-        attached to this mapper.
-
-        This is a deferred configuration step which is intended
-        to execute once all mappers have been constructed.
-
-        """
-
-        self._log("_post_configure_properties() started")
-        l = [(key, prop) for key, prop in self._props.items()]
-        for key, prop in l:
-            self._log("initialize prop %s", key)
-
-            if prop.parent is self and not prop._configure_started:
-                prop.init()
-
-            if prop._configure_finished:
-                prop.post_instrument_class(self)
-
-        self._log("_post_configure_properties() complete")
-        self.configured = True
-
-    def add_properties(self, dict_of_properties):
-        """Add the given dictionary of properties to this mapper,
-        using `add_property`.
-
-        """
-        for key, value in dict_of_properties.items():
-            self.add_property(key, value)
-
-    def add_property(self, key, prop):
-        """Add an individual MapperProperty to this mapper.
-
-        If the mapper has not been configured yet, just adds the
-        property to the initial properties dictionary sent to the
-        constructor.  If this Mapper has already been configured, then
-        the given MapperProperty is configured immediately.
-
-        """
-        self._init_properties[key] = prop
-        self._configure_property(key, prop, init=self.configured)
-
-    def _expire_memoizations(self):
-        for mapper in self.iterate_to_root():
-            _memoized_configured_property.expire_instance(mapper)
-
-    @property
-    def _log_desc(self):
-        return "(" + self.class_.__name__ + \
-            "|" + \
-            (self.local_table is not None and
-             self.local_table.description or
-             str(self.local_table)) +\
-            (self.non_primary and
-             "|non-primary" or "") + ")"
-
-    def _log(self, msg, *args):
-        self.logger.info(
-            "%s " + msg, *((self._log_desc,) + args)
-        )
-
-    def _log_debug(self, msg, *args):
-        self.logger.debug(
-            "%s " + msg, *((self._log_desc,) + args)
-        )
-
-    def __repr__(self):
-        return '<Mapper at 0x%x; %s>' % (
-            id(self), self.class_.__name__)
-
-    def __str__(self):
-        return "Mapper|%s|%s%s" % (
-            self.class_.__name__,
-            self.local_table is not None and
-            self.local_table.description or None,
-            self.non_primary and "|non-primary" or ""
-        )
-
-    def _is_orphan(self, state):
-        orphan_possible = False
-        for mapper in self.iterate_to_root():
-            for (key, cls) in mapper._delete_orphans:
-                orphan_possible = True
-
-                has_parent = attributes.manager_of_class(cls).has_parent(
-                    state, key, optimistic=state.has_identity)
-
-                if self.legacy_is_orphan and has_parent:
-                    return False
-                elif not self.legacy_is_orphan and not has_parent:
-                    return True
-
-        if self.legacy_is_orphan:
-            return orphan_possible
-        else:
-            return False
-
-    def has_property(self, key):
-        return key in self._props
-
-    def get_property(self, key, _configure_mappers=True):
-        """return a MapperProperty associated with the given key.
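-
-        For example, a sketch assuming a mapped ``Person`` class with a
-        mapped ``name`` attribute (hypothetical names)::
-
-            from sqlalchemy import inspect
-
-            name_prop = inspect(Person).get_property('name')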
- """ - - if _configure_mappers and Mapper._new_mappers: - configure_mappers() - - try: - return self._props[key] - except KeyError: - raise sa_exc.InvalidRequestError( - "Mapper '%s' has no property '%s'" % (self, key)) - - def get_property_by_column(self, column): - """Given a :class:`.Column` object, return the - :class:`.MapperProperty` which maps this column.""" - - return self._columntoproperty[column] - - @property - def iterate_properties(self): - """return an iterator of all MapperProperty objects.""" - if Mapper._new_mappers: - configure_mappers() - return iter(self._props.values()) - - def _mappers_from_spec(self, spec, selectable): - """given a with_polymorphic() argument, return the set of mappers it - represents. - - Trims the list of mappers to just those represented within the given - selectable, if present. This helps some more legacy-ish mappings. - - """ - if spec == '*': - mappers = list(self.self_and_descendants) - elif spec: - mappers = set() - for m in util.to_list(spec): - m = _class_to_mapper(m) - if not m.isa(self): - raise sa_exc.InvalidRequestError( - "%r does not inherit from %r" % - (m, self)) - - if selectable is None: - mappers.update(m.iterate_to_root()) - else: - mappers.add(m) - mappers = [m for m in self.self_and_descendants if m in mappers] - else: - mappers = [] - - if selectable is not None: - tables = set(sql_util.find_tables(selectable, - include_aliases=True)) - mappers = [m for m in mappers if m.local_table in tables] - return mappers - - def _selectable_from_mappers(self, mappers, innerjoin): - """given a list of mappers (assumed to be within this mapper's - inheritance hierarchy), construct an outerjoin amongst those mapper's - mapped tables. - - """ - from_obj = self.mapped_table - for m in mappers: - if m is self: - continue - if m.concrete: - raise sa_exc.InvalidRequestError( - "'with_polymorphic()' requires 'selectable' argument " - "when concrete-inheriting mappers are used.") - elif not m.single: - if innerjoin: - from_obj = from_obj.join(m.local_table, - m.inherit_condition) - else: - from_obj = from_obj.outerjoin(m.local_table, - m.inherit_condition) - - return from_obj - - @_memoized_configured_property - def _single_table_criterion(self): - if self.single and \ - self.inherits and \ - self.polymorphic_on is not None: - return self.polymorphic_on.in_( - m.polymorphic_identity - for m in self.self_and_descendants) - else: - return None - - @_memoized_configured_property - def _with_polymorphic_mappers(self): - if Mapper._new_mappers: - configure_mappers() - if not self.with_polymorphic: - return [] - return self._mappers_from_spec(*self.with_polymorphic) - - @_memoized_configured_property - def _with_polymorphic_selectable(self): - if not self.with_polymorphic: - return self.mapped_table - - spec, selectable = self.with_polymorphic - if selectable is not None: - return selectable - else: - return self._selectable_from_mappers( - self._mappers_from_spec(spec, selectable), - False) - - with_polymorphic_mappers = _with_polymorphic_mappers - """The list of :class:`.Mapper` objects included in the - default "polymorphic" query. 
- - """ - - @_memoized_configured_property - def _insert_cols_as_none(self): - return dict( - ( - table, - frozenset( - col.key for col in columns - if not col.primary_key and - not col.server_default and not col.default) - ) - for table, columns in self._cols_by_table.items() - ) - - @_memoized_configured_property - def _propkey_to_col(self): - return dict( - ( - table, - dict( - (self._columntoproperty[col].key, col) - for col in columns - ) - ) - for table, columns in self._cols_by_table.items() - ) - - @_memoized_configured_property - def _pk_keys_by_table(self): - return dict( - ( - table, - frozenset([col.key for col in pks]) - ) - for table, pks in self._pks_by_table.items() - ) - - @_memoized_configured_property - def _server_default_cols(self): - return dict( - ( - table, - frozenset([ - col for col in columns - if col.server_default is not None]) - ) - for table, columns in self._cols_by_table.items() - ) - - @property - def selectable(self): - """The :func:`.select` construct this :class:`.Mapper` selects from - by default. - - Normally, this is equivalent to :attr:`.mapped_table`, unless - the ``with_polymorphic`` feature is in use, in which case the - full "polymorphic" selectable is returned. - - """ - return self._with_polymorphic_selectable - - def _with_polymorphic_args(self, spec=None, selectable=False, - innerjoin=False): - if self.with_polymorphic: - if not spec: - spec = self.with_polymorphic[0] - if selectable is False: - selectable = self.with_polymorphic[1] - elif selectable is False: - selectable = None - mappers = self._mappers_from_spec(spec, selectable) - if selectable is not None: - return mappers, selectable - else: - return mappers, self._selectable_from_mappers(mappers, - innerjoin) - - @_memoized_configured_property - def _polymorphic_properties(self): - return list(self._iterate_polymorphic_properties( - self._with_polymorphic_mappers)) - - def _iterate_polymorphic_properties(self, mappers=None): - """Return an iterator of MapperProperty objects which will render into - a SELECT.""" - if mappers is None: - mappers = self._with_polymorphic_mappers - - if not mappers: - for c in self.iterate_properties: - yield c - else: - # in the polymorphic case, filter out discriminator columns - # from other mappers, as these are sometimes dependent on that - # mapper's polymorphic selectable (which we don't want rendered) - for c in util.unique_list( - chain(*[ - list(mapper.iterate_properties) for mapper in - [self] + mappers - ]) - ): - if getattr(c, '_is_polymorphic_discriminator', False) and \ - (self.polymorphic_on is None or - c.columns[0] is not self.polymorphic_on): - continue - yield c - - @util.memoized_property - def attrs(self): - """A namespace of all :class:`.MapperProperty` objects - associated this mapper. - - This is an object that provides each property based on - its key name. For instance, the mapper for a - ``User`` class which has ``User.name`` attribute would - provide ``mapper.attrs.name``, which would be the - :class:`.ColumnProperty` representing the ``name`` - column. The namespace object can also be iterated, - which would yield each :class:`.MapperProperty`. - - :class:`.Mapper` has several pre-filtered views - of this attribute which limit the types of properties - returned, inclding :attr:`.synonyms`, :attr:`.column_attrs`, - :attr:`.relationships`, and :attr:`.composites`. - - .. warning:: - - the :attr:`.Mapper.relationships` accessor namespace is an - instance of :class:`.OrderedProperties`. 
-            a dictionary-like object which includes a small number of
-            named methods such as :meth:`.OrderedProperties.items`
-            and :meth:`.OrderedProperties.values`.  When
-            accessing attributes dynamically, favor using the dict-access
-            scheme, e.g. ``mapper.attrs[somename]`` over
-            ``getattr(mapper.attrs, somename)`` to avoid name collisions.
-
-        .. seealso::
-
-            :attr:`.Mapper.all_orm_descriptors`
-
-        """
-        if Mapper._new_mappers:
-            configure_mappers()
-        return util.ImmutableProperties(self._props)
-
-    @util.memoized_property
-    def all_orm_descriptors(self):
-        """A namespace of all :class:`.InspectionAttr` attributes associated
-        with the mapped class.
-
-        These attributes are in all cases Python :term:`descriptors`
-        associated with the mapped class or its superclasses.
-
-        This namespace includes attributes that are mapped to the class
-        as well as attributes declared by extension modules.
-        It includes any Python descriptor type that inherits from
-        :class:`.InspectionAttr`.  This includes
-        :class:`.QueryableAttribute`, as well as extension types such as
-        :class:`.hybrid_property`, :class:`.hybrid_method` and
-        :class:`.AssociationProxy`.
-
-        To distinguish between mapped attributes and extension attributes,
-        the attribute :attr:`.InspectionAttr.extension_type` will refer
-        to a constant that distinguishes between different extension types.
-
-        When dealing with a :class:`.QueryableAttribute`, the
-        :attr:`.QueryableAttribute.property` attribute refers to the
-        :class:`.MapperProperty` property, which is what you get when
-        referring to the collection of mapped properties via
-        :attr:`.Mapper.attrs`.
-
-        .. warning::
-
-            the :attr:`.Mapper.all_orm_descriptors` accessor namespace is an
-            instance of :class:`.OrderedProperties`.  This is
-            a dictionary-like object which includes a small number of
-            named methods such as :meth:`.OrderedProperties.items`
-            and :meth:`.OrderedProperties.values`.  When
-            accessing attributes dynamically, favor using the dict-access
-            scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over
-            ``getattr(mapper.all_orm_descriptors, somename)`` to avoid name
-            collisions.
-
-        .. versionadded:: 0.8.0
-
-        .. seealso::
-
-            :attr:`.Mapper.attrs`
-
-        """
-        return util.ImmutableProperties(
-            dict(self.class_manager._all_sqla_attributes()))
-
-    @_memoized_configured_property
-    def synonyms(self):
-        """Return a namespace of all :class:`.SynonymProperty`
-        properties maintained by this :class:`.Mapper`.
-
-        .. seealso::
-
-            :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
-            objects.
-
-        """
-        return self._filter_properties(properties.SynonymProperty)
-
-    @_memoized_configured_property
-    def column_attrs(self):
-        """Return a namespace of all :class:`.ColumnProperty`
-        properties maintained by this :class:`.Mapper`.
-
-        .. seealso::
-
-            :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
-            objects.
-
-        """
-        return self._filter_properties(properties.ColumnProperty)
-
-    @_memoized_configured_property
-    def relationships(self):
-        """Return a namespace of all :class:`.RelationshipProperty`
-        properties maintained by this :class:`.Mapper`.
-
-        .. warning::
-
-            the :attr:`.Mapper.relationships` accessor namespace is an
-            instance of :class:`.OrderedProperties`.  This is
-            a dictionary-like object which includes a small number of
-            named methods such as :meth:`.OrderedProperties.items`
-            and :meth:`.OrderedProperties.values`.  When
-            accessing attributes dynamically, favor using the dict-access
-            scheme, e.g. ``mapper.relationships[somename]`` over
``mapper.attrs[somename]`` over - ``getattr(mapper.attrs, somename)`` to avoid name collisions. - - .. seealso:: - - :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` - objects. - - """ - return self._filter_properties(properties.RelationshipProperty) - - @_memoized_configured_property - def composites(self): - """Return a namespace of all :class:`.CompositeProperty` - properties maintained by this :class:`.Mapper`. - - .. seealso:: - - :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` - objects. - - """ - return self._filter_properties(properties.CompositeProperty) - - def _filter_properties(self, type_): - if Mapper._new_mappers: - configure_mappers() - return util.ImmutableProperties(util.OrderedDict( - (k, v) for k, v in self._props.items() - if isinstance(v, type_) - )) - - @_memoized_configured_property - def _get_clause(self): - """create a "get clause" based on the primary key. this is used - by query.get() and many-to-one lazyloads to load this item - by primary key. - - """ - params = [(primary_key, sql.bindparam(None, type_=primary_key.type)) - for primary_key in self.primary_key] - return sql.and_(*[k == v for (k, v) in params]), \ - util.column_dict(params) - - @_memoized_configured_property - def _equivalent_columns(self): - """Create a map of all *equivalent* columns, based on - the determination of column pairs that are equated to - one another based on inherit condition. This is designed - to work with the queries that util.polymorphic_union - comes up with, which often don't include the columns from - the base table directly (including the subclass table columns - only). - - The resulting structure is a dictionary of columns mapped - to lists of equivalent columns, i.e. - - { - tablea.col1: - set([tableb.col1, tablec.col1]), - tablea.col2: - set([tabled.col2]) - } - - """ - result = util.column_dict() - - def visit_binary(binary): - if binary.operator == operators.eq: - if binary.left in result: - result[binary.left].add(binary.right) - else: - result[binary.left] = util.column_set((binary.right,)) - if binary.right in result: - result[binary.right].add(binary.left) - else: - result[binary.right] = util.column_set((binary.left,)) - for mapper in self.base_mapper.self_and_descendants: - if mapper.inherit_condition is not None: - visitors.traverse( - mapper.inherit_condition, {}, - {'binary': visit_binary}) - - return result - - def _is_userland_descriptor(self, obj): - if isinstance(obj, (_MappedAttribute, - instrumentation.ClassManager, - expression.ColumnElement)): - return False - else: - return True - - def _should_exclude(self, name, assigned_name, local, column): - """determine whether a particular property should be implicitly - present on the class. - - This occurs when properties are propagated from an inherited class, or - are applied from the columns present in the mapped table. 
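
For context, the ``attrs``, ``column_attrs``, ``relationships`` and ``synonyms`` namespaces described above are public API. A minimal sketch of how they look from user code, assuming a SQLAlchemy 1.0-era install; the User/Address models are hypothetical and are reused by the later sketches:

from sqlalchemy import Column, ForeignKey, Integer, String, inspect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    addresses = relationship("Address")

class Address(Base):
    __tablename__ = 'address'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))

mapper = inspect(User)                    # the Mapper for User
print(list(mapper.attrs.keys()))          # every MapperProperty key
print(list(mapper.column_attrs.keys()))   # pre-filtered: column properties
print(list(mapper.relationships.keys()))  # pre-filtered: relationships
print(mapper.attrs['name'])               # dict-style access, as advised above
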
- - """ - - # check for class-bound attributes and/or descriptors, - # either local or from an inherited class - if local: - if self.class_.__dict__.get(assigned_name, None) is not None \ - and self._is_userland_descriptor( - self.class_.__dict__[assigned_name]): - return True - else: - if getattr(self.class_, assigned_name, None) is not None \ - and self._is_userland_descriptor( - getattr(self.class_, assigned_name)): - return True - - if self.include_properties is not None and \ - name not in self.include_properties and \ - (column is None or column not in self.include_properties): - self._log("not including property %s" % (name)) - return True - - if self.exclude_properties is not None and \ - ( - name in self.exclude_properties or - (column is not None and column in self.exclude_properties) - ): - self._log("excluding property %s" % (name)) - return True - - return False - - def common_parent(self, other): - """Return true if the given mapper shares a - common inherited parent as this mapper.""" - - return self.base_mapper is other.base_mapper - - def _canload(self, state, allow_subtypes): - s = self.primary_mapper() - if self.polymorphic_on is not None or allow_subtypes: - return _state_mapper(state).isa(s) - else: - return _state_mapper(state) is s - - def isa(self, other): - """Return True if the this mapper inherits from the given mapper.""" - - m = self - while m and m is not other: - m = m.inherits - return bool(m) - - def iterate_to_root(self): - m = self - while m: - yield m - m = m.inherits - - @_memoized_configured_property - def self_and_descendants(self): - """The collection including this mapper and all descendant mappers. - - This includes not just the immediately inheriting mappers but - all their inheriting mappers as well. - - """ - descendants = [] - stack = deque([self]) - while stack: - item = stack.popleft() - descendants.append(item) - stack.extend(item._inheriting_mappers) - return util.WeakSequence(descendants) - - def polymorphic_iterator(self): - """Iterate through the collection including this mapper and - all descendant mappers. - - This includes not just the immediately inheriting mappers but - all their inheriting mappers as well. - - To iterate through an entire hierarchy, use - ``mapper.base_mapper.polymorphic_iterator()``. - - """ - return iter(self.self_and_descendants) - - def primary_mapper(self): - """Return the primary mapper corresponding to this mapper's class key - (class).""" - - return self.class_manager.mapper - - @property - def primary_base_mapper(self): - return self.class_manager.mapper.base_mapper - - def _result_has_identity_key(self, result, adapter=None): - pk_cols = self.primary_key - if adapter: - pk_cols = [adapter.columns[c] for c in pk_cols] - for col in pk_cols: - if not result._has_key(col): - return False - else: - return True - - def identity_key_from_row(self, row, adapter=None): - """Return an identity-map key for use in storing/retrieving an - item from the identity map. - - :param row: A :class:`.RowProxy` instance. The columns which are - mapped by this :class:`.Mapper` should be locatable in the row, - preferably via the :class:`.Column` object directly (as is the case - when a :func:`.select` construct is executed), or via string names of - the form ``_``. 
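
The identity-key helpers defined here are public as well. A short sketch, reusing the hypothetical User model from the sketch above; the engine/session names are likewise made up:

from sqlalchemy import create_engine, inspect
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

ed = User(name='ed')
session.add(ed)
session.commit()

mapper = inspect(User)
print(mapper.identity_key_from_instance(ed))           # roughly (User, (1,))
print(mapper.identity_key_from_primary_key((ed.id,)))  # the same key
print(mapper.primary_key_from_instance(ed))            # [1]
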
-
-        """
-        pk_cols = self.primary_key
-        if adapter:
-            pk_cols = [adapter.columns[c] for c in pk_cols]
-
-        return self._identity_class, \
-            tuple(row[column] for column in pk_cols)
-
-    def identity_key_from_primary_key(self, primary_key):
-        """Return an identity-map key for use in storing/retrieving an
-        item from an identity map.
-
-        :param primary_key: A list of values indicating the identifier.
-
-        """
-        return self._identity_class, tuple(primary_key)
-
-    def identity_key_from_instance(self, instance):
-        """Return the identity key for the given instance, based on
-        its primary key attributes.
-
-        If the instance's state is expired, calling this method
-        will result in a database check to see if the object has been deleted.
-        If the row no longer exists,
-        :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
-
-        This value is typically also found on the instance state under the
-        attribute name `key`.
-
-        """
-        return self.identity_key_from_primary_key(
-            self.primary_key_from_instance(instance))
-
-    def _identity_key_from_state(self, state):
-        dict_ = state.dict
-        manager = state.manager
-        return self._identity_class, tuple([
-            manager[self._columntoproperty[col].key].
-            impl.get(state, dict_, attributes.PASSIVE_RETURN_NEVER_SET)
-            for col in self.primary_key
-        ])
-
-    def primary_key_from_instance(self, instance):
-        """Return the list of primary key values for the given
-        instance.
-
-        If the instance's state is expired, calling this method
-        will result in a database check to see if the object has been deleted.
-        If the row no longer exists,
-        :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
-
-        """
-        state = attributes.instance_state(instance)
-        return self._primary_key_from_state(state, attributes.PASSIVE_OFF)
-
-    def _primary_key_from_state(
-            self, state, passive=attributes.PASSIVE_RETURN_NEVER_SET):
-        dict_ = state.dict
-        manager = state.manager
-        return [
-            manager[prop.key].
- impl.get(state, dict_, passive) - for prop in self._identity_key_props - ] - - @_memoized_configured_property - def _identity_key_props(self): - return [self._columntoproperty[col] for col in self.primary_key] - - @_memoized_configured_property - def _all_pk_props(self): - collection = set() - for table in self.tables: - collection.update(self._pks_by_table[table]) - return collection - - @_memoized_configured_property - def _should_undefer_in_wildcard(self): - cols = set(self.primary_key) - if self.polymorphic_on is not None: - cols.add(self.polymorphic_on) - return cols - - @_memoized_configured_property - def _primary_key_propkeys(self): - return set([prop.key for prop in self._all_pk_props]) - - def _get_state_attr_by_column( - self, state, dict_, column, - passive=attributes.PASSIVE_RETURN_NEVER_SET): - prop = self._columntoproperty[column] - return state.manager[prop.key].impl.get(state, dict_, passive=passive) - - def _set_committed_state_attr_by_column(self, state, dict_, column, value): - prop = self._columntoproperty[column] - state.manager[prop.key].impl.set_committed_value(state, dict_, value) - - def _set_state_attr_by_column(self, state, dict_, column, value): - prop = self._columntoproperty[column] - state.manager[prop.key].impl.set(state, dict_, value, None) - - def _get_committed_attr_by_column(self, obj, column): - state = attributes.instance_state(obj) - dict_ = attributes.instance_dict(obj) - return self._get_committed_state_attr_by_column( - state, dict_, column, passive=attributes.PASSIVE_OFF) - - def _get_committed_state_attr_by_column( - self, state, dict_, column, - passive=attributes.PASSIVE_RETURN_NEVER_SET): - - prop = self._columntoproperty[column] - return state.manager[prop.key].impl.\ - get_committed_value(state, dict_, passive=passive) - - def _optimized_get_statement(self, state, attribute_names): - """assemble a WHERE clause which retrieves a given state by primary - key, using a minimized set of tables. - - Applies to a joined-table inheritance mapper where the - requested attribute names are only present on joined tables, - not the base table. The WHERE clause attempts to include - only those tables to minimize joins. 
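
To see the minimized-table behavior described above in action: with joined-table inheritance, expiring an attribute that lives only on the subclass table lets the refresh avoid joining the base table. A sketch under the same hypothetical setup; the narrow SELECT is an expectation, not a guarantee, since it depends on the mapping:

class Person(Base):
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    type = Column(String)
    __mapper_args__ = {'polymorphic_on': type,
                       'polymorphic_identity': 'person'}

class Engineer(Person):
    __tablename__ = 'engineer'
    id = Column(Integer, ForeignKey('person.id'), primary_key=True)
    specialty = Column(String)
    __mapper_args__ = {'polymorphic_identity': 'engineer'}

Base.metadata.create_all(engine)
eng = Engineer(specialty='pumps')
session.add(eng)
session.commit()

session.expire(eng, ['specialty'])  # expire only the subclass-local attribute
print(eng.specialty)                # refresh can SELECT from 'engineer' alone
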
- - """ - props = self._props - - tables = set(chain( - *[sql_util.find_tables(c, check_columns=True) - for key in attribute_names - for c in props[key].columns] - )) - - if self.base_mapper.local_table in tables: - return None - - class ColumnsNotAvailable(Exception): - pass - - def visit_binary(binary): - leftcol = binary.left - rightcol = binary.right - if leftcol is None or rightcol is None: - return - - if leftcol.table not in tables: - leftval = self._get_committed_state_attr_by_column( - state, state.dict, - leftcol, - passive=attributes.PASSIVE_NO_INITIALIZE) - if leftval in orm_util._none_set: - raise ColumnsNotAvailable() - binary.left = sql.bindparam(None, leftval, - type_=binary.right.type) - elif rightcol.table not in tables: - rightval = self._get_committed_state_attr_by_column( - state, state.dict, - rightcol, - passive=attributes.PASSIVE_NO_INITIALIZE) - if rightval in orm_util._none_set: - raise ColumnsNotAvailable() - binary.right = sql.bindparam(None, rightval, - type_=binary.right.type) - - allconds = [] - - try: - start = False - for mapper in reversed(list(self.iterate_to_root())): - if mapper.local_table in tables: - start = True - elif not isinstance(mapper.local_table, - expression.TableClause): - return None - if start and not mapper.single: - allconds.append(visitors.cloned_traverse( - mapper.inherit_condition, - {}, - {'binary': visit_binary} - ) - ) - except ColumnsNotAvailable: - return None - - cond = sql.and_(*allconds) - - cols = [] - for key in attribute_names: - cols.extend(props[key].columns) - return sql.select(cols, cond, use_labels=True) - - def cascade_iterator(self, type_, state, halt_on=None): - """Iterate each element and its mapper in an object graph, - for all relationships that meet the given cascade rule. - - :param type_: - The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``, - etc.). - - .. note:: the ``"all"`` cascade is not accepted here. For a generic - object traversal function, see :ref:`faq_walk_objects`. - - :param state: - The lead InstanceState. child items will be processed per - the relationships defined for this object's mapper. - - :return: the method yields individual object instances. - - .. seealso:: - - :ref:`unitofwork_cascades` - - :ref:`faq_walk_objects` - illustrates a generic function to - traverse all objects without relying on cascades. 
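
cascade_iterator() is public; given an InstanceState, it walks every object reachable under the named cascade rule, yielding the 4-tuples shown in the implementation below. A sketch with the hypothetical models from above:

from sqlalchemy import inspect

ed = User(name='ed', addresses=[Address(), Address()])
state = inspect(ed)   # the InstanceState for the instance

for obj, obj_mapper, obj_state, obj_dict in \
        inspect(User).cascade_iterator('save-update', state):
    print(obj, obj_mapper)   # each Address reachable via save-update
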
- - """ - visited_states = set() - prp, mpp = object(), object() - - visitables = deque([(deque(self._props.values()), prp, - state, state.dict)]) - - while visitables: - iterator, item_type, parent_state, parent_dict = visitables[-1] - if not iterator: - visitables.pop() - continue - - if item_type is prp: - prop = iterator.popleft() - if type_ not in prop.cascade: - continue - queue = deque(prop.cascade_iterator( - type_, parent_state, parent_dict, - visited_states, halt_on)) - if queue: - visitables.append((queue, mpp, None, None)) - elif item_type is mpp: - instance, instance_mapper, corresponding_state, \ - corresponding_dict = iterator.popleft() - yield instance, instance_mapper, \ - corresponding_state, corresponding_dict - visitables.append((deque(instance_mapper._props.values()), - prp, corresponding_state, - corresponding_dict)) - - @_memoized_configured_property - def _compiled_cache(self): - return util.LRUCache(self._compiled_cache_size) - - @_memoized_configured_property - def _sorted_tables(self): - table_to_mapper = {} - - for mapper in self.base_mapper.self_and_descendants: - for t in mapper.tables: - table_to_mapper.setdefault(t, mapper) - - extra_dependencies = [] - for table, mapper in table_to_mapper.items(): - super_ = mapper.inherits - if super_: - extra_dependencies.extend([ - (super_table, table) - for super_table in super_.tables - ]) - - def skip(fk): - # attempt to skip dependencies that are not - # significant to the inheritance chain - # for two tables that are related by inheritance. - # while that dependency may be important, it's technically - # not what we mean to sort on here. - parent = table_to_mapper.get(fk.parent.table) - dep = table_to_mapper.get(fk.column.table) - if parent is not None and \ - dep is not None and \ - dep is not parent and \ - dep.inherit_condition is not None: - cols = set(sql_util._find_columns(dep.inherit_condition)) - if parent.inherit_condition is not None: - cols = cols.union(sql_util._find_columns( - parent.inherit_condition)) - return fk.parent not in cols and fk.column not in cols - else: - return fk.parent not in cols - return False - - sorted_ = sql_util.sort_tables(table_to_mapper, - skip_fn=skip, - extra_dependencies=extra_dependencies) - - ret = util.OrderedDict() - for t in sorted_: - ret[t] = table_to_mapper[t] - return ret - - def _memo(self, key, callable_): - if key in self._memoized_values: - return self._memoized_values[key] - else: - self._memoized_values[key] = value = callable_() - return value - - @util.memoized_property - def _table_to_equated(self): - """memoized map of tables to collections of columns to be - synchronized upwards to the base mapper.""" - - result = util.defaultdict(list) - - for table in self._sorted_tables: - cols = set(table.c) - for m in self.iterate_to_root(): - if m._inherits_equated_pairs and \ - cols.intersection( - util.reduce(set.union, - [l.proxy_set for l, r in - m._inherits_equated_pairs]) - ): - result[table].append((m, m._inherits_equated_pairs)) - - return result - - -def configure_mappers(): - """Initialize the inter-mapper relationships of all mappers that - have been constructed thus far. - - This function can be called any number of times, but in - most cases is invoked automatically, the first time mappings are used, - as well as whenever mappings are used and additional not-yet-configured - mappers have been constructed. 
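
configure_mappers() and the event hooks it drives are public; a minimal sketch of wiring up the hooks named below (listening on the mapper() function targets all mappers):

from sqlalchemy import event
from sqlalchemy.orm import configure_mappers, mapper

@event.listens_for(mapper, 'before_configured')
def _before_configured():
    print('mapper configuration starting')

@event.listens_for(mapper, 'mapper_configured')
def _one_configured(mp, cls):
    print('configured:', cls.__name__)

configure_mappers()   # idempotent; otherwise runs on first ORM use
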
- - Points at which this occur include when a mapped class is instantiated - into an instance, as well as when the :meth:`.Session.query` method - is used. - - The :func:`.configure_mappers` function provides several event hooks - that can be used to augment its functionality. These methods include: - - * :meth:`.MapperEvents.before_configured` - called once before - :func:`.configure_mappers` does any work; this can be used to establish - additional options, properties, or related mappings before the operation - proceeds. - - * :meth:`.MapperEvents.mapper_configured` - called as each indivudal - :class:`.Mapper` is configured within the process; will include all - mapper state except for backrefs set up by other mappers that are still - to be configured. - - * :meth:`.MapperEvents.after_configured` - called once after - :func:`.configure_mappers` is complete; at this stage, all - :class:`.Mapper` objects that are known to SQLAlchemy will be fully - configured. Note that the calling application may still have other - mappings that haven't been produced yet, such as if they are in modules - as yet unimported. - - """ - - if not Mapper._new_mappers: - return - - _CONFIGURE_MUTEX.acquire() - try: - global _already_compiling - if _already_compiling: - return - _already_compiling = True - try: - - # double-check inside mutex - if not Mapper._new_mappers: - return - - Mapper.dispatch._for_class(Mapper).before_configured() - # initialize properties on all mappers - # note that _mapper_registry is unordered, which - # may randomly conceal/reveal issues related to - # the order of mapper compilation - - for mapper in list(_mapper_registry): - if getattr(mapper, '_configure_failed', False): - e = sa_exc.InvalidRequestError( - "One or more mappers failed to initialize - " - "can't proceed with initialization of other " - "mappers. Original exception was: %s" - % mapper._configure_failed) - e._configure_failed = mapper._configure_failed - raise e - if not mapper.configured: - try: - mapper._post_configure_properties() - mapper._expire_memoizations() - mapper.dispatch.mapper_configured( - mapper, mapper.class_) - except Exception: - exc = sys.exc_info()[1] - if not hasattr(exc, '_configure_failed'): - mapper._configure_failed = exc - raise - - Mapper._new_mappers = False - finally: - _already_compiling = False - finally: - _CONFIGURE_MUTEX.release() - Mapper.dispatch._for_class(Mapper).after_configured() - - -def reconstructor(fn): - """Decorate a method as the 'reconstructor' hook. - - Designates a method as the "reconstructor", an ``__init__``-like - method that will be called by the ORM after the instance has been - loaded from the database or otherwise reconstituted. - - The reconstructor will be invoked with no arguments. Scalar - (non-collection) database-mapped attributes of the instance will - be available for use within the function. Eagerly-loaded - collections are generally not yet available and will usually only - contain the first element. ORM state changes made to objects at - this stage will not be recorded for the next flush() operation, so - the activity within a reconstructor should be conservative. - - """ - fn.__sa_reconstructor__ = True - return fn - - -def validates(*names, **kw): - """Decorate a method as a 'validator' for one or more named properties. - - Designates a method as a validator, a method which receives the - name of the attribute as well as a value to be assigned, or in the - case of a collection, the value to be added to the collection. 
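
Both decorators defined in this stretch are part of the public ORM API. A minimal sketch; the Account model is hypothetical:

from sqlalchemy.orm import reconstructor, validates

class Account(Base):
    __tablename__ = 'account'
    id = Column(Integer, primary_key=True)
    email = Column(String)

    @validates('email')
    def _check_email(self, key, value):
        # raise to halt the assignment, or return a (possibly
        # modified) value to let it proceed
        if '@' not in value:
            raise ValueError("invalid email: %r" % value)
        return value

    @reconstructor
    def _init_on_load(self):
        # called instead of __init__ when loaded from the database
        self._cache = {}
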
- The function can then raise validation exceptions to halt the - process from continuing (where Python's built-in ``ValueError`` - and ``AssertionError`` exceptions are reasonable choices), or can - modify or replace the value before proceeding. The function should - otherwise return the given value. - - Note that a validator for a collection **cannot** issue a load of that - collection within the validation routine - this usage raises - an assertion to avoid recursion overflows. This is a reentrant - condition which is not supported. - - :param \*names: list of attribute names to be validated. - :param include_removes: if True, "remove" events will be - sent as well - the validation function must accept an additional - argument "is_remove" which will be a boolean. - - .. versionadded:: 0.7.7 - :param include_backrefs: defaults to ``True``; if ``False``, the - validation function will not emit if the originator is an attribute - event related via a backref. This can be used for bi-directional - :func:`.validates` usage where only one validator should emit per - attribute operation. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :ref:`simple_validators` - usage examples for :func:`.validates` - - """ - include_removes = kw.pop('include_removes', False) - include_backrefs = kw.pop('include_backrefs', True) - - def wrap(fn): - fn.__sa_validators__ = names - fn.__sa_validation_opts__ = { - "include_removes": include_removes, - "include_backrefs": include_backrefs - } - return fn - return wrap - - -def _event_on_load(state, ctx): - instrumenting_mapper = state.manager.info[_INSTRUMENTOR] - if instrumenting_mapper._reconstructor: - instrumenting_mapper._reconstructor(state.obj()) - - -def _event_on_first_init(manager, cls): - """Initial mapper compilation trigger. - - instrumentation calls this one when InstanceState - is first generated, and is needed for legacy mutable - attributes to work. - """ - - instrumenting_mapper = manager.info.get(_INSTRUMENTOR) - if instrumenting_mapper: - if Mapper._new_mappers: - configure_mappers() - - -def _event_on_init(state, args, kwargs): - """Run init_instance hooks. - - This also includes mapper compilation, normally not needed - here but helps with some piecemeal configuration - scenarios (such as in the ORM tutorial). - - """ - - instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) - if instrumenting_mapper: - if Mapper._new_mappers: - configure_mappers() - if instrumenting_mapper._set_polymorphic_identity: - instrumenting_mapper._set_polymorphic_identity(state) - - -class _ColumnMapping(dict): - """Error reporting helper for mapper._columntoproperty.""" - - __slots__ = 'mapper', - - def __init__(self, mapper): - self.mapper = mapper - - def __missing__(self, column): - prop = self.mapper._props.get(column) - if prop: - raise orm_exc.UnmappedColumnError( - "Column '%s.%s' is not available, due to " - "conflicting property '%s':%r" % ( - column.table.name, column.name, column.key, prop)) - raise orm_exc.UnmappedColumnError( - "No column %s is configured on mapper %s..." 
%
-            (column, self.mapper))
diff --git a/python/sqlalchemy/orm/path_registry.py b/python/sqlalchemy/orm/path_registry.py
deleted file mode 100644
index 9670a07f..00000000
--- a/python/sqlalchemy/orm/path_registry.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# orm/path_registry.py
-# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""Path tracking utilities, representing mapper graph traversals.
-
-"""
-
-from .. import inspection
-from .. import util
-from .. import exc
-from itertools import chain
-from .base import class_mapper
-import logging
-
-log = logging.getLogger(__name__)
-
-
-def _unreduce_path(path):
-    return PathRegistry.deserialize(path)
-
-
-_WILDCARD_TOKEN = "*"
-_DEFAULT_TOKEN = "_sa_default"
-
-
-class PathRegistry(object):
-    """Represent query load paths and registry functions.
-
-    Basically represents structures like:
-
-    (<User mapper>, "orders", <Order mapper>, "items", <Item mapper>)
-
-    These structures are generated by things like
-    query options (joinedload(), subqueryload(), etc.) and are
-    used to compose keys stored in the query._attributes dictionary
-    for various options.
-
-    They are then re-composed at query compile/result row time as
-    the query is formed and as rows are fetched, where they again
-    serve to compose keys to look up options in the context.attributes
-    dictionary, which is copied from query._attributes.
-
-    The path structure has a limited amount of caching, where each
-    "root" ultimately pulls from a fixed registry associated with
-    the first mapper, that also contains elements for each of its
-    property keys.  However paths longer than two elements, which
-    are the exception rather than the rule, are generated on an
-    as-needed basis.
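
These registries are internal, but they can be poked at for illustration. A sketch, reusing the hypothetical User mapping from earlier; everything with a leading underscore here is private API and may change between releases:

from sqlalchemy import inspect

u_mapper = inspect(User)
path = u_mapper._path_registry[u_mapper.attrs['addresses']]
print(path.path)     # (Mapper for User, User.addresses property)
print(path.length)   # 2
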
- - """ - - is_token = False - is_root = False - - def __eq__(self, other): - return other is not None and \ - self.path == other.path - - def set(self, attributes, key, value): - log.debug("set '%s' on path '%s' to '%s'", key, self, value) - attributes[(key, self.path)] = value - - def setdefault(self, attributes, key, value): - log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value) - attributes.setdefault((key, self.path), value) - - def get(self, attributes, key, value=None): - key = (key, self.path) - if key in attributes: - return attributes[key] - else: - return value - - def __len__(self): - return len(self.path) - - @property - def length(self): - return len(self.path) - - def pairs(self): - path = self.path - for i in range(0, len(path), 2): - yield path[i], path[i + 1] - - def contains_mapper(self, mapper): - for path_mapper in [ - self.path[i] for i in range(0, len(self.path), 2) - ]: - if path_mapper.is_mapper and \ - path_mapper.isa(mapper): - return True - else: - return False - - def contains(self, attributes, key): - return (key, self.path) in attributes - - def __reduce__(self): - return _unreduce_path, (self.serialize(), ) - - def serialize(self): - path = self.path - return list(zip( - [m.class_ for m in [path[i] for i in range(0, len(path), 2)]], - [path[i].key for i in range(1, len(path), 2)] + [None] - )) - - @classmethod - def deserialize(cls, path): - if path is None: - return None - - p = tuple(chain(*[(class_mapper(mcls), - class_mapper(mcls).attrs[key] - if key is not None else None) - for mcls, key in path])) - if p and p[-1] is None: - p = p[0:-1] - return cls.coerce(p) - - @classmethod - def per_mapper(cls, mapper): - return EntityRegistry( - cls.root, mapper - ) - - @classmethod - def coerce(cls, raw): - return util.reduce(lambda prev, next: prev[next], raw, cls.root) - - def token(self, token): - if token.endswith(':' + _WILDCARD_TOKEN): - return TokenRegistry(self, token) - elif token.endswith(":" + _DEFAULT_TOKEN): - return TokenRegistry(self.root, token) - else: - raise exc.ArgumentError("invalid token: %s" % token) - - def __add__(self, other): - return util.reduce( - lambda prev, next: prev[next], - other.path, self) - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.path, ) - - -class RootRegistry(PathRegistry): - """Root registry, defers to mappers so that - paths are maintained per-root-mapper. - - """ - path = () - has_entity = False - is_aliased_class = False - is_root = True - - def __getitem__(self, entity): - return entity._path_registry - -PathRegistry.root = RootRegistry() - - -class TokenRegistry(PathRegistry): - def __init__(self, parent, token): - self.token = token - self.parent = parent - self.path = parent.path + (token,) - - has_entity = False - - is_token = True - - def generate_for_superclasses(self): - if not self.parent.is_aliased_class and not self.parent.is_root: - for ent in self.parent.mapper.iterate_to_root(): - yield TokenRegistry(self.parent.parent[ent], self.token) - else: - yield self - - def __getitem__(self, entity): - raise NotImplementedError() - - -class PropRegistry(PathRegistry): - def __init__(self, parent, prop): - # restate this path in terms of the - # given MapperProperty's parent. 
- insp = inspection.inspect(parent[-1]) - if not insp.is_aliased_class or insp._use_mapper_path: - parent = parent.parent[prop.parent] - elif insp.is_aliased_class and insp.with_polymorphic_mappers: - if prop.parent is not insp.mapper and \ - prop.parent in insp.with_polymorphic_mappers: - subclass_entity = parent[-1]._entity_for_mapper(prop.parent) - parent = parent.parent[subclass_entity] - - self.prop = prop - self.parent = parent - self.path = parent.path + (prop,) - - def __str__(self): - return " -> ".join( - str(elem) for elem in self.path - ) - - @util.memoized_property - def has_entity(self): - return hasattr(self.prop, "mapper") - - @util.memoized_property - def entity(self): - return self.prop.mapper - - @util.memoized_property - def _wildcard_path_loader_key(self): - """Given a path (mapper A, prop X), replace the prop with the wildcard, - e.g. (mapper A, 'relationship:.*') or (mapper A, 'column:.*'), then - return within the ("loader", path) structure. - - """ - return ("loader", - self.parent.token( - "%s:%s" % ( - self.prop.strategy_wildcard_key, _WILDCARD_TOKEN) - ).path - ) - - @util.memoized_property - def _default_path_loader_key(self): - return ("loader", - self.parent.token( - "%s:%s" % (self.prop.strategy_wildcard_key, - _DEFAULT_TOKEN) - ).path - ) - - @util.memoized_property - def _loader_key(self): - return ("loader", self.path) - - @property - def mapper(self): - return self.entity - - @property - def entity_path(self): - return self[self.entity] - - def __getitem__(self, entity): - if isinstance(entity, (int, slice)): - return self.path[entity] - else: - return EntityRegistry( - self, entity - ) - - -class EntityRegistry(PathRegistry, dict): - is_aliased_class = False - has_entity = True - - def __init__(self, parent, entity): - self.key = entity - self.parent = parent - self.is_aliased_class = entity.is_aliased_class - self.entity = entity - self.path = parent.path + (entity,) - self.entity_path = self - - @property - def mapper(self): - return inspection.inspect(self.entity).mapper - - def __bool__(self): - return True - __nonzero__ = __bool__ - - def __getitem__(self, entity): - if isinstance(entity, (int, slice)): - return self.path[entity] - else: - return dict.__getitem__(self, entity) - - def __missing__(self, key): - self[key] = item = PropRegistry(self, key) - return item diff --git a/python/sqlalchemy/orm/persistence.py b/python/sqlalchemy/orm/persistence.py deleted file mode 100644 index 00a6cc5a..00000000 --- a/python/sqlalchemy/orm/persistence.py +++ /dev/null @@ -1,1385 +0,0 @@ -# orm/persistence.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""private module containing functions used to emit INSERT, UPDATE -and DELETE statements on behalf of a :class:`.Mapper` and its descending -mappers. - -The functions here are called only by the unit of work functions -in unitofwork.py. - -""" - -import operator -from itertools import groupby, chain -from .. import sql, util, exc as sa_exc -from . import attributes, sync, exc as orm_exc, evaluator -from .base import state_str, _attr_as_key, _entity_descriptor -from ..sql import expression -from ..sql.base import _from_objects -from . 
import loading - - -def _bulk_insert( - mapper, mappings, session_transaction, isstates, return_defaults): - base_mapper = mapper.base_mapper - - cached_connections = _cached_connection_dict(base_mapper) - - if session_transaction.session.connection_callable: - raise NotImplementedError( - "connection_callable / per-instance sharding " - "not supported in bulk_insert()") - - if isstates: - if return_defaults: - states = [(state, state.dict) for state in mappings] - mappings = [dict_ for (state, dict_) in states] - else: - mappings = [state.dict for state in mappings] - else: - mappings = list(mappings) - - connection = session_transaction.connection(base_mapper) - for table, super_mapper in base_mapper._sorted_tables.items(): - if not mapper.isa(super_mapper): - continue - - records = ( - (None, state_dict, params, mapper, - connection, value_params, has_all_pks, has_all_defaults) - for - state, state_dict, params, mp, - conn, value_params, has_all_pks, - has_all_defaults in _collect_insert_commands(table, ( - (None, mapping, mapper, connection) - for mapping in mappings), - bulk=True, return_defaults=return_defaults - ) - ) - _emit_insert_statements(base_mapper, None, - cached_connections, - super_mapper, table, records, - bookkeeping=return_defaults) - - if return_defaults and isstates: - identity_cls = mapper._identity_class - identity_props = [p.key for p in mapper._identity_key_props] - for state, dict_ in states: - state.key = ( - identity_cls, - tuple([dict_[key] for key in identity_props]) - ) - - -def _bulk_update(mapper, mappings, session_transaction, - isstates, update_changed_only): - base_mapper = mapper.base_mapper - - cached_connections = _cached_connection_dict(base_mapper) - - def _changed_dict(mapper, state): - return dict( - (k, v) - for k, v in state.dict.items() if k in state.committed_state or k - in mapper._primary_key_propkeys - ) - - if isstates: - if update_changed_only: - mappings = [_changed_dict(mapper, state) for state in mappings] - else: - mappings = [state.dict for state in mappings] - else: - mappings = list(mappings) - - if session_transaction.session.connection_callable: - raise NotImplementedError( - "connection_callable / per-instance sharding " - "not supported in bulk_update()") - - connection = session_transaction.connection(base_mapper) - - for table, super_mapper in base_mapper._sorted_tables.items(): - if not mapper.isa(super_mapper): - continue - - records = _collect_update_commands(None, table, ( - (None, mapping, mapper, connection, - (mapping[mapper._version_id_prop.key] - if mapper._version_id_prop else None)) - for mapping in mappings - ), bulk=True) - - _emit_update_statements(base_mapper, None, - cached_connections, - super_mapper, table, records, - bookkeeping=False) - - -def save_obj( - base_mapper, states, uowtransaction, single=False): - """Issue ``INSERT`` and/or ``UPDATE`` statements for a list - of objects. - - This is called within the context of a UOWTransaction during a - flush operation, given a list of states to be flushed. The - base mapper in an inheritance hierarchy handles the inserts/ - updates for all descendant mappers. 
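
_bulk_insert() and _bulk_update() above back the public Session bulk API added in 1.0. A sketch with the hypothetical session from earlier:

session.bulk_insert_mappings(
    User, [{'name': 'u%d' % i} for i in range(1000)])
session.bulk_save_objects([User(name='solo')])
session.commit()
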
- - """ - - # if batch=false, call _save_obj separately for each object - if not single and not base_mapper.batch: - for state in _sort_states(states): - save_obj(base_mapper, [state], uowtransaction, single=True) - return - - states_to_update = [] - states_to_insert = [] - cached_connections = _cached_connection_dict(base_mapper) - - for (state, dict_, mapper, connection, - has_identity, - row_switch, update_version_id) in _organize_states_for_save( - base_mapper, states, uowtransaction - ): - if has_identity or row_switch: - states_to_update.append( - (state, dict_, mapper, connection, update_version_id) - ) - else: - states_to_insert.append( - (state, dict_, mapper, connection) - ) - - for table, mapper in base_mapper._sorted_tables.items(): - if table not in mapper._pks_by_table: - continue - insert = _collect_insert_commands(table, states_to_insert) - - update = _collect_update_commands( - uowtransaction, table, states_to_update) - - _emit_update_statements(base_mapper, uowtransaction, - cached_connections, - mapper, table, update) - - _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, - mapper, table, insert) - - _finalize_insert_update_commands( - base_mapper, uowtransaction, - chain( - ( - (state, state_dict, mapper, connection, False) - for state, state_dict, mapper, connection in states_to_insert - ), - ( - (state, state_dict, mapper, connection, True) - for state, state_dict, mapper, connection, - update_version_id in states_to_update - ) - ) - ) - - -def post_update(base_mapper, states, uowtransaction, post_update_cols): - """Issue UPDATE statements on behalf of a relationship() which - specifies post_update. - - """ - cached_connections = _cached_connection_dict(base_mapper) - - states_to_update = list(_organize_states_for_post_update( - base_mapper, - states, uowtransaction)) - - for table, mapper in base_mapper._sorted_tables.items(): - if table not in mapper._pks_by_table: - continue - - update = ( - (state, state_dict, sub_mapper, connection) - for - state, state_dict, sub_mapper, connection in states_to_update - if table in sub_mapper._pks_by_table - ) - - update = _collect_post_update_commands(base_mapper, uowtransaction, - table, update, - post_update_cols) - - _emit_post_update_statements(base_mapper, uowtransaction, - cached_connections, - mapper, table, update) - - -def delete_obj(base_mapper, states, uowtransaction): - """Issue ``DELETE`` statements for a list of objects. - - This is called within the context of a UOWTransaction during a - flush operation. - - """ - - cached_connections = _cached_connection_dict(base_mapper) - - states_to_delete = list(_organize_states_for_delete( - base_mapper, - states, - uowtransaction)) - - table_to_mapper = base_mapper._sorted_tables - - for table in reversed(list(table_to_mapper.keys())): - mapper = table_to_mapper[table] - if table not in mapper._pks_by_table: - continue - - delete = _collect_delete_commands(base_mapper, uowtransaction, - table, states_to_delete) - - _emit_delete_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, delete) - - for state, state_dict, mapper, connection, \ - update_version_id in states_to_delete: - mapper.dispatch.after_delete(mapper, connection, state) - - -def _organize_states_for_save(base_mapper, states, uowtransaction): - """Make an initial pass across a set of states for INSERT or - UPDATE. 
- - This includes splitting out into distinct lists for - each, calling before_insert/before_update, obtaining - key information for each state including its dictionary, - mapper, the connection to use for the execution per state, - and the identity flag. - - """ - - for state, dict_, mapper, connection in _connections_for_states( - base_mapper, uowtransaction, - states): - - has_identity = bool(state.key) - - instance_key = state.key or mapper._identity_key_from_state(state) - - row_switch = update_version_id = None - - # call before_XXX extensions - if not has_identity: - mapper.dispatch.before_insert(mapper, connection, state) - else: - mapper.dispatch.before_update(mapper, connection, state) - - if mapper._validate_polymorphic_identity: - mapper._validate_polymorphic_identity(mapper, state, dict_) - - # detect if we have a "pending" instance (i.e. has - # no instance_key attached to it), and another instance - # with the same identity key already exists as persistent. - # convert to an UPDATE if so. - if not has_identity and \ - instance_key in uowtransaction.session.identity_map: - instance = \ - uowtransaction.session.identity_map[instance_key] - existing = attributes.instance_state(instance) - if not uowtransaction.is_deleted(existing): - raise orm_exc.FlushError( - "New instance %s with identity key %s conflicts " - "with persistent instance %s" % - (state_str(state), instance_key, - state_str(existing))) - - base_mapper._log_debug( - "detected row switch for identity %s. " - "will update %s, remove %s from " - "transaction", instance_key, - state_str(state), state_str(existing)) - - # remove the "delete" flag from the existing element - uowtransaction.remove_state_actions(existing) - row_switch = existing - - if (has_identity or row_switch) and mapper.version_id_col is not None: - update_version_id = mapper._get_committed_state_attr_by_column( - row_switch if row_switch else state, - row_switch.dict if row_switch else dict_, - mapper.version_id_col) - - yield (state, dict_, mapper, connection, - has_identity, row_switch, update_version_id) - - -def _organize_states_for_post_update(base_mapper, states, - uowtransaction): - """Make an initial pass across a set of states for UPDATE - corresponding to post_update. - - This includes obtaining key information for each state - including its dictionary, mapper, the connection to use for - the execution per state. - - """ - return _connections_for_states(base_mapper, uowtransaction, states) - - -def _organize_states_for_delete(base_mapper, states, uowtransaction): - """Make an initial pass across a set of states for DELETE. - - This includes calling out before_delete and obtaining - key information for each state including its dictionary, - mapper, the connection to use for the execution per state. - - """ - for state, dict_, mapper, connection in _connections_for_states( - base_mapper, uowtransaction, - states): - - mapper.dispatch.before_delete(mapper, connection, state) - - if mapper.version_id_col is not None: - update_version_id = \ - mapper._get_committed_state_attr_by_column( - state, dict_, - mapper.version_id_col) - else: - update_version_id = None - - yield ( - state, dict_, mapper, connection, update_version_id) - - -def _collect_insert_commands( - table, states_to_insert, - bulk=False, return_defaults=False): - """Identify sets of values to use in INSERT statements for a - list of states. 
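
The reason unset, default-less columns are filled in with None below is so that every parameter dictionary ends up with the same keys, letting the INSERTs batch through a single executemany(). The equivalent effect at the Core level, reusing the hypothetical engine:

users = User.__table__
with engine.connect() as conn:
    # one statement, many parameter sets -> DBAPI executemany()
    conn.execute(users.insert(),
                 [{'name': 'a'}, {'name': 'b'}, {'name': 'c'}])
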
- - """ - for state, state_dict, mapper, connection in states_to_insert: - if table not in mapper._pks_by_table: - continue - - params = {} - value_params = {} - - propkey_to_col = mapper._propkey_to_col[table] - - for propkey in set(propkey_to_col).intersection(state_dict): - value = state_dict[propkey] - col = propkey_to_col[propkey] - if value is None: - continue - elif not bulk and isinstance(value, sql.ClauseElement): - value_params[col.key] = value - else: - params[col.key] = value - - if not bulk: - for colkey in mapper._insert_cols_as_none[table].\ - difference(params).difference(value_params): - params[colkey] = None - - if not bulk or return_defaults: - has_all_pks = mapper._pk_keys_by_table[table].issubset(params) - - if mapper.base_mapper.eager_defaults: - has_all_defaults = mapper._server_default_cols[table].\ - issubset(params) - else: - has_all_defaults = True - else: - has_all_defaults = has_all_pks = True - - if mapper.version_id_generator is not False \ - and mapper.version_id_col is not None and \ - mapper.version_id_col in mapper._cols_by_table[table]: - params[mapper.version_id_col.key] = \ - mapper.version_id_generator(None) - - yield ( - state, state_dict, params, mapper, - connection, value_params, has_all_pks, - has_all_defaults) - - -def _collect_update_commands( - uowtransaction, table, states_to_update, - bulk=False): - """Identify sets of values to use in UPDATE statements for a - list of states. - - This function works intricately with the history system - to determine exactly what values should be updated - as well as how the row should be matched within an UPDATE - statement. Includes some tricky scenarios where the primary - key of an object might have been changed. - - """ - - for state, state_dict, mapper, connection, \ - update_version_id in states_to_update: - - if table not in mapper._pks_by_table: - continue - - pks = mapper._pks_by_table[table] - - value_params = {} - - propkey_to_col = mapper._propkey_to_col[table] - - if bulk: - params = dict( - (propkey_to_col[propkey].key, state_dict[propkey]) - for propkey in - set(propkey_to_col).intersection(state_dict).difference( - mapper._pk_keys_by_table[table]) - ) - else: - params = {} - for propkey in set(propkey_to_col).intersection( - state.committed_state): - value = state_dict[propkey] - col = propkey_to_col[propkey] - - if isinstance(value, sql.ClauseElement): - value_params[col] = value - # guard against values that generate non-__nonzero__ - # objects for __eq__() - elif state.manager[propkey].impl.is_equal( - value, state.committed_state[propkey]) is not True: - params[col.key] = value - - if update_version_id is not None and \ - mapper.version_id_col in mapper._cols_by_table[table]: - - if not bulk and not (params or value_params): - # HACK: check for history in other tables, in case the - # history is only in a different table than the one - # where the version_id_col is. This logic was lost - # from 0.9 -> 1.0.0 and restored in 1.0.6. 
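
The version_id_col handling woven through this function is driven by an ordinary mapper option. A sketch of enabling it; the Widget model is hypothetical:

class Widget(Base):
    __tablename__ = 'widget'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    version_id = Column(Integer, nullable=False)
    # every UPDATE then matches on the old counter value and bumps it;
    # a rowcount mismatch surfaces as StaleDataError further below
    __mapper_args__ = {'version_id_col': version_id}
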
- for prop in mapper._columntoproperty.values(): - history = ( - state.manager[prop.key].impl.get_history( - state, state_dict, - attributes.PASSIVE_NO_INITIALIZE)) - if history.added: - break - else: - # no net change, break - continue - - col = mapper.version_id_col - params[col._label] = update_version_id - - if col.key not in params and \ - mapper.version_id_generator is not False: - val = mapper.version_id_generator(update_version_id) - params[col.key] = val - - elif not (params or value_params): - continue - - if bulk: - pk_params = dict( - (propkey_to_col[propkey]._label, state_dict.get(propkey)) - for propkey in - set(propkey_to_col). - intersection(mapper._pk_keys_by_table[table]) - ) - else: - pk_params = {} - for col in pks: - propkey = mapper._columntoproperty[col].key - - history = state.manager[propkey].impl.get_history( - state, state_dict, attributes.PASSIVE_OFF) - - if history.added: - if not history.deleted or \ - ("pk_cascaded", state, col) in \ - uowtransaction.attributes: - pk_params[col._label] = history.added[0] - params.pop(col.key, None) - else: - # else, use the old value to locate the row - pk_params[col._label] = history.deleted[0] - params[col.key] = history.added[0] - else: - pk_params[col._label] = history.unchanged[0] - if pk_params[col._label] is None: - raise orm_exc.FlushError( - "Can't update table %s using NULL for primary " - "key value on column %s" % (table, col)) - - if params or value_params: - params.update(pk_params) - yield ( - state, state_dict, params, mapper, - connection, value_params) - - -def _collect_post_update_commands(base_mapper, uowtransaction, table, - states_to_update, post_update_cols): - """Identify sets of values to use in UPDATE statements for a - list of states within a post_update operation. 
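
post_update itself is enabled on a relationship(); it exists for mutually dependent rows, where one of the two foreign keys has to be populated by a second UPDATE after both INSERTs. A sketch; Team/Player are hypothetical, and use_alter breaks the circular CREATE TABLE dependency:

class Team(Base):
    __tablename__ = 'team'
    id = Column(Integer, primary_key=True)
    captain_id = Column(Integer,
                        ForeignKey('player.id', use_alter=True,
                                   name='fk_team_captain'))
    captain = relationship('Player', post_update=True,
                           foreign_keys=[captain_id])

class Player(Base):
    __tablename__ = 'player'
    id = Column(Integer, primary_key=True)
    team_id = Column(Integer, ForeignKey('team.id'))
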
- - """ - - for state, state_dict, mapper, connection in states_to_update: - - # assert table in mapper._pks_by_table - - pks = mapper._pks_by_table[table] - params = {} - hasdata = False - - for col in mapper._cols_by_table[table]: - if col in pks: - params[col._label] = \ - mapper._get_state_attr_by_column( - state, - state_dict, col, passive=attributes.PASSIVE_OFF) - - elif col in post_update_cols: - prop = mapper._columntoproperty[col] - history = state.manager[prop.key].impl.get_history( - state, state_dict, - attributes.PASSIVE_NO_INITIALIZE) - if history.added: - value = history.added[0] - params[col.key] = value - hasdata = True - if hasdata: - yield params, connection - - -def _collect_delete_commands(base_mapper, uowtransaction, table, - states_to_delete): - """Identify values to use in DELETE statements for a list of - states to be deleted.""" - - for state, state_dict, mapper, connection, \ - update_version_id in states_to_delete: - - if table not in mapper._pks_by_table: - continue - - params = {} - for col in mapper._pks_by_table[table]: - params[col.key] = \ - value = \ - mapper._get_committed_state_attr_by_column( - state, state_dict, col) - if value is None: - raise orm_exc.FlushError( - "Can't delete from table %s " - "using NULL for primary " - "key value on column %s" % (table, col)) - - if update_version_id is not None and \ - mapper.version_id_col in mapper._cols_by_table[table]: - params[mapper.version_id_col.key] = update_version_id - yield params, connection - - -def _emit_update_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, update, - bookkeeping=True): - """Emit UPDATE statements corresponding to value lists collected - by _collect_update_commands().""" - - needs_version_id = mapper.version_id_col is not None and \ - mapper.version_id_col in mapper._cols_by_table[table] - - def update_stmt(): - clause = sql.and_() - - for col in mapper._pks_by_table[table]: - clause.clauses.append(col == sql.bindparam(col._label, - type_=col.type)) - - if needs_version_id: - clause.clauses.append( - mapper.version_id_col == sql.bindparam( - mapper.version_id_col._label, - type_=mapper.version_id_col.type)) - - stmt = table.update(clause) - if mapper.base_mapper.eager_defaults: - stmt = stmt.return_defaults() - elif mapper.version_id_col is not None: - stmt = stmt.return_defaults(mapper.version_id_col) - - return stmt - - statement = base_mapper._memo(('update', table), update_stmt) - - for (connection, paramkeys, hasvalue), \ - records in groupby( - update, - lambda rec: ( - rec[4], # connection - set(rec[2]), # set of parameter keys - bool(rec[5]))): # whether or not we have "value" parameters - - rows = 0 - records = list(records) - - # TODO: would be super-nice to not have to determine this boolean - # inside the loop here, in the 99.9999% of the time there's only - # one connection in use - assert_singlerow = connection.dialect.supports_sane_rowcount - assert_multirow = assert_singlerow and \ - connection.dialect.supports_sane_multi_rowcount - allow_multirow = not needs_version_id - - if hasvalue: - for state, state_dict, params, mapper, \ - connection, value_params in records: - c = connection.execute( - statement.values(value_params), - params) - if bookkeeping: - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - c, - c.context.compiled_parameters[0], - value_params) - rows += c.rowcount - check_rowcount = True - else: - if not allow_multirow: - check_rowcount = assert_singlerow - for state, state_dict, params, mapper, \ 
- connection, value_params in records: - c = cached_connections[connection].\ - execute(statement, params) - - # TODO: why with bookkeeping=False? - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - c, - c.context.compiled_parameters[0], - value_params) - rows += c.rowcount - else: - multiparams = [rec[2] for rec in records] - - check_rowcount = assert_multirow or ( - assert_singlerow and - len(multiparams) == 1 - ) - - c = cached_connections[connection].\ - execute(statement, multiparams) - - rows += c.rowcount - - # TODO: why with bookkeeping=False? - for state, state_dict, params, mapper, \ - connection, value_params in records: - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - c, - c.context.compiled_parameters[0], - value_params) - - if check_rowcount: - if rows != len(records): - raise orm_exc.StaleDataError( - "UPDATE statement on table '%s' expected to " - "update %d row(s); %d were matched." % - (table.description, len(records), rows)) - - elif needs_version_id: - util.warn("Dialect %s does not support updated rowcount " - "- versioning cannot be verified." % - c.dialect.dialect_description) - - -def _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, insert, - bookkeeping=True): - """Emit INSERT statements corresponding to value lists collected - by _collect_insert_commands().""" - - statement = base_mapper._memo(('insert', table), table.insert) - - for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \ - records in groupby( - insert, - lambda rec: ( - rec[4], # connection - set(rec[2]), # parameter keys - bool(rec[5]), # whether we have "value" parameters - rec[6], - rec[7])): - if not bookkeeping or \ - ( - has_all_defaults - or not base_mapper.eager_defaults - or not connection.dialect.implicit_returning - ) and has_all_pks and not hasvalue: - - records = list(records) - multiparams = [rec[2] for rec in records] - - c = cached_connections[connection].\ - execute(statement, multiparams) - - if bookkeeping: - for (state, state_dict, params, mapper_rec, - conn, value_params, has_all_pks, has_all_defaults), \ - last_inserted_params in \ - zip(records, c.context.compiled_parameters): - _postfetch( - mapper_rec, - uowtransaction, - table, - state, - state_dict, - c, - last_inserted_params, - value_params) - - else: - if not has_all_defaults and base_mapper.eager_defaults: - statement = statement.return_defaults() - elif mapper.version_id_col is not None: - statement = statement.return_defaults(mapper.version_id_col) - - for state, state_dict, params, mapper_rec, \ - connection, value_params, \ - has_all_pks, has_all_defaults in records: - - if value_params: - result = connection.execute( - statement.values(value_params), - params) - else: - result = cached_connections[connection].\ - execute(statement, params) - - primary_key = result.context.inserted_primary_key - - if primary_key is not None: - # set primary key attributes - for pk, col in zip(primary_key, - mapper._pks_by_table[table]): - prop = mapper_rec._columntoproperty[col] - if state_dict.get(prop.key) is None: - state_dict[prop.key] = pk - _postfetch( - mapper_rec, - uowtransaction, - table, - state, - state_dict, - result, - result.context.compiled_parameters[0], - value_params) - - -def _emit_post_update_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, update): - """Emit UPDATE statements corresponding to value lists collected - by _collect_post_update_commands().""" - - def update_stmt(): - 
clause = sql.and_() - - for col in mapper._pks_by_table[table]: - clause.clauses.append(col == sql.bindparam(col._label, - type_=col.type)) - - return table.update(clause) - - statement = base_mapper._memo(('post_update', table), update_stmt) - - # execute each UPDATE in the order according to the original - # list of states to guarantee row access order, but - # also group them into common (connection, cols) sets - # to support executemany(). - for key, grouper in groupby( - update, lambda rec: ( - rec[1], # connection - set(rec[0]) # parameter keys - ) - ): - connection = key[0] - multiparams = [params for params, conn in grouper] - cached_connections[connection].\ - execute(statement, multiparams) - - -def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, - mapper, table, delete): - """Emit DELETE statements corresponding to value lists collected - by _collect_delete_commands().""" - - need_version_id = mapper.version_id_col is not None and \ - mapper.version_id_col in mapper._cols_by_table[table] - - def delete_stmt(): - clause = sql.and_() - for col in mapper._pks_by_table[table]: - clause.clauses.append( - col == sql.bindparam(col.key, type_=col.type)) - - if need_version_id: - clause.clauses.append( - mapper.version_id_col == - sql.bindparam( - mapper.version_id_col.key, - type_=mapper.version_id_col.type - ) - ) - - return table.delete(clause) - - statement = base_mapper._memo(('delete', table), delete_stmt) - for connection, recs in groupby( - delete, - lambda rec: rec[1] # connection - ): - del_objects = [params for params, connection in recs] - - connection = cached_connections[connection] - - expected = len(del_objects) - rows_matched = -1 - only_warn = False - if connection.dialect.supports_sane_multi_rowcount: - c = connection.execute(statement, del_objects) - - if not need_version_id: - only_warn = True - - rows_matched = c.rowcount - - elif need_version_id: - if connection.dialect.supports_sane_rowcount: - rows_matched = 0 - # execute deletes individually so that versioned - # rows can be verified - for params in del_objects: - c = connection.execute(statement, params) - rows_matched += c.rowcount - else: - util.warn( - "Dialect %s does not support deleted rowcount " - "- versioning cannot be verified." % - connection.dialect.dialect_description, - stacklevel=12) - connection.execute(statement, del_objects) - else: - connection.execute(statement, del_objects) - - if base_mapper.confirm_deleted_rows and \ - rows_matched > -1 and expected != rows_matched: - if only_warn: - util.warn( - "DELETE statement on table '%s' expected to " - "delete %d row(s); %d were matched. Please set " - "confirm_deleted_rows=False within the mapper " - "configuration to prevent this warning." % - (table.description, expected, rows_matched) - ) - else: - raise orm_exc.StaleDataError( - "DELETE statement on table '%s' expected to " - "delete %d row(s); %d were matched. Please set " - "confirm_deleted_rows=False within the mapper " - "configuration to prevent this warning." % - (table.description, expected, rows_matched) - ) - - -def _finalize_insert_update_commands(base_mapper, uowtransaction, states): - """finalize state on states that have been inserted or updated, - including calling after_insert/after_update events. 
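
The after_insert/after_update dispatches mentioned here are the same hooks exposed through the public event API; a sketch against the hypothetical User model:

from sqlalchemy import event

@event.listens_for(User, 'after_insert')
def _after_insert(mapper, connection, target):
    # runs inside the flush, on the same connection as the INSERT
    print('inserted:', target.name)

@event.listens_for(User, 'after_update')
def _after_update(mapper, connection, target):
    print('updated:', target.name)
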
- - """ - for state, state_dict, mapper, connection, has_identity in states: - - if mapper._readonly_props: - readonly = state.unmodified_intersection( - [p.key for p in mapper._readonly_props - if p.expire_on_flush or p.key not in state.dict] - ) - if readonly: - state._expire_attributes(state.dict, readonly) - - # if eager_defaults option is enabled, load - # all expired cols. Else if we have a version_id_col, make sure - # it isn't expired. - toload_now = [] - - if base_mapper.eager_defaults: - toload_now.extend(state._unloaded_non_object) - elif mapper.version_id_col is not None and \ - mapper.version_id_generator is False: - if mapper._version_id_prop.key in state.unloaded: - toload_now.extend([mapper._version_id_prop.key]) - - if toload_now: - state.key = base_mapper._identity_key_from_state(state) - loading.load_on_ident( - uowtransaction.session.query(base_mapper), - state.key, refresh_state=state, - only_load_props=toload_now) - - # call after_XXX extensions - if not has_identity: - mapper.dispatch.after_insert(mapper, connection, state) - else: - mapper.dispatch.after_update(mapper, connection, state) - - -def _postfetch(mapper, uowtransaction, table, - state, dict_, result, params, value_params, bulk=False): - """Expire attributes in need of newly persisted database state, - after an INSERT or UPDATE statement has proceeded for that - state.""" - - # TODO: bulk is never non-False, need to clean this up - - prefetch_cols = result.context.compiled.prefetch - postfetch_cols = result.context.compiled.postfetch - returning_cols = result.context.compiled.returning - - if mapper.version_id_col is not None and \ - mapper.version_id_col in mapper._cols_by_table[table]: - prefetch_cols = list(prefetch_cols) + [mapper.version_id_col] - - refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush) - if refresh_flush: - load_evt_attrs = [] - - if returning_cols: - row = result.context.returned_defaults - if row is not None: - for col in returning_cols: - if col.primary_key: - continue - dict_[mapper._columntoproperty[col].key] = row[col] - if refresh_flush: - load_evt_attrs.append(mapper._columntoproperty[col].key) - - for c in prefetch_cols: - if c.key in params and c in mapper._columntoproperty: - dict_[mapper._columntoproperty[c].key] = params[c.key] - if refresh_flush: - load_evt_attrs.append(mapper._columntoproperty[c].key) - - if refresh_flush and load_evt_attrs: - mapper.class_manager.dispatch.refresh_flush( - state, uowtransaction, load_evt_attrs) - - if postfetch_cols and state: - state._expire_attributes(state.dict, - [mapper._columntoproperty[c].key - for c in postfetch_cols if c in - mapper._columntoproperty] - ) - - # synchronize newly inserted ids from one table to the next - # TODO: this still goes a little too often. would be nice to - # have definitive list of "columns that changed" here - for m, equated_pairs in mapper._table_to_equated[table]: - if state is None: - sync.bulk_populate_inherit_keys(dict_, m, equated_pairs) - else: - sync.populate(state, m, state, m, - equated_pairs, - uowtransaction, - mapper.passive_updates) - - -def _connections_for_states(base_mapper, uowtransaction, states): - """Return an iterator of (state, state.dict, mapper, connection). - - The states are sorted according to _sort_states, then paired - with the connection they should be using for the given - unit of work transaction. 
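
Whether _postfetch() above expires server-generated columns or finds them already returned is controlled by the eager_defaults mapper option; with it enabled, the flush fetches the values inline (via RETURNING where the backend supports it) instead of leaving them to a later SELECT. A sketch; the LogEntry model is hypothetical:

class LogEntry(Base):
    __tablename__ = 'log_entry'
    id = Column(Integer, primary_key=True)
    created = Column(String, server_default='now')
    __mapper_args__ = {'eager_defaults': True}
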
- - """ - # if session has a connection callable, - # organize individual states with the connection - # to use for update - if uowtransaction.session.connection_callable: - connection_callable = \ - uowtransaction.session.connection_callable - else: - connection = uowtransaction.transaction.connection(base_mapper) - connection_callable = None - - for state in _sort_states(states): - if connection_callable: - connection = connection_callable(base_mapper, state.obj()) - - mapper = state.manager.mapper - - yield state, state.dict, mapper, connection - - -def _cached_connection_dict(base_mapper): - # dictionary of connection->connection_with_cache_options. - return util.PopulateDict( - lambda conn: conn.execution_options( - compiled_cache=base_mapper._compiled_cache - )) - - -def _sort_states(states): - pending = set(states) - persistent = set(s for s in pending if s.key is not None) - pending.difference_update(persistent) - return sorted(pending, key=operator.attrgetter("insert_order")) + \ - sorted(persistent, key=lambda q: q.key[1]) - - -class BulkUD(object): - """Handle bulk update and deletes via a :class:`.Query`.""" - - def __init__(self, query): - self.query = query.enable_eagerloads(False) - self.mapper = self.query._bind_mapper() - self._validate_query_state() - - def _validate_query_state(self): - for attr, methname, notset, op in ( - ('_limit', 'limit()', None, operator.is_), - ('_offset', 'offset()', None, operator.is_), - ('_order_by', 'order_by()', False, operator.is_), - ('_group_by', 'group_by()', False, operator.is_), - ('_distinct', 'distinct()', False, operator.is_), - ( - '_from_obj', - 'join(), outerjoin(), select_from(), or from_self()', - (), operator.eq) - ): - if not op(getattr(self.query, attr), notset): - raise sa_exc.InvalidRequestError( - "Can't call Query.update() or Query.delete() " - "when %s has been called" % - (methname, ) - ) - - @property - def session(self): - return self.query.session - - @classmethod - def _factory(cls, lookup, synchronize_session, *arg): - try: - klass = lookup[synchronize_session] - except KeyError: - raise sa_exc.ArgumentError( - "Valid strategies for session synchronization " - "are %s" % (", ".join(sorted(repr(x) - for x in lookup)))) - else: - return klass(*arg) - - def exec_(self): - self._do_pre() - self._do_pre_synchronize() - self._do_exec() - self._do_post_synchronize() - self._do_post() - - @util.dependencies("sqlalchemy.orm.query") - def _do_pre(self, querylib): - query = self.query - self.context = querylib.QueryContext(query) - - if isinstance(query._entities[0], querylib._ColumnEntity): - # check for special case of query(table) - tables = set() - for ent in query._entities: - if not isinstance(ent, querylib._ColumnEntity): - tables.clear() - break - else: - tables.update(_from_objects(ent.column)) - - if len(tables) != 1: - raise sa_exc.InvalidRequestError( - "This operation requires only one Table or " - "entity be specified as the target." - ) - else: - self.primary_table = tables.pop() - - else: - self.primary_table = query._only_entity_zero( - "This operation requires only one Table or " - "entity be specified as the target." 
- ).mapper.local_table - - session = query.session - - if query._autoflush: - session._autoflush() - - def _do_pre_synchronize(self): - pass - - def _do_post_synchronize(self): - pass - - -class BulkEvaluate(BulkUD): - """BulkUD which does the 'evaluate' method of session state resolution.""" - - def _additional_evaluators(self, evaluator_compiler): - pass - - def _do_pre_synchronize(self): - query = self.query - target_cls = query._mapper_zero().class_ - - try: - evaluator_compiler = evaluator.EvaluatorCompiler(target_cls) - if query.whereclause is not None: - eval_condition = evaluator_compiler.process( - query.whereclause) - else: - def eval_condition(obj): - return True - - self._additional_evaluators(evaluator_compiler) - - except evaluator.UnevaluatableError: - raise sa_exc.InvalidRequestError( - "Could not evaluate current criteria in Python. " - "Specify 'fetch' or False for the " - "synchronize_session parameter.") - - # TODO: detect when the where clause is a trivial primary key match - self.matched_objects = [ - obj for (cls, pk), obj in - query.session.identity_map.items() - if issubclass(cls, target_cls) and - eval_condition(obj)] - - -class BulkFetch(BulkUD): - """BulkUD which does the 'fetch' method of session state resolution.""" - - def _do_pre_synchronize(self): - query = self.query - session = query.session - context = query._compile_context() - select_stmt = context.statement.with_only_columns( - self.primary_table.primary_key) - self.matched_rows = session.execute( - select_stmt, - mapper=self.mapper, - params=query._params).fetchall() - - -class BulkUpdate(BulkUD): - """BulkUD which handles UPDATEs.""" - - def __init__(self, query, values, update_kwargs): - super(BulkUpdate, self).__init__(query) - self.values = values - self.update_kwargs = update_kwargs - - @classmethod - def factory(cls, query, synchronize_session, values, update_kwargs): - return BulkUD._factory({ - "evaluate": BulkUpdateEvaluate, - "fetch": BulkUpdateFetch, - False: BulkUpdate - }, synchronize_session, query, values, update_kwargs) - - def _resolve_string_to_expr(self, key): - if self.mapper and isinstance(key, util.string_types): - attr = _entity_descriptor(self.mapper, key) - return attr.__clause_element__() - else: - return key - - def _resolve_key_to_attrname(self, key): - if self.mapper and isinstance(key, util.string_types): - attr = _entity_descriptor(self.mapper, key) - return attr.property.key - elif isinstance(key, attributes.InstrumentedAttribute): - return key.key - elif hasattr(key, '__clause_element__'): - key = key.__clause_element__() - - if self.mapper and isinstance(key, expression.ColumnElement): - try: - attr = self.mapper._columntoproperty[key] - except orm_exc.UnmappedColumnError: - return None - else: - return attr.key - else: - raise sa_exc.InvalidRequestError( - "Invalid expression type: %r" % key) - - def _do_exec(self): - values = dict( - (self._resolve_string_to_expr(k), v) - for k, v in self.values.items() - ) - update_stmt = sql.update(self.primary_table, - self.context.whereclause, values, - **self.update_kwargs) - - self.result = self.query.session.execute( - update_stmt, params=self.query._params, - mapper=self.mapper) - self.rowcount = self.result.rowcount - - def _do_post(self): - session = self.query.session - session.dispatch.after_bulk_update(self) - - -class BulkDelete(BulkUD): - """BulkUD which handles DELETEs.""" - - def __init__(self, query): - super(BulkDelete, self).__init__(query) - - @classmethod - def factory(cls, query, synchronize_session): - 
return BulkUD._factory({ - "evaluate": BulkDeleteEvaluate, - "fetch": BulkDeleteFetch, - False: BulkDelete - }, synchronize_session, query) - - def _do_exec(self): - delete_stmt = sql.delete(self.primary_table, - self.context.whereclause) - - self.result = self.query.session.execute( - delete_stmt, - params=self.query._params, - mapper=self.mapper) - self.rowcount = self.result.rowcount - - def _do_post(self): - session = self.query.session - session.dispatch.after_bulk_delete(self) - - -class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate): - """BulkUD which handles UPDATEs using the "evaluate" - method of session resolution.""" - - def _additional_evaluators(self, evaluator_compiler): - self.value_evaluators = {} - for key, value in self.values.items(): - key = self._resolve_key_to_attrname(key) - if key is not None: - self.value_evaluators[key] = evaluator_compiler.process( - expression._literal_as_binds(value)) - - def _do_post_synchronize(self): - session = self.query.session - states = set() - evaluated_keys = list(self.value_evaluators.keys()) - for obj in self.matched_objects: - state, dict_ = attributes.instance_state(obj),\ - attributes.instance_dict(obj) - - # only evaluate unmodified attributes - to_evaluate = state.unmodified.intersection( - evaluated_keys) - for key in to_evaluate: - dict_[key] = self.value_evaluators[key](obj) - - state._commit(dict_, list(to_evaluate)) - - # expire attributes with pending changes - # (there was no autoflush, so they are overwritten) - state._expire_attributes(dict_, - set(evaluated_keys). - difference(to_evaluate)) - states.add(state) - session._register_altered(states) - - -class BulkDeleteEvaluate(BulkEvaluate, BulkDelete): - """BulkUD which handles DELETEs using the "evaluate" - method of session resolution.""" - - def _do_post_synchronize(self): - self.query.session._remove_newly_deleted( - [attributes.instance_state(obj) - for obj in self.matched_objects]) - - -class BulkUpdateFetch(BulkFetch, BulkUpdate): - """BulkUD which handles UPDATEs using the "fetch" - method of session resolution.""" - - def _do_post_synchronize(self): - session = self.query.session - target_mapper = self.query._mapper_zero() - - states = set([ - attributes.instance_state(session.identity_map[identity_key]) - for identity_key in [ - target_mapper.identity_key_from_primary_key( - list(primary_key)) - for primary_key in self.matched_rows - ] - if identity_key in session.identity_map - ]) - attrib = [_attr_as_key(k) for k in self.values] - for state in states: - session._expire_state(state, attrib) - session._register_altered(states) - - -class BulkDeleteFetch(BulkFetch, BulkDelete): - """BulkUD which handles DELETEs using the "fetch" - method of session resolution.""" - - def _do_post_synchronize(self): - session = self.query.session - target_mapper = self.query._mapper_zero() - for primary_key in self.matched_rows: - # TODO: inline this and call remove_newly_deleted - # once - identity_key = target_mapper.identity_key_from_primary_key( - list(primary_key)) - if identity_key in session.identity_map: - session._remove_newly_deleted( - [attributes.instance_state( - session.identity_map[identity_key] - )] - ) diff --git a/python/sqlalchemy/orm/properties.py b/python/sqlalchemy/orm/properties.py deleted file mode 100644 index b1f1c61c..00000000 --- a/python/sqlalchemy/orm/properties.py +++ /dev/null @@ -1,276 +0,0 @@ -# orm/properties.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# 
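# Editor's note (illustrative sketch, not part of this patch): the
# BulkUpdate/BulkDelete machinery deleted above backs the public
# Query.update() / Query.delete() calls, with synchronize_session
# selecting the "evaluate"/"fetch"/False strategy classes. `User` and
# its attributes are hypothetical.
session.query(User).filter(User.age > 90).update(
    {"status": "retired"}, synchronize_session="evaluate")
session.query(User).filter(User.status == "gone").delete(
    synchronize_session="fetch")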
the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""MapperProperty implementations. - -This is a private module which defines the behavior of invidual ORM- -mapped attributes. - -""" -from __future__ import absolute_import - -from .. import util, log -from ..sql import expression -from . import attributes -from .util import _orm_full_deannotate - -from .interfaces import PropComparator, StrategizedProperty - -__all__ = ['ColumnProperty', 'CompositeProperty', 'SynonymProperty', - 'ComparableProperty', 'RelationshipProperty'] - - -@log.class_logger -class ColumnProperty(StrategizedProperty): - """Describes an object attribute that corresponds to a table column. - - Public constructor is the :func:`.orm.column_property` function. - - """ - - strategy_wildcard_key = 'column' - - __slots__ = ( - '_orig_columns', 'columns', 'group', 'deferred', - 'instrument', 'comparator_factory', 'descriptor', 'extension', - 'active_history', 'expire_on_flush', 'info', 'doc', - 'strategy_class', '_creation_order', '_is_polymorphic_discriminator', - '_mapped_by_synonym', '_deferred_column_loader') - - def __init__(self, *columns, **kwargs): - """Provide a column-level property for use with a Mapper. - - Column-based properties can normally be applied to the mapper's - ``properties`` dictionary using the :class:`.Column` element directly. - Use this function when the given column is not directly present within - the mapper's selectable; examples include SQL expressions, functions, - and scalar SELECT queries. - - Columns that aren't present in the mapper's selectable won't be - persisted by the mapper and are effectively "read-only" attributes. - - :param \*cols: - list of Column objects to be mapped. - - :param active_history=False: - When ``True``, indicates that the "previous" value for a - scalar attribute should be loaded when replaced, if not - already loaded. Normally, history tracking logic for - simple non-primary-key scalar values only needs to be - aware of the "new" value in order to perform a flush. This - flag is available for applications that make use of - :func:`.attributes.get_history` or :meth:`.Session.is_modified` - which also need to know - the "previous" value of the attribute. - - .. versionadded:: 0.6.6 - - :param comparator_factory: a class which extends - :class:`.ColumnProperty.Comparator` which provides custom SQL - clause generation for comparison operations. - - :param group: - a group name for this property when marked as deferred. - - :param deferred: - when True, the column property is "deferred", meaning that - it does not load immediately, and is instead loaded when the - attribute is first accessed on an instance. See also - :func:`~sqlalchemy.orm.deferred`. - - :param doc: - optional string that will be applied as the doc on the - class-bound descriptor. - - :param expire_on_flush=True: - Disable expiry on flush. A column_property() which refers - to a SQL expression (and not a single table-bound column) - is considered to be a "read only" property; populating it - has no effect on the state of data, and it can only return - database state. For this reason a column_property()'s value - is expired whenever the parent object is involved in a - flush, that is, has any kind of "dirty" state within a flush. - Setting this parameter to ``False`` will have the effect of - leaving any existing value present after the flush proceeds. 
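# Editor's note (illustrative sketch, not part of this patch): a minimal
# use of the column_property() constructor documented above, mapping a
# SQL expression as a deferred, effectively read-only attribute. The
# `User` class and its columns are hypothetical; the API shown matches
# the SQLAlchemy version vendored here.
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import column_property
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    first_name = Column(String)
    last_name = Column(String)
    # deferred=True: loaded on first access; expire_on_flush is left at
    # its default (True), so the expression reloads after a flush
    full_name = column_property(first_name + " " + last_name, deferred=True)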
- Note however that the :class:`.Session` with default expiration - settings still expires - all attributes after a :meth:`.Session.commit` call, however. - - .. versionadded:: 0.7.3 - - :param info: Optional data dictionary which will be populated into the - :attr:`.MapperProperty.info` attribute of this object. - - .. versionadded:: 0.8 - - :param extension: - an - :class:`.AttributeExtension` - instance, or list of extensions, which will be prepended - to the list of attribute listeners for the resulting - descriptor placed on the class. - **Deprecated.** Please see :class:`.AttributeEvents`. - - """ - super(ColumnProperty, self).__init__() - self._orig_columns = [expression._labeled(c) for c in columns] - self.columns = [expression._labeled(_orm_full_deannotate(c)) - for c in columns] - self.group = kwargs.pop('group', None) - self.deferred = kwargs.pop('deferred', False) - self.instrument = kwargs.pop('_instrument', True) - self.comparator_factory = kwargs.pop('comparator_factory', - self.__class__.Comparator) - self.descriptor = kwargs.pop('descriptor', None) - self.extension = kwargs.pop('extension', None) - self.active_history = kwargs.pop('active_history', False) - self.expire_on_flush = kwargs.pop('expire_on_flush', True) - - if 'info' in kwargs: - self.info = kwargs.pop('info') - - if 'doc' in kwargs: - self.doc = kwargs.pop('doc') - else: - for col in reversed(self.columns): - doc = getattr(col, 'doc', None) - if doc is not None: - self.doc = doc - break - else: - self.doc = None - - if kwargs: - raise TypeError( - "%s received unexpected keyword argument(s): %s" % ( - self.__class__.__name__, - ', '.join(sorted(kwargs.keys())))) - - util.set_creation_order(self) - - self.strategy_class = self._strategy_lookup( - ("deferred", self.deferred), - ("instrument", self.instrument) - ) - - @util.dependencies("sqlalchemy.orm.state", "sqlalchemy.orm.strategies") - def _memoized_attr__deferred_column_loader(self, state, strategies): - return state.InstanceState._instance_level_callable_processor( - self.parent.class_manager, - strategies.LoadDeferredColumns(self.key), self.key) - - @property - def expression(self): - """Return the primary column or expression for this ColumnProperty. - - """ - return self.columns[0] - - def instrument_class(self, mapper): - if not self.instrument: - return - - attributes.register_descriptor( - mapper.class_, - self.key, - comparator=self.comparator_factory(self, mapper), - parententity=mapper, - doc=self.doc - ) - - def do_init(self): - super(ColumnProperty, self).do_init() - if len(self.columns) > 1 and \ - set(self.parent.primary_key).issuperset(self.columns): - util.warn( - ("On mapper %s, primary key column '%s' is being combined " - "with distinct primary key column '%s' in attribute '%s'. 
" - "Use explicit properties to give each column its own mapped " - "attribute name.") % (self.parent, self.columns[1], - self.columns[0], self.key)) - - def copy(self): - return ColumnProperty( - deferred=self.deferred, - group=self.group, - active_history=self.active_history, - *self.columns) - - def _getcommitted(self, state, dict_, column, - passive=attributes.PASSIVE_OFF): - return state.get_impl(self.key).\ - get_committed_value(state, dict_, passive=passive) - - def merge(self, session, source_state, source_dict, dest_state, - dest_dict, load, _recursive): - if not self.instrument: - return - elif self.key in source_dict: - value = source_dict[self.key] - - if not load: - dest_dict[self.key] = value - else: - impl = dest_state.get_impl(self.key) - impl.set(dest_state, dest_dict, value, None) - elif dest_state.has_identity and self.key not in dest_dict: - dest_state._expire_attributes(dest_dict, [self.key]) - - class Comparator(util.MemoizedSlots, PropComparator): - """Produce boolean, comparison, and other operators for - :class:`.ColumnProperty` attributes. - - See the documentation for :class:`.PropComparator` for a brief - overview. - - See also: - - :class:`.PropComparator` - - :class:`.ColumnOperators` - - :ref:`types_operators` - - :attr:`.TypeEngine.comparator_factory` - - """ - - __slots__ = '__clause_element__', 'info' - - def _memoized_method___clause_element__(self): - if self.adapter: - return self.adapter(self.prop.columns[0]) - else: - # no adapter, so we aren't aliased - # assert self._parententity is self._parentmapper - return self.prop.columns[0]._annotate({ - "parententity": self._parententity, - "parentmapper": self._parententity}) - - def _memoized_attr_info(self): - ce = self.__clause_element__() - try: - return ce.info - except AttributeError: - return self.prop.info - - def _fallback_getattr(self, key): - """proxy attribute access down to the mapped column. - - this allows user-defined comparison methods to be accessed. - """ - return getattr(self.__clause_element__(), key) - - def operate(self, op, *other, **kwargs): - return op(self.__clause_element__(), *other, **kwargs) - - def reverse_operate(self, op, other, **kwargs): - col = self.__clause_element__() - return op(col._bind_param(op, other), col, **kwargs) - - def __str__(self): - return str(self.parent.class_.__name__) + "." + self.key diff --git a/python/sqlalchemy/orm/query.py b/python/sqlalchemy/orm/query.py deleted file mode 100644 index 7b8254a6..00000000 --- a/python/sqlalchemy/orm/query.py +++ /dev/null @@ -1,3848 +0,0 @@ -# orm/query.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""The Query class and support. - -Defines the :class:`.Query` class, the central -construct used by the ORM to construct database queries. - -The :class:`.Query` class should not be confused with the -:class:`.Select` class, which defines database -SELECT operations at the SQL (non-ORM) level. ``Query`` differs from -``Select`` in that it returns ORM-mapped objects and interacts with an -ORM session, whereas the ``Select`` construct interacts directly with the -database to return iterable result sets. - -""" - -from itertools import chain - -from . 
import ( - attributes, interfaces, object_mapper, persistence, - exc as orm_exc, loading -) -from .base import _entity_descriptor, _is_aliased_class, \ - _is_mapped_class, _orm_columns, _generative, InspectionAttr -from .path_registry import PathRegistry -from .util import ( - AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased -) -from .. import sql, util, log, exc as sa_exc, inspect, inspection -from ..sql.expression import _interpret_as_from -from ..sql import ( - util as sql_util, - expression, visitors -) -from ..sql.base import ColumnCollection -from . import properties - -__all__ = ['Query', 'QueryContext', 'aliased'] - - -_path_registry = PathRegistry.root - - -@inspection._self_inspects -@log.class_logger -class Query(object): - """ORM-level SQL construction object. - - :class:`.Query` is the source of all SELECT statements generated by the - ORM, both those formulated by end-user query operations as well as by - high level internal operations such as related collection loading. It - features a generative interface whereby successive calls return a new - :class:`.Query` object, a copy of the former with additional - criteria and options associated with it. - - :class:`.Query` objects are normally initially generated using the - :meth:`~.Session.query` method of :class:`.Session`. For a full - walkthrough of :class:`.Query` usage, see the - :ref:`ormtutorial_toplevel`. - - """ - - _enable_eagerloads = True - _enable_assertions = True - _with_labels = False - _criterion = None - _yield_per = None - _order_by = False - _group_by = False - _having = None - _distinct = False - _prefixes = None - _suffixes = None - _offset = None - _limit = None - _for_update_arg = None - _statement = None - _correlate = frozenset() - _populate_existing = False - _invoke_all_eagers = True - _version_check = False - _autoflush = True - _only_load_props = None - _refresh_state = None - _from_obj = () - _join_entities = () - _select_from_entity = None - _mapper_adapter_map = {} - _filter_aliases = None - _from_obj_alias = None - _joinpath = _joinpoint = util.immutabledict() - _execution_options = util.immutabledict() - _params = util.immutabledict() - _attributes = util.immutabledict() - _with_options = () - _with_hints = () - _enable_single_crit = True - _orm_only_adapt = True - _orm_only_from_obj_alias = True - _current_path = _path_registry - - def __init__(self, entities, session=None): - self.session = session - self._polymorphic_adapters = {} - self._set_entities(entities) - - def _set_entities(self, entities, entity_wrapper=None): - if entity_wrapper is None: - entity_wrapper = _QueryEntity - self._entities = [] - self._primary_entity = None - for ent in util.to_list(entities): - entity_wrapper(self, ent) - - self._set_entity_selectables(self._entities) - - def _set_entity_selectables(self, entities): - self._mapper_adapter_map = d = self._mapper_adapter_map.copy() - - for ent in entities: - for entity in ent.entities: - if entity not in d: - ext_info = inspect(entity) - if not ext_info.is_aliased_class and \ - ext_info.mapper.with_polymorphic: - if ext_info.mapper.mapped_table not in \ - self._polymorphic_adapters: - self._mapper_loads_polymorphically_with( - ext_info.mapper, - sql_util.ColumnAdapter( - ext_info.selectable, - ext_info.mapper._equivalent_columns - ) - ) - aliased_adapter = None - elif ext_info.is_aliased_class: - aliased_adapter = ext_info._adapter - else: - aliased_adapter = None - - d[entity] = ( - ext_info, - aliased_adapter - ) - ent.setup_entity(*d[entity]) - - def 
_mapper_loads_polymorphically_with(self, mapper, adapter): - for m2 in mapper._with_polymorphic_mappers or [mapper]: - self._polymorphic_adapters[m2] = adapter - for m in m2.iterate_to_root(): - self._polymorphic_adapters[m.local_table] = adapter - - def _set_select_from(self, obj, set_base_alias): - fa = [] - select_from_alias = None - - for from_obj in obj: - info = inspect(from_obj) - if hasattr(info, 'mapper') and \ - (info.is_mapper or info.is_aliased_class): - self._select_from_entity = from_obj - if set_base_alias: - raise sa_exc.ArgumentError( - "A selectable (FromClause) instance is " - "expected when the base alias is being set.") - fa.append(info.selectable) - elif not info.is_selectable: - raise sa_exc.ArgumentError( - "argument is not a mapped class, mapper, " - "aliased(), or FromClause instance.") - else: - if isinstance(from_obj, expression.SelectBase): - from_obj = from_obj.alias() - if set_base_alias: - select_from_alias = from_obj - fa.append(from_obj) - - self._from_obj = tuple(fa) - - if set_base_alias and \ - len(self._from_obj) == 1 and \ - isinstance(select_from_alias, expression.Alias): - equivs = self.__all_equivs() - self._from_obj_alias = sql_util.ColumnAdapter( - self._from_obj[0], equivs) - - def _reset_polymorphic_adapter(self, mapper): - for m2 in mapper._with_polymorphic_mappers: - self._polymorphic_adapters.pop(m2, None) - for m in m2.iterate_to_root(): - self._polymorphic_adapters.pop(m.local_table, None) - - def _adapt_polymorphic_element(self, element): - if "parententity" in element._annotations: - search = element._annotations['parententity'] - alias = self._polymorphic_adapters.get(search, None) - if alias: - return alias.adapt_clause(element) - - if isinstance(element, expression.FromClause): - search = element - elif hasattr(element, 'table'): - search = element.table - else: - return None - - alias = self._polymorphic_adapters.get(search, None) - if alias: - return alias.adapt_clause(element) - - def _adapt_col_list(self, cols): - return [ - self._adapt_clause( - expression._literal_as_label_reference(o), - True, True) - for o in cols - ] - - @_generative() - def _adapt_all_clauses(self): - self._orm_only_adapt = False - - def _adapt_clause(self, clause, as_filter, orm_only): - """Adapt incoming clauses to transformations which - have been applied within this query.""" - - adapters = [] - # do we adapt all expression elements or only those - # tagged as 'ORM' constructs ? - if not self._orm_only_adapt: - orm_only = False - - if as_filter and self._filter_aliases: - for fa in self._filter_aliases._visitor_iterator: - adapters.append( - ( - orm_only, fa.replace - ) - ) - - if self._from_obj_alias: - # for the "from obj" alias, apply extra rule to the - # 'ORM only' check, if this query were generated from a - # subquery of itself, i.e. _from_selectable(), apply adaption - # to all SQL constructs. - adapters.append( - ( - orm_only if self._orm_only_from_obj_alias else False, - self._from_obj_alias.replace - ) - ) - - if self._polymorphic_adapters: - adapters.append( - ( - orm_only, self._adapt_polymorphic_element - ) - ) - - if not adapters: - return clause - - def replace(elem): - for _orm_only, adapter in adapters: - # if 'orm only', look for ORM annotations - # in the element before adapting. 
- if not _orm_only or \ - '_orm_adapt' in elem._annotations or \ - "parententity" in elem._annotations: - - e = adapter(elem) - if e is not None: - return e - - return visitors.replacement_traverse( - clause, - {}, - replace - ) - - def _entity_zero(self): - return self._entities[0] - - def _mapper_zero(self): - return self._select_from_entity \ - if self._select_from_entity is not None \ - else self._entity_zero().entity_zero - - @property - def _mapper_entities(self): - for ent in self._entities: - if isinstance(ent, _MapperEntity): - yield ent - - def _joinpoint_zero(self): - return self._joinpoint.get( - '_joinpoint_entity', - self._mapper_zero() - ) - - def _bind_mapper(self): - ezero = self._mapper_zero() - if ezero is not None: - insp = inspect(ezero) - if not insp.is_clause_element: - return insp.mapper - - return None - - def _only_mapper_zero(self, rationale=None): - if len(self._entities) > 1: - raise sa_exc.InvalidRequestError( - rationale or - "This operation requires a Query " - "against a single mapper." - ) - return self._mapper_zero() - - def _only_full_mapper_zero(self, methname): - if self._entities != [self._primary_entity]: - raise sa_exc.InvalidRequestError( - "%s() can only be used against " - "a single mapped class." % methname) - return self._primary_entity.entity_zero - - def _only_entity_zero(self, rationale=None): - if len(self._entities) > 1: - raise sa_exc.InvalidRequestError( - rationale or - "This operation requires a Query " - "against a single mapper." - ) - return self._entity_zero() - - def __all_equivs(self): - equivs = {} - for ent in self._mapper_entities: - equivs.update(ent.mapper._equivalent_columns) - return equivs - - def _get_condition(self): - return self._no_criterion_condition( - "get", order_by=False, distinct=False) - - def _get_existing_condition(self): - self._no_criterion_assertion("get", order_by=False, distinct=False) - - def _no_criterion_assertion(self, meth, order_by=True, distinct=True): - if not self._enable_assertions: - return - if self._criterion is not None or \ - self._statement is not None or self._from_obj or \ - self._limit is not None or self._offset is not None or \ - self._group_by or (order_by and self._order_by) or \ - (distinct and self._distinct): - raise sa_exc.InvalidRequestError( - "Query.%s() being called on a " - "Query with existing criterion. " % meth) - - def _no_criterion_condition(self, meth, order_by=True, distinct=True): - self._no_criterion_assertion(meth, order_by, distinct) - - self._from_obj = () - self._statement = self._criterion = None - self._order_by = self._group_by = self._distinct = False - - def _no_clauseelement_condition(self, meth): - if not self._enable_assertions: - return - if self._order_by: - raise sa_exc.InvalidRequestError( - "Query.%s() being called on a " - "Query with existing criterion. " % meth) - self._no_criterion_condition(meth) - - def _no_statement_condition(self, meth): - if not self._enable_assertions: - return - if self._statement is not None: - raise sa_exc.InvalidRequestError( - ("Query.%s() being called on a Query with an existing full " - "statement - can't apply criterion.") % meth) - - def _no_limit_offset(self, meth): - if not self._enable_assertions: - return - if self._limit is not None or self._offset is not None: - raise sa_exc.InvalidRequestError( - "Query.%s() being called on a Query which already has LIMIT " - "or OFFSET applied. To modify the row-limited results of a " - " Query, call from_self() first. 
" - "Otherwise, call %s() before limit() or offset() " - "are applied." - % (meth, meth) - ) - - def _get_options(self, populate_existing=None, - version_check=None, - only_load_props=None, - refresh_state=None): - if populate_existing: - self._populate_existing = populate_existing - if version_check: - self._version_check = version_check - if refresh_state: - self._refresh_state = refresh_state - if only_load_props: - self._only_load_props = set(only_load_props) - return self - - def _clone(self): - cls = self.__class__ - q = cls.__new__(cls) - q.__dict__ = self.__dict__.copy() - return q - - @property - def statement(self): - """The full SELECT statement represented by this Query. - - The statement by default will not have disambiguating labels - applied to the construct unless with_labels(True) is called - first. - - """ - - stmt = self._compile_context(labels=self._with_labels).\ - statement - if self._params: - stmt = stmt.params(self._params) - - # TODO: there's no tests covering effects of - # the annotation not being there - return stmt._annotate({'no_replacement_traverse': True}) - - def subquery(self, name=None, with_labels=False, reduce_columns=False): - """return the full SELECT statement represented by - this :class:`.Query`, embedded within an :class:`.Alias`. - - Eager JOIN generation within the query is disabled. - - :param name: string name to be assigned as the alias; - this is passed through to :meth:`.FromClause.alias`. - If ``None``, a name will be deterministically generated - at compile time. - - :param with_labels: if True, :meth:`.with_labels` will be called - on the :class:`.Query` first to apply table-qualified labels - to all columns. - - :param reduce_columns: if True, :meth:`.Select.reduce_columns` will - be called on the resulting :func:`.select` construct, - to remove same-named columns where one also refers to the other - via foreign key or WHERE clause equivalence. - - .. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns`` - keyword arguments were added. - - """ - q = self.enable_eagerloads(False) - if with_labels: - q = q.with_labels() - q = q.statement - if reduce_columns: - q = q.reduce_columns() - return q.alias(name=name) - - def cte(self, name=None, recursive=False): - """Return the full SELECT statement represented by this - :class:`.Query` represented as a common table expression (CTE). - - .. versionadded:: 0.7.6 - - Parameters and usage are the same as those of the - :meth:`.SelectBase.cte` method; see that method for - further details. - - Here is the `Postgresql WITH - RECURSIVE example - `_. - Note that, in this example, the ``included_parts`` cte and the - ``incl_alias`` alias of it are Core selectables, which - means the columns are accessed via the ``.c.`` attribute. 
The - ``parts_alias`` object is an :func:`.orm.aliased` instance of the - ``Part`` entity, so column-mapped attributes are available - directly:: - - from sqlalchemy.orm import aliased - - class Part(Base): - __tablename__ = 'part' - part = Column(String, primary_key=True) - sub_part = Column(String, primary_key=True) - quantity = Column(Integer) - - included_parts = session.query( - Part.sub_part, - Part.part, - Part.quantity).\\ - filter(Part.part=="our part").\\ - cte(name="included_parts", recursive=True) - - incl_alias = aliased(included_parts, name="pr") - parts_alias = aliased(Part, name="p") - included_parts = included_parts.union_all( - session.query( - parts_alias.sub_part, - parts_alias.part, - parts_alias.quantity).\\ - filter(parts_alias.part==incl_alias.c.sub_part) - ) - - q = session.query( - included_parts.c.sub_part, - func.sum(included_parts.c.quantity). - label('total_quantity') - ).\\ - group_by(included_parts.c.sub_part) - - .. seealso:: - - :meth:`.SelectBase.cte` - - """ - return self.enable_eagerloads(False).\ - statement.cte(name=name, recursive=recursive) - - def label(self, name): - """Return the full SELECT statement represented by this - :class:`.Query`, converted - to a scalar subquery with a label of the given name. - - Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`. - - .. versionadded:: 0.6.5 - - """ - - return self.enable_eagerloads(False).statement.label(name) - - def as_scalar(self): - """Return the full SELECT statement represented by this - :class:`.Query`, converted to a scalar subquery. - - Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`. - - .. versionadded:: 0.6.5 - - """ - - return self.enable_eagerloads(False).statement.as_scalar() - - @property - def selectable(self): - """Return the :class:`.Select` object emitted by this :class:`.Query`. - - Used for :func:`.inspect` compatibility, this is equivalent to:: - - query.enable_eagerloads(False).with_labels().statement - - """ - return self.__clause_element__() - - def __clause_element__(self): - return self.enable_eagerloads(False).with_labels().statement - - @_generative() - def enable_eagerloads(self, value): - """Control whether or not eager joins and subqueries are - rendered. - - When set to False, the returned Query will not render - eager joins regardless of :func:`~sqlalchemy.orm.joinedload`, - :func:`~sqlalchemy.orm.subqueryload` options - or mapper-level ``lazy='joined'``/``lazy='subquery'`` - configurations. - - This is used primarily when nesting the Query's - statement into a subquery or other - selectable, or when using :meth:`.Query.yield_per`. - - """ - self._enable_eagerloads = value - - def _no_yield_per(self, message): - raise sa_exc.InvalidRequestError( - "The yield_per Query option is currently not " - "compatible with %s eager loading. Please " - "specify lazyload('*') or query.enable_eagerloads(False) in " - "order to " - "proceed with query.yield_per()." % message) - - @_generative() - def with_labels(self): - """Apply column labels to the return value of Query.statement. - - Indicates that this Query's `statement` accessor should return - a SELECT statement that applies labels to all columns in the - form _; this is commonly used to - disambiguate columns from multiple tables which have the same - name. - - When the `Query` actually issues SQL to load rows, it always - uses column labeling. - - .. 
note:: The :meth:`.Query.with_labels` method *only* applies - the output of :attr:`.Query.statement`, and *not* to any of - the result-row invoking systems of :class:`.Query` itself, e.g. - :meth:`.Query.first`, :meth:`.Query.all`, etc. To execute - a query using :meth:`.Query.with_labels`, invoke the - :attr:`.Query.statement` using :meth:`.Session.execute`:: - - result = session.execute(query.with_labels().statement) - - - """ - self._with_labels = True - - @_generative() - def enable_assertions(self, value): - """Control whether assertions are generated. - - When set to False, the returned Query will - not assert its state before certain operations, - including that LIMIT/OFFSET has not been applied - when filter() is called, no criterion exists - when get() is called, and no "from_statement()" - exists when filter()/order_by()/group_by() etc. - is called. This more permissive mode is used by - custom Query subclasses to specify criterion or - other modifiers outside of the usual usage patterns. - - Care should be taken to ensure that the usage - pattern is even possible. A statement applied - by from_statement() will override any criterion - set by filter() or order_by(), for example. - - """ - self._enable_assertions = value - - @property - def whereclause(self): - """A readonly attribute which returns the current WHERE criterion for - this Query. - - This returned value is a SQL expression construct, or ``None`` if no - criterion has been established. - - """ - return self._criterion - - @_generative() - def _with_current_path(self, path): - """indicate that this query applies to objects loaded - within a certain path. - - Used by deferred loaders (see strategies.py) which transfer - query options from an originating query to a newly generated - query intended for the deferred load. - - """ - self._current_path = path - - @_generative(_no_clauseelement_condition) - def with_polymorphic(self, - cls_or_mappers, - selectable=None, - polymorphic_on=None): - """Load columns for inheriting classes. - - :meth:`.Query.with_polymorphic` applies transformations - to the "main" mapped class represented by this :class:`.Query`. - The "main" mapped class here means the :class:`.Query` - object's first argument is a full class, i.e. - ``session.query(SomeClass)``. These transformations allow additional - tables to be present in the FROM clause so that columns for a - joined-inheritance subclass are available in the query, both for the - purposes of load-time efficiency as well as the ability to use - these columns at query time. - - See the documentation section :ref:`with_polymorphic` for - details on how this method is used. - - .. versionchanged:: 0.8 - A new and more flexible function - :func:`.orm.with_polymorphic` supersedes - :meth:`.Query.with_polymorphic`, as it can apply the equivalent - functionality to any set of columns or classes in the - :class:`.Query`, not just the "zero mapper". See that - function for a description of arguments. - - """ - - if not self._primary_entity: - raise sa_exc.InvalidRequestError( - "No primary mapper set up for this Query.") - entity = self._entities[0]._clone() - self._entities = [entity] + self._entities[1:] - entity.set_with_polymorphic(self, - cls_or_mappers, - selectable=selectable, - polymorphic_on=polymorphic_on) - - @_generative() - def yield_per(self, count): - """Yield only ``count`` rows at a time. 
- - The purpose of this method is when fetching very large result sets - (> 10K rows), to batch results in sub-collections and yield them - out partially, so that the Python interpreter doesn't need to declare - very large areas of memory which is both time consuming and leads - to excessive memory use. The performance from fetching hundreds of - thousands of rows can often double when a suitable yield-per setting - (e.g. approximately 1000) is used, even with DBAPIs that buffer - rows (which are most). - - The :meth:`.Query.yield_per` method **is not compatible with most - eager loading schemes, including subqueryload and joinedload with - collections**. For this reason, it may be helpful to disable - eager loads, either unconditionally with - :meth:`.Query.enable_eagerloads`:: - - q = sess.query(Object).yield_per(100).enable_eagerloads(False) - - Or more selectively using :func:`.lazyload`; such as with - an asterisk to specify the default loader scheme:: - - q = sess.query(Object).yield_per(100).\\ - options(lazyload('*'), joinedload(Object.some_related)) - - .. warning:: - - Use this method with caution; if the same instance is - present in more than one batch of rows, end-user changes - to attributes will be overwritten. - - In particular, it's usually impossible to use this setting - with eagerly loaded collections (i.e. any lazy='joined' or - 'subquery') since those collections will be cleared for a - new load when encountered in a subsequent result batch. - In the case of 'subquery' loading, the full result for all - rows is fetched which generally defeats the purpose of - :meth:`~sqlalchemy.orm.query.Query.yield_per`. - - Also note that while - :meth:`~sqlalchemy.orm.query.Query.yield_per` will set the - ``stream_results`` execution option to True, currently - this is only understood by - :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect - which will stream results using server side cursors - instead of pre-buffer all rows for this query. Other - DBAPIs **pre-buffer all rows** before making them - available. The memory use of raw database rows is much less - than that of an ORM-mapped object, but should still be taken into - consideration when benchmarking. - - .. seealso:: - - :meth:`.Query.enable_eagerloads` - - """ - self._yield_per = count - self._execution_options = self._execution_options.union( - {"stream_results": True, - "max_row_buffer": count}) - - def get(self, ident): - """Return an instance based on the given primary key identifier, - or ``None`` if not found. - - E.g.:: - - my_user = session.query(User).get(5) - - some_object = session.query(VersionedFoo).get((5, 10)) - - :meth:`~.Query.get` is special in that it provides direct - access to the identity map of the owning :class:`.Session`. - If the given primary key identifier is present - in the local identity map, the object is returned - directly from this collection and no SQL is emitted, - unless the object has been marked fully expired. - If not present, - a SELECT is performed in order to locate the object. - - :meth:`~.Query.get` also will perform a check if - the object is present in the identity map and - marked as expired - a SELECT - is emitted to refresh the object as well as to - ensure that the row is still present. - If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. - - :meth:`~.Query.get` is only used to return a single - mapped instance, not multiple instances or - individual column constructs, and strictly - on a single primary key value. 
The originating - :class:`.Query` must be constructed in this way, - i.e. against a single mapped entity, - with no additional filtering criterion. Loading - options via :meth:`~.Query.options` may be applied - however, and will be used if the object is not - yet locally present. - - A lazy-loading, many-to-one attribute configured - by :func:`.relationship`, using a simple - foreign-key-to-primary-key criterion, will also use an - operation equivalent to :meth:`~.Query.get` in order to retrieve - the target value from the local identity map - before querying the database. See :doc:`/orm/loading_relationships` - for further details on relationship loading. - - :param ident: A scalar or tuple value representing - the primary key. For a composite primary key, - the order of identifiers corresponds in most cases - to that of the mapped :class:`.Table` object's - primary key columns. For a :func:`.mapper` that - was given the ``primary key`` argument during - construction, the order of identifiers corresponds - to the elements present in this collection. - - :return: The object instance, or ``None``. - - """ - return self._get_impl(ident, loading.load_on_ident) - - def _get_impl(self, ident, fallback_fn): - # convert composite types to individual args - if hasattr(ident, '__composite_values__'): - ident = ident.__composite_values__() - - ident = util.to_list(ident) - - mapper = self._only_full_mapper_zero("get") - - if len(ident) != len(mapper.primary_key): - raise sa_exc.InvalidRequestError( - "Incorrect number of values in identifier to formulate " - "primary key for query.get(); primary key columns are %s" % - ','.join("'%s'" % c for c in mapper.primary_key)) - - key = mapper.identity_key_from_primary_key(ident) - - if not self._populate_existing and \ - not mapper.always_refresh and \ - self._for_update_arg is None: - - instance = loading.get_from_identity( - self.session, key, attributes.PASSIVE_OFF) - if instance is not None: - self._get_existing_condition() - # reject calls for id in identity map but class - # mismatch. - if not issubclass(instance.__class__, mapper.class_): - return None - return instance - - return fallback_fn(self, key) - - @_generative() - def correlate(self, *args): - """Return a :class:`.Query` construct which will correlate the given - FROM clauses to that of an enclosing :class:`.Query` or - :func:`~.expression.select`. - - The method here accepts mapped classes, :func:`.aliased` constructs, - and :func:`.mapper` constructs as arguments, which are resolved into - expression constructs, in addition to appropriate expression - constructs. - - The correlation arguments are ultimately passed to - :meth:`.Select.correlate` after coercion to expression constructs. - - The correlation arguments take effect in such cases - as when :meth:`.Query.from_self` is used, or when - a subquery as returned by :meth:`.Query.subquery` is - embedded in another :func:`~.expression.select` construct. - - """ - - self._correlate = self._correlate.union( - _interpret_as_from(s) - if s is not None else None - for s in args) - - @_generative() - def autoflush(self, setting): - """Return a Query with a specific 'autoflush' setting. - - Note that a Session with autoflush=False will - not autoflush, even if this flag is set to True at the - Query level. Therefore this flag is usually used only - to disable autoflush for a specific Query. 
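# Editor's note (illustrative sketch, not part of this patch): per the
# autoflush() docstring above, suppress autoflush for this one query so
# pending (unflushed) objects are not written out mid-lookup. `User` is
# a hypothetical mapped class.
user = (session.query(User)
        .autoflush(False)
        .filter_by(name="ed")
        .first())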
- - """ - self._autoflush = setting - - @_generative() - def populate_existing(self): - """Return a :class:`.Query` that will expire and refresh all instances - as they are loaded, or reused from the current :class:`.Session`. - - :meth:`.populate_existing` does not improve behavior when - the ORM is used normally - the :class:`.Session` object's usual - behavior of maintaining a transaction and expiring all attributes - after rollback or commit handles object state automatically. - This method is not intended for general use. - - """ - self._populate_existing = True - - @_generative() - def _with_invoke_all_eagers(self, value): - """Set the 'invoke all eagers' flag which causes joined- and - subquery loaders to traverse into already-loaded related objects - and collections. - - Default is that of :attr:`.Query._invoke_all_eagers`. - - """ - self._invoke_all_eagers = value - - def with_parent(self, instance, property=None): - """Add filtering criterion that relates the given instance - to a child object or collection, using its attribute state - as well as an established :func:`.relationship()` - configuration. - - The method uses the :func:`.with_parent` function to generate - the clause, the result of which is passed to :meth:`.Query.filter`. - - Parameters are the same as :func:`.with_parent`, with the exception - that the given property can be None, in which case a search is - performed against this :class:`.Query` object's target mapper. - - """ - - if property is None: - mapper = object_mapper(instance) - - for prop in mapper.iterate_properties: - if isinstance(prop, properties.RelationshipProperty) and \ - prop.mapper is self._mapper_zero(): - property = prop - break - else: - raise sa_exc.InvalidRequestError( - "Could not locate a property which relates instances " - "of class '%s' to instances of class '%s'" % - ( - self._mapper_zero().class_.__name__, - instance.__class__.__name__) - ) - - return self.filter(with_parent(instance, property)) - - @_generative() - def add_entity(self, entity, alias=None): - """add a mapped entity to the list of result columns - to be returned.""" - - if alias is not None: - entity = aliased(entity, alias) - - self._entities = list(self._entities) - m = _MapperEntity(self, entity) - self._set_entity_selectables([m]) - - @_generative() - def with_session(self, session): - """Return a :class:`.Query` that will use the given :class:`.Session`. - - """ - - self.session = session - - def from_self(self, *entities): - """return a Query that selects from this Query's - SELECT statement. - - \*entities - optional list of entities which will replace - those being selected. - - """ - fromclause = self.with_labels().enable_eagerloads(False).\ - statement.correlate(None) - q = self._from_selectable(fromclause) - q._enable_single_crit = False - q._select_from_entity = self._mapper_zero() - if entities: - q._set_entities(entities) - return q - - @_generative() - def _set_enable_single_crit(self, val): - self._enable_single_crit = val - - @_generative() - def _from_selectable(self, fromclause): - for attr in ( - '_statement', '_criterion', - '_order_by', '_group_by', - '_limit', '_offset', - '_joinpath', '_joinpoint', - '_distinct', '_having', - '_prefixes', '_suffixes' - ): - self.__dict__.pop(attr, None) - self._set_select_from([fromclause], True) - - # this enables clause adaptation for non-ORM - # expressions. 
- self._orm_only_from_obj_alias = False - - old_entities = self._entities - self._entities = [] - for e in old_entities: - e.adapt_to_selectable(self, self._from_obj[0]) - - def values(self, *columns): - """Return an iterator yielding result tuples corresponding - to the given list of columns""" - - if not columns: - return iter(()) - q = self._clone() - q._set_entities(columns, entity_wrapper=_ColumnEntity) - if not q._yield_per: - q._yield_per = 10 - return iter(q) - _values = values - - def value(self, column): - """Return a scalar result corresponding to the given - column expression.""" - try: - return next(self.values(column))[0] - except StopIteration: - return None - - @_generative() - def with_entities(self, *entities): - """Return a new :class:`.Query` replacing the SELECT list with the - given entities. - - e.g.:: - - # Users, filtered on some arbitrary criterion - # and then ordered by related email address - q = session.query(User).\\ - join(User.address).\\ - filter(User.name.like('%ed%')).\\ - order_by(Address.email) - - # given *only* User.id==5, Address.email, and 'q', what - # would the *next* User in the result be ? - subq = q.with_entities(Address.email).\\ - order_by(None).\\ - filter(User.id==5).\\ - subquery() - q = q.join((subq, subq.c.email < Address.email)).\\ - limit(1) - - .. versionadded:: 0.6.5 - - """ - self._set_entities(entities) - - @_generative() - def add_columns(self, *column): - """Add one or more column expressions to the list - of result columns to be returned.""" - - self._entities = list(self._entities) - l = len(self._entities) - for c in column: - _ColumnEntity(self, c) - # _ColumnEntity may add many entities if the - # given arg is a FROM clause - self._set_entity_selectables(self._entities[l:]) - - @util.pending_deprecation("0.7", - ":meth:`.add_column` is superseded " - "by :meth:`.add_columns`", - False) - def add_column(self, column): - """Add a column expression to the list of result columns to be - returned. - - Pending deprecation: :meth:`.add_column` will be superseded by - :meth:`.add_columns`. - - """ - return self.add_columns(column) - - def options(self, *args): - """Return a new Query object, applying the given list of - mapper options. - - Most supplied options regard changing how column- and - relationship-mapped attributes are loaded. See the sections - :ref:`deferred` and :doc:`/orm/loading_relationships` for reference - documentation. - - """ - return self._options(False, *args) - - def _conditional_options(self, *args): - return self._options(True, *args) - - @_generative() - def _options(self, conditional, *args): - # most MapperOptions write to the '_attributes' dictionary, - # so copy that as well - self._attributes = self._attributes.copy() - opts = tuple(util.flatten_iterator(args)) - self._with_options = self._with_options + opts - if conditional: - for opt in opts: - opt.process_query_conditionally(self) - else: - for opt in opts: - opt.process_query(self) - - def with_transformation(self, fn): - """Return a new :class:`.Query` object transformed by - the given function. - - E.g.:: - - def filter_something(criterion): - def transform(q): - return q.filter(criterion) - return transform - - q = q.with_transformation(filter_something(x==5)) - - This allows ad-hoc recipes to be created for :class:`.Query` - objects. See the example at :ref:`hybrid_transformers`. - - .. 
versionadded:: 0.7.4 - - """ - return fn(self) - - @_generative() - def with_hint(self, selectable, text, dialect_name='*'): - """Add an indexing or other executional context - hint for the given entity or selectable to - this :class:`.Query`. - - Functionality is passed straight through to - :meth:`~sqlalchemy.sql.expression.Select.with_hint`, - with the addition that ``selectable`` can be a - :class:`.Table`, :class:`.Alias`, or ORM entity / mapped class - /etc. - - .. seealso:: - - :meth:`.Query.with_statement_hint` - - """ - if selectable is not None: - selectable = inspect(selectable).selectable - - self._with_hints += ((selectable, text, dialect_name),) - - def with_statement_hint(self, text, dialect_name='*'): - """add a statement hint to this :class:`.Select`. - - This method is similar to :meth:`.Select.with_hint` except that - it does not require an individual table, and instead applies to the - statement as a whole. - - This feature calls down into :meth:`.Select.with_statement_hint`. - - .. versionadded:: 1.0.0 - - .. seealso:: - - :meth:`.Query.with_hint` - - """ - return self.with_hint(None, text, dialect_name) - - @_generative() - def execution_options(self, **kwargs): - """ Set non-SQL options which take effect during execution. - - The options are the same as those accepted by - :meth:`.Connection.execution_options`. - - Note that the ``stream_results`` execution option is enabled - automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` - method is used. - - """ - self._execution_options = self._execution_options.union(kwargs) - - @_generative() - def with_lockmode(self, mode): - """Return a new :class:`.Query` object with the specified "locking mode", - which essentially refers to the ``FOR UPDATE`` clause. - - .. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`. - - :param mode: a string representing the desired locking mode. - Valid values are: - - * ``None`` - translates to no lockmode - - * ``'update'`` - translates to ``FOR UPDATE`` - (standard SQL, supported by most dialects) - - * ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT`` - (supported by Oracle, PostgreSQL 8.1 upwards) - - * ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL), - and ``FOR SHARE`` (for PostgreSQL) - - .. seealso:: - - :meth:`.Query.with_for_update` - improved API for - specifying the ``FOR UPDATE`` clause. - - """ - self._for_update_arg = LockmodeArg.parse_legacy_query(mode) - - @_generative() - def with_for_update(self, read=False, nowait=False, of=None): - """return a new :class:`.Query` with the specified options for the - ``FOR UPDATE`` clause. - - The behavior of this method is identical to that of - :meth:`.SelectBase.with_for_update`. When called with no arguments, - the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause - appended. When additional arguments are specified, backend-specific - options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE`` - can take effect. - - E.g.:: - - q = sess.query(User).with_for_update(nowait=True, of=User) - - The above query on a Postgresql backend will render like:: - - SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT - - .. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes - the :meth:`.Query.with_lockmode` method. - - .. seealso:: - - :meth:`.GenerativeSelect.with_for_update` - Core level method with - full argument and behavioral description. 
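# Editor's note (illustrative sketch, not part of this patch): the
# shared-lock variant of the with_for_update() example above; rendering
# is backend-dependent (LOCK IN SHARE MODE on MySQL, FOR SHARE on
# PostgreSQL). `User` is a hypothetical mapped class.
q = session.query(User).with_for_update(read=True)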
- - """ - self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of) - - @_generative() - def params(self, *args, **kwargs): - """add values for bind parameters which may have been - specified in filter(). - - parameters may be specified using \**kwargs, or optionally a single - dictionary as the first positional argument. The reason for both is - that \**kwargs is convenient, however some parameter dictionaries - contain unicode keys in which case \**kwargs cannot be used. - - """ - if len(args) == 1: - kwargs.update(args[0]) - elif len(args) > 0: - raise sa_exc.ArgumentError( - "params() takes zero or one positional argument, " - "which is a dictionary.") - self._params = self._params.copy() - self._params.update(kwargs) - - @_generative(_no_statement_condition, _no_limit_offset) - def filter(self, *criterion): - """apply the given filtering criterion to a copy - of this :class:`.Query`, using SQL expressions. - - e.g.:: - - session.query(MyClass).filter(MyClass.name == 'some name') - - Multiple criteria may be specified as comma separated; the effect - is that they will be joined together using the :func:`.and_` - function:: - - session.query(MyClass).\\ - filter(MyClass.name == 'some name', MyClass.id > 5) - - The criterion is any SQL expression object applicable to the - WHERE clause of a select. String expressions are coerced - into SQL expression constructs via the :func:`.text` construct. - - .. seealso:: - - :meth:`.Query.filter_by` - filter on keyword expressions. - - """ - for criterion in list(criterion): - criterion = expression._expression_literal_as_text(criterion) - - criterion = self._adapt_clause(criterion, True, True) - - if self._criterion is not None: - self._criterion = self._criterion & criterion - else: - self._criterion = criterion - - def filter_by(self, **kwargs): - """apply the given filtering criterion to a copy - of this :class:`.Query`, using keyword expressions. - - e.g.:: - - session.query(MyClass).filter_by(name = 'some name') - - Multiple criteria may be specified as comma separated; the effect - is that they will be joined together using the :func:`.and_` - function:: - - session.query(MyClass).\\ - filter_by(name = 'some name', id = 5) - - The keyword expressions are extracted from the primary - entity of the query, or the last entity that was the - target of a call to :meth:`.Query.join`. - - .. seealso:: - - :meth:`.Query.filter` - filter on SQL expressions. - - """ - - clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value - for key, value in kwargs.items()] - return self.filter(sql.and_(*clauses)) - - @_generative(_no_statement_condition, _no_limit_offset) - def order_by(self, *criterion): - """apply one or more ORDER BY criterion to the query and return - the newly resulting ``Query`` - - All existing ORDER BY settings can be suppressed by - passing ``None`` - this will suppress any ORDER BY configured - on mappers as well. - - Alternatively, an existing ORDER BY setting on the Query - object can be entirely cancelled by passing ``False`` - as the value - use this before calling methods where - an ORDER BY is invalid. 
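        A short sketch of the ``None`` / ``False`` behavior described above,
        assuming the hypothetical ``User`` mapping used elsewhere in these
        docstrings::

            q = sess.query(User).order_by(User.name)

            # cancel all ordering, including any ORDER BY configured on
            # the mapper; useful before wrapping the query in a subquery
            # or a UNION
            q = q.order_by(None)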
- - """ - - if len(criterion) == 1: - if criterion[0] is False: - if '_order_by' in self.__dict__: - del self._order_by - return - if criterion[0] is None: - self._order_by = None - return - - criterion = self._adapt_col_list(criterion) - - if self._order_by is False or self._order_by is None: - self._order_by = criterion - else: - self._order_by = self._order_by + criterion - - @_generative(_no_statement_condition, _no_limit_offset) - def group_by(self, *criterion): - """apply one or more GROUP BY criterion to the query and return - the newly resulting :class:`.Query`""" - - criterion = list(chain(*[_orm_columns(c) for c in criterion])) - criterion = self._adapt_col_list(criterion) - - if self._group_by is False: - self._group_by = criterion - else: - self._group_by = self._group_by + criterion - - @_generative(_no_statement_condition, _no_limit_offset) - def having(self, criterion): - """apply a HAVING criterion to the query and return the - newly resulting :class:`.Query`. - - :meth:`~.Query.having` is used in conjunction with - :meth:`~.Query.group_by`. - - HAVING criterion makes it possible to use filters on aggregate - functions like COUNT, SUM, AVG, MAX, and MIN, eg.:: - - q = session.query(User.id).\\ - join(User.addresses).\\ - group_by(User.id).\\ - having(func.count(Address.id) > 2) - - """ - - criterion = expression._expression_literal_as_text(criterion) - - if criterion is not None and \ - not isinstance(criterion, sql.ClauseElement): - raise sa_exc.ArgumentError( - "having() argument must be of type " - "sqlalchemy.sql.ClauseElement or string") - - criterion = self._adapt_clause(criterion, True, True) - - if self._having is not None: - self._having = self._having & criterion - else: - self._having = criterion - - def union(self, *q): - """Produce a UNION of this Query against one or more queries. - - e.g.:: - - q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar') - q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo') - - q3 = q1.union(q2) - - The method accepts multiple Query objects so as to control - the level of nesting. A series of ``union()`` calls such as:: - - x.union(y).union(z).all() - - will nest on each ``union()``, and produces:: - - SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION - SELECT * FROM y) UNION SELECT * FROM Z) - - Whereas:: - - x.union(y, z).all() - - produces:: - - SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION - SELECT * FROM Z) - - Note that many database backends do not allow ORDER BY to - be rendered on a query called within UNION, EXCEPT, etc. - To disable all ORDER BY clauses including those configured - on mappers, issue ``query.order_by(None)`` - the resulting - :class:`.Query` object will not render ORDER BY within - its SELECT statement. - - """ - - return self._from_selectable( - expression.union(*([self] + list(q)))) - - def union_all(self, *q): - """Produce a UNION ALL of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.union_all(*([self] + list(q))) - ) - - def intersect(self, *q): - """Produce an INTERSECT of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.intersect(*([self] + list(q))) - ) - - def intersect_all(self, *q): - """Produce an INTERSECT ALL of this Query against one or more queries. 
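        All of the set operations above accept multiple queries in order to
        control nesting; a brief sketch, reusing the hypothetical
        ``SomeClass`` queries from the :meth:`~.Query.union` docstring::

            x = sess.query(SomeClass).filter(SomeClass.foo == 'bar')
            y = sess.query(SomeClass).filter(SomeClass.bar == 'foo')
            z = sess.query(SomeClass).filter(SomeClass.foo == 'baz')

            q1 = x.union(y).union(z)   # nests: (x UNION y) UNION z
            q2 = x.union(y, z)         # flat:  x UNION y UNION z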
- - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.intersect_all(*([self] + list(q))) - ) - - def except_(self, *q): - """Produce an EXCEPT of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.except_(*([self] + list(q))) - ) - - def except_all(self, *q): - """Produce an EXCEPT ALL of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.except_all(*([self] + list(q))) - ) - - def join(self, *props, **kwargs): - """Create a SQL JOIN against this :class:`.Query` object's criterion - and apply generatively, returning the newly resulting :class:`.Query`. - - **Simple Relationship Joins** - - Consider a mapping between two classes ``User`` and ``Address``, - with a relationship ``User.addresses`` representing a collection - of ``Address`` objects associated with each ``User``. The most - common usage of :meth:`~.Query.join` is to create a JOIN along this - relationship, using the ``User.addresses`` attribute as an indicator - for how this should occur:: - - q = session.query(User).join(User.addresses) - - Where above, the call to :meth:`~.Query.join` along ``User.addresses`` - will result in SQL equivalent to:: - - SELECT user.* FROM user JOIN address ON user.id = address.user_id - - In the above example we refer to ``User.addresses`` as passed to - :meth:`~.Query.join` as the *on clause*, that is, it indicates - how the "ON" portion of the JOIN should be constructed. For a - single-entity query such as the one above (i.e. we start by selecting - only from ``User`` and nothing else), the relationship can also be - specified by its string name:: - - q = session.query(User).join("addresses") - - :meth:`~.Query.join` can also accommodate multiple - "on clause" arguments to produce a chain of joins, such as below - where a join across four related entities is constructed:: - - q = session.query(User).join("orders", "items", "keywords") - - The above would be shorthand for three separate calls to - :meth:`~.Query.join`, each using an explicit attribute to indicate - the source entity:: - - q = session.query(User).\\ - join(User.orders).\\ - join(Order.items).\\ - join(Item.keywords) - - **Joins to a Target Entity or Selectable** - - A second form of :meth:`~.Query.join` allows any mapped entity - or core selectable construct as a target. In this usage, - :meth:`~.Query.join` will attempt - to create a JOIN along the natural foreign key relationship between - two entities:: - - q = session.query(User).join(Address) - - The above calling form of :meth:`~.Query.join` will raise an error if - either there are no foreign keys between the two entities, or if - there are multiple foreign key linkages between them. In the - above calling form, :meth:`~.Query.join` is called upon to - create the "on clause" automatically for us. The target can - be any mapped entity or selectable, such as a :class:`.Table`:: - - q = session.query(User).join(addresses_table) - - **Joins to a Target with an ON Clause** - - The third calling form allows both the target entity as well - as the ON clause to be passed explicitly. Suppose for - example we wanted to join to ``Address`` twice, using - an alias the second time. 
We use :func:`~sqlalchemy.orm.aliased` - to create a distinct alias of ``Address``, and join - to it using the ``target, onclause`` form, so that the - alias can be specified explicitly as the target along with - the relationship to instruct how the ON clause should proceed:: - - a_alias = aliased(Address) - - q = session.query(User).\\ - join(User.addresses).\\ - join(a_alias, User.addresses).\\ - filter(Address.email_address=='ed@foo.com').\\ - filter(a_alias.email_address=='ed@bar.com') - - Where above, the generated SQL would be similar to:: - - SELECT user.* FROM user - JOIN address ON user.id = address.user_id - JOIN address AS address_1 ON user.id=address_1.user_id - WHERE address.email_address = :email_address_1 - AND address_1.email_address = :email_address_2 - - The two-argument calling form of :meth:`~.Query.join` - also allows us to construct arbitrary joins with SQL-oriented - "on clause" expressions, not relying upon configured relationships - at all. Any SQL expression can be passed as the ON clause - when using the two-argument form, which should refer to the target - entity in some way as well as an applicable source entity:: - - q = session.query(User).join(Address, User.id==Address.user_id) - - .. versionchanged:: 0.7 - In SQLAlchemy 0.6 and earlier, the two argument form of - :meth:`~.Query.join` requires the usage of a tuple: - ``query(User).join((Address, User.id==Address.user_id))``\ . - This calling form is accepted in 0.7 and further, though - is not necessary unless multiple join conditions are passed to - a single :meth:`~.Query.join` call, which itself is also not - generally necessary as it is now equivalent to multiple - calls (this wasn't always the case). - - **Advanced Join Targeting and Adaption** - - There is a lot of flexibility in what the "target" can be when using - :meth:`~.Query.join`. As noted previously, it also accepts - :class:`.Table` constructs and other selectables such as - :func:`.alias` and :func:`.select` constructs, with either the one - or two-argument forms:: - - addresses_q = select([Address.user_id]).\\ - where(Address.email_address.endswith("@bar.com")).\\ - alias() - - q = session.query(User).\\ - join(addresses_q, addresses_q.c.user_id==User.id) - - :meth:`~.Query.join` also features the ability to *adapt* a - :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target - selectable. Below we construct a JOIN from ``User`` to a subquery - against ``Address``, allowing the relationship denoted by - ``User.addresses`` to *adapt* itself to the altered target:: - - address_subq = session.query(Address).\\ - filter(Address.email_address == 'ed@foo.com').\\ - subquery() - - q = session.query(User).join(address_subq, User.addresses) - - Producing SQL similar to:: - - SELECT user.* FROM user - JOIN ( - SELECT address.id AS id, - address.user_id AS user_id, - address.email_address AS email_address - FROM address - WHERE address.email_address = :email_address_1 - ) AS anon_1 ON user.id = anon_1.user_id - - The above form allows one to fall back onto an explicit ON - clause at any time:: - - q = session.query(User).\\ - join(address_subq, User.id==address_subq.c.user_id) - - **Controlling what to Join From** - - While :meth:`~.Query.join` exclusively deals with the "right" - side of the JOIN, we can also control the "left" side, in those - cases where it's needed, using :meth:`~.Query.select_from`. 
-        Below we construct a query against ``Address`` but can still
-        make usage of ``User.addresses`` as our ON clause by instructing
-        the :class:`.Query` to select first from the ``User``
-        entity::
-
-            q = session.query(Address).select_from(User).\\
-                join(User.addresses).\\
-                filter(User.name == 'ed')
-
-        Which will produce SQL similar to::
-
-            SELECT address.* FROM user
-            JOIN address ON user.id=address.user_id
-            WHERE user.name = :name_1
-
-        **Constructing Aliases Anonymously**
-
-        :meth:`~.Query.join` can construct anonymous aliases
-        using the ``aliased=True`` flag.  This feature is useful
-        when a query is being joined algorithmically, such as
-        when querying self-referentially to an arbitrary depth::
-
-            q = session.query(Node).\\
-                join("children", "children", aliased=True)
-
-        When ``aliased=True`` is used, the actual "alias" construct
-        is not explicitly available.  To work with it, methods such as
-        :meth:`.Query.filter` will adapt the incoming entity to
-        the last join point::
-
-            q = session.query(Node).\\
-                join("children", "children", aliased=True).\\
-                filter(Node.name == 'grandchild 1')
-
-        When using automatic aliasing, the ``from_joinpoint=True``
-        argument can allow a multi-node join to be broken into
-        multiple calls to :meth:`~.Query.join`, so that
-        each path along the way can be further filtered::
-
-            q = session.query(Node).\\
-                join("children", aliased=True).\\
-                filter(Node.name == 'child 1').\\
-                join("children", aliased=True, from_joinpoint=True).\\
-                filter(Node.name == 'grandchild 1')
-
-        The filtering aliases above can then be reset back to the
-        original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
-
-            q = session.query(Node).\\
-                join("children", "children", aliased=True).\\
-                filter(Node.name == 'grandchild 1').\\
-                reset_joinpoint().\\
-                filter(Node.name == 'parent 1')
-
-        For an example of ``aliased=True``, see the distribution
-        example :ref:`examples_xmlpersistence` which illustrates
-        an XPath-like query system using algorithmic joins.
-
-        :param \*props: A collection of one or more join conditions,
-         each consisting of a relationship-bound attribute or string
-         relationship name representing an "on clause", or a single
-         target entity, or a tuple in the form of ``(target, onclause)``.
-         A special two-argument calling form ``target, onclause``
-         is also accepted.
-        :param aliased=False: If True, indicate that the JOIN target should be
-         anonymously aliased.  Subsequent calls to :meth:`~.Query.filter`
-         and similar will adapt the incoming criterion to the target
-         alias, until :meth:`~.Query.reset_joinpoint` is called.
-        :param isouter=False: If True, the join used will be a left outer join,
-         just as if the :meth:`.Query.outerjoin` method were called.  This
-         flag is here to maintain consistency with the same flag as accepted
-         by :meth:`.FromClause.join` and other Core constructs.
-
-        .. versionadded:: 1.0.0
-
-        :param from_joinpoint=False: When using ``aliased=True``, a setting
-         of True here will cause the join to be from the most recent
-         joined target, rather than starting back from the original
-         FROM clauses of the query.
-
-        .. seealso::
-
-            :ref:`ormtutorial_joins` in the ORM tutorial.
-
-            :ref:`inheritance_toplevel` for details on how
-            :meth:`~.Query.join` is used for inheritance relationships.
-
-            :func:`.orm.join` - a standalone ORM-level join function,
-            used internally by :meth:`.Query.join`, which in previous
-            SQLAlchemy versions was the primary ORM-level joining interface.
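        To gather the calling forms above in one place, a brief sketch using
        the hypothetical ``User`` / ``Address`` mapping from this
        docstring::

            # 1. relationship attribute as the on clause
            q = session.query(User).join(User.addresses)

            # 2. string relationship name (single-entity query only)
            q = session.query(User).join("addresses")

            # 3. target entity; ON derived from the natural foreign key
            q = session.query(User).join(Address)

            # 4. explicit target and on clause
            q = session.query(User).join(Address, User.id == Address.user_id)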
- - """ - aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\ - kwargs.pop('from_joinpoint', False),\ - kwargs.pop('isouter', False) - if kwargs: - raise TypeError("unknown arguments: %s" % - ', '.join(sorted(kwargs))) - return self._join(props, - outerjoin=isouter, create_aliases=aliased, - from_joinpoint=from_joinpoint) - - def outerjoin(self, *props, **kwargs): - """Create a left outer join against this ``Query`` object's criterion - and apply generatively, returning the newly resulting ``Query``. - - Usage is the same as the ``join()`` method. - - """ - aliased, from_joinpoint = kwargs.pop('aliased', False), \ - kwargs.pop('from_joinpoint', False) - if kwargs: - raise TypeError("unknown arguments: %s" % - ', '.join(sorted(kwargs))) - return self._join(props, - outerjoin=True, create_aliases=aliased, - from_joinpoint=from_joinpoint) - - def _update_joinpoint(self, jp): - self._joinpoint = jp - # copy backwards to the root of the _joinpath - # dict, so that no existing dict in the path is mutated - while 'prev' in jp: - f, prev = jp['prev'] - prev = prev.copy() - prev[f] = jp - jp['prev'] = (f, prev) - jp = prev - self._joinpath = jp - - @_generative(_no_statement_condition, _no_limit_offset) - def _join(self, keys, outerjoin, create_aliases, from_joinpoint): - """consumes arguments from join() or outerjoin(), places them into a - consistent format with which to form the actual JOIN constructs. - - """ - - if not from_joinpoint: - self._reset_joinpoint() - - if len(keys) == 2 and \ - isinstance(keys[0], (expression.FromClause, - type, AliasedClass)) and \ - isinstance(keys[1], (str, expression.ClauseElement, - interfaces.PropComparator)): - # detect 2-arg form of join and - # convert to a tuple. - keys = (keys,) - - keylist = util.to_list(keys) - for idx, arg1 in enumerate(keylist): - if isinstance(arg1, tuple): - # "tuple" form of join, multiple - # tuples are accepted as well. The simpler - # "2-arg" form is preferred. May deprecate - # the "tuple" usage. - arg1, arg2 = arg1 - else: - arg2 = None - - # determine onclause/right_entity. there - # is a little bit of legacy behavior still at work here - # which means they might be in either order. may possibly - # lock this down to (right_entity, onclause) in 0.6. 
- if isinstance( - arg1, (interfaces.PropComparator, util.string_types)): - right_entity, onclause = arg2, arg1 - else: - right_entity, onclause = arg1, arg2 - - left_entity = prop = None - - if isinstance(onclause, interfaces.PropComparator): - of_type = getattr(onclause, '_of_type', None) - else: - of_type = None - - if isinstance(onclause, util.string_types): - left_entity = self._joinpoint_zero() - - descriptor = _entity_descriptor(left_entity, onclause) - onclause = descriptor - - # check for q.join(Class.propname, from_joinpoint=True) - # and Class is that of the current joinpoint - elif from_joinpoint and \ - isinstance(onclause, interfaces.PropComparator): - left_entity = onclause._parententity - - info = inspect(self._joinpoint_zero()) - left_mapper, left_selectable, left_is_aliased = \ - getattr(info, 'mapper', None), \ - info.selectable, \ - getattr(info, 'is_aliased_class', None) - - if left_mapper is left_entity: - left_entity = self._joinpoint_zero() - descriptor = _entity_descriptor(left_entity, - onclause.key) - onclause = descriptor - - if isinstance(onclause, interfaces.PropComparator): - if right_entity is None: - if of_type: - right_entity = of_type - else: - right_entity = onclause.property.mapper - - left_entity = onclause._parententity - - prop = onclause.property - if not isinstance(onclause, attributes.QueryableAttribute): - onclause = prop - - if not create_aliases: - # check for this path already present. - # don't render in that case. - edge = (left_entity, right_entity, prop.key) - if edge in self._joinpoint: - # The child's prev reference might be stale -- - # it could point to a parent older than the - # current joinpoint. If this is the case, - # then we need to update it and then fix the - # tree's spine with _update_joinpoint. Copy - # and then mutate the child, which might be - # shared by a different query object. - jp = self._joinpoint[edge].copy() - jp['prev'] = (edge, self._joinpoint) - self._update_joinpoint(jp) - - if idx == len(keylist) - 1: - util.warn( - "Pathed join target %s has already " - "been joined to; skipping" % prop) - continue - - elif onclause is not None and right_entity is None: - # TODO: no coverage here - raise NotImplementedError("query.join(a==b) not supported.") - - self._join_left_to_right( - left_entity, - right_entity, onclause, - outerjoin, create_aliases, prop) - - def _join_left_to_right(self, left, right, - onclause, outerjoin, create_aliases, prop): - """append a JOIN to the query's from clause.""" - - self._polymorphic_adapters = self._polymorphic_adapters.copy() - - if left is None: - if self._from_obj: - left = self._from_obj[0] - elif self._entities: - left = self._entities[0].entity_zero_or_selectable - - if left is None: - raise sa_exc.InvalidRequestError( - "Don't know how to join from %s; please use " - "select_from() to establish the left " - "entity/selectable of this join" % self._entities[0]) - - if left is right and \ - not create_aliases: - raise sa_exc.InvalidRequestError( - "Can't construct a join from %s to %s, they " - "are the same entity" % - (left, right)) - - l_info = inspect(left) - r_info = inspect(right) - - overlap = False - if not create_aliases: - right_mapper = getattr(r_info, "mapper", None) - # if the target is a joined inheritance mapping, - # be more liberal about auto-aliasing. 
- if right_mapper and ( - right_mapper.with_polymorphic or - isinstance(right_mapper.mapped_table, expression.Join) - ): - for from_obj in self._from_obj or [l_info.selectable]: - if sql_util.selectables_overlap( - l_info.selectable, from_obj) and \ - sql_util.selectables_overlap( - from_obj, r_info.selectable): - overlap = True - break - - if (overlap or not create_aliases) and \ - l_info.selectable is r_info.selectable: - raise sa_exc.InvalidRequestError( - "Can't join table/selectable '%s' to itself" % - l_info.selectable) - - right, onclause = self._prepare_right_side( - r_info, right, onclause, - create_aliases, - prop, overlap) - - # if joining on a MapperProperty path, - # track the path to prevent redundant joins - if not create_aliases and prop: - self._update_joinpoint({ - '_joinpoint_entity': right, - 'prev': ((left, right, prop.key), self._joinpoint) - }) - else: - self._joinpoint = {'_joinpoint_entity': right} - - self._join_to_left(l_info, left, right, onclause, outerjoin) - - def _prepare_right_side(self, r_info, right, onclause, create_aliases, - prop, overlap): - info = r_info - - right_mapper, right_selectable, right_is_aliased = \ - getattr(info, 'mapper', None), \ - info.selectable, \ - getattr(info, 'is_aliased_class', False) - - if right_mapper: - self._join_entities += (info, ) - - if right_mapper and prop and \ - not right_mapper.common_parent(prop.mapper): - raise sa_exc.InvalidRequestError( - "Join target %s does not correspond to " - "the right side of join condition %s" % (right, onclause) - ) - - if not right_mapper and prop: - right_mapper = prop.mapper - - need_adapter = False - - if right_mapper and right is right_selectable: - if not right_selectable.is_derived_from( - right_mapper.mapped_table): - raise sa_exc.InvalidRequestError( - "Selectable '%s' is not derived from '%s'" % - (right_selectable.description, - right_mapper.mapped_table.description)) - - if isinstance(right_selectable, expression.SelectBase): - # TODO: this isn't even covered now! - right_selectable = right_selectable.alias() - need_adapter = True - - right = aliased(right_mapper, right_selectable) - - aliased_entity = right_mapper and \ - not right_is_aliased and \ - ( - right_mapper.with_polymorphic and isinstance( - right_mapper._with_polymorphic_selectable, - expression.Alias) - or - overlap # test for overlap: - # orm/inheritance/relationships.py - # SelfReferentialM2MTest - ) - - if not need_adapter and (create_aliases or aliased_entity): - right = aliased(right, flat=True) - need_adapter = True - - # if an alias() of the right side was generated here, - # apply an adapter to all subsequent filter() calls - # until reset_joinpoint() is called. - if need_adapter: - self._filter_aliases = ORMAdapter( - right, - equivalents=right_mapper and - right_mapper._equivalent_columns or {}, - chain_to=self._filter_aliases) - - # if the onclause is a ClauseElement, adapt it with any - # adapters that are in place right now - if isinstance(onclause, expression.ClauseElement): - onclause = self._adapt_clause(onclause, True, True) - - # if an alias() on the right side was generated, - # which is intended to wrap a the right side in a subquery, - # ensure that columns retrieved from this target in the result - # set are also adapted. 
- if aliased_entity and not create_aliases: - self._mapper_loads_polymorphically_with( - right_mapper, - ORMAdapter( - right, - equivalents=right_mapper._equivalent_columns - ) - ) - - return right, onclause - - def _join_to_left(self, l_info, left, right, onclause, outerjoin): - info = l_info - left_mapper = getattr(info, 'mapper', None) - left_selectable = info.selectable - - if self._from_obj: - replace_clause_index, clause = sql_util.find_join_source( - self._from_obj, - left_selectable) - if clause is not None: - try: - clause = orm_join(clause, - right, - onclause, isouter=outerjoin) - except sa_exc.ArgumentError as ae: - raise sa_exc.InvalidRequestError( - "Could not find a FROM clause to join from. " - "Tried joining to %s, but got: %s" % (right, ae)) - - self._from_obj = \ - self._from_obj[:replace_clause_index] + \ - (clause, ) + \ - self._from_obj[replace_clause_index + 1:] - return - - if left_mapper: - for ent in self._entities: - if ent.corresponds_to(left): - clause = ent.selectable - break - else: - clause = left - else: - clause = left_selectable - - assert clause is not None - try: - clause = orm_join(clause, right, onclause, isouter=outerjoin) - except sa_exc.ArgumentError as ae: - raise sa_exc.InvalidRequestError( - "Could not find a FROM clause to join from. " - "Tried joining to %s, but got: %s" % (right, ae)) - self._from_obj = self._from_obj + (clause,) - - def _reset_joinpoint(self): - self._joinpoint = self._joinpath - self._filter_aliases = None - - @_generative(_no_statement_condition) - def reset_joinpoint(self): - """Return a new :class:`.Query`, where the "join point" has - been reset back to the base FROM entities of the query. - - This method is usually used in conjunction with the - ``aliased=True`` feature of the :meth:`~.Query.join` - method. See the example in :meth:`~.Query.join` for how - this is used. - - """ - self._reset_joinpoint() - - @_generative(_no_clauseelement_condition) - def select_from(self, *from_obj): - """Set the FROM clause of this :class:`.Query` explicitly. - - :meth:`.Query.select_from` is often used in conjunction with - :meth:`.Query.join` in order to control which entity is selected - from on the "left" side of the join. - - The entity or selectable object here effectively replaces the - "left edge" of any calls to :meth:`~.Query.join`, when no - joinpoint is otherwise established - usually, the default "join - point" is the leftmost entity in the :class:`~.Query` object's - list of entities to be selected. - - A typical example:: - - q = session.query(Address).select_from(User).\\ - join(User.addresses).\\ - filter(User.name == 'ed') - - Which produces SQL equivalent to:: - - SELECT address.* FROM user - JOIN address ON user.id=address.user_id - WHERE user.name = :name_1 - - :param \*from_obj: collection of one or more entities to apply - to the FROM clause. Entities can be mapped classes, - :class:`.AliasedClass` objects, :class:`.Mapper` objects - as well as core :class:`.FromClause` elements like subqueries. - - .. versionchanged:: 0.9 - This method no longer applies the given FROM object - to be the selectable from which matching entities - select from; the :meth:`.select_entity_from` method - now accomplishes this. See that method for a description - of this behavior. - - .. 
seealso:: - - :meth:`~.Query.join` - - :meth:`.Query.select_entity_from` - - """ - - self._set_select_from(from_obj, False) - - @_generative(_no_clauseelement_condition) - def select_entity_from(self, from_obj): - """Set the FROM clause of this :class:`.Query` to a - core selectable, applying it as a replacement FROM clause - for corresponding mapped entities. - - This method is similar to the :meth:`.Query.select_from` - method, in that it sets the FROM clause of the query. However, - where :meth:`.Query.select_from` only affects what is placed - in the FROM, this method also applies the given selectable - to replace the FROM which the selected entities would normally - select from. - - The given ``from_obj`` must be an instance of a :class:`.FromClause`, - e.g. a :func:`.select` or :class:`.Alias` construct. - - An example would be a :class:`.Query` that selects ``User`` entities, - but uses :meth:`.Query.select_entity_from` to have the entities - selected from a :func:`.select` construct instead of the - base ``user`` table:: - - select_stmt = select([User]).where(User.id == 7) - - q = session.query(User).\\ - select_entity_from(select_stmt).\\ - filter(User.name == 'ed') - - The query generated will select ``User`` entities directly - from the given :func:`.select` construct, and will be:: - - SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name - FROM (SELECT "user".id AS id, "user".name AS name - FROM "user" - WHERE "user".id = :id_1) AS anon_1 - WHERE anon_1.name = :name_1 - - Notice above that even the WHERE criterion was "adapted" such that - the ``anon_1`` subquery effectively replaces all references to the - ``user`` table, except for the one that it refers to internally. - - Compare this to :meth:`.Query.select_from`, which as of - version 0.9, does not affect existing entities. The - statement below:: - - q = session.query(User).\\ - select_from(select_stmt).\\ - filter(User.name == 'ed') - - Produces SQL where both the ``user`` table as well as the - ``select_stmt`` construct are present as separate elements - in the FROM clause. No "adaptation" of the ``user`` table - is applied:: - - SELECT "user".id AS user_id, "user".name AS user_name - FROM "user", (SELECT "user".id AS id, "user".name AS name - FROM "user" - WHERE "user".id = :id_1) AS anon_1 - WHERE "user".name = :name_1 - - :meth:`.Query.select_entity_from` maintains an older - behavior of :meth:`.Query.select_from`. In modern usage, - similar results can also be achieved using :func:`.aliased`:: - - select_stmt = select([User]).where(User.id == 7) - user_from_select = aliased(User, select_stmt.alias()) - - q = session.query(user_from_select) - - :param from_obj: a :class:`.FromClause` object that will replace - the FROM clause of this :class:`.Query`. - - .. seealso:: - - :meth:`.Query.select_from` - - .. versionadded:: 0.8 - :meth:`.Query.select_entity_from` was added to specify - the specific behavior of entity replacement, however - the :meth:`.Query.select_from` maintains this behavior - as well until 0.9. - - """ - - self._set_select_from([from_obj], True) - - def __getitem__(self, item): - if isinstance(item, slice): - start, stop, step = util.decode_slice(item) - - if isinstance(stop, int) and \ - isinstance(start, int) and \ - stop - start <= 0: - return [] - - # perhaps we should execute a count() here so that we - # can still use LIMIT/OFFSET ? 
-            elif (isinstance(start, int) and start < 0) \
-                    or (isinstance(stop, int) and stop < 0):
-                return list(self)[item]
-
-            res = self.slice(start, stop)
-            if step is not None:
-                return list(res)[None:None:item.step]
-            else:
-                return list(res)
-        else:
-            if item == -1:
-                return list(self)[-1]
-            else:
-                return list(self[item:item + 1])[0]
-
-    @_generative(_no_statement_condition)
-    def slice(self, start, stop):
-        """apply LIMIT/OFFSET to the ``Query`` based on a
-        range and return the newly resulting ``Query``."""
-
-        if start is not None and stop is not None:
-            self._offset = (self._offset or 0) + start
-            self._limit = stop - start
-        elif start is None and stop is not None:
-            self._limit = stop
-        elif start is not None and stop is None:
-            self._offset = (self._offset or 0) + start
-
-        if self._offset == 0:
-            self._offset = None
-
-    @_generative(_no_statement_condition)
-    def limit(self, limit):
-        """Apply a ``LIMIT`` to the query and return the newly resulting
-        ``Query``.
-
-        """
-        self._limit = limit
-
-    @_generative(_no_statement_condition)
-    def offset(self, offset):
-        """Apply an ``OFFSET`` to the query and return the newly resulting
-        ``Query``.
-
-        """
-        self._offset = offset
-
-    @_generative(_no_statement_condition)
-    def distinct(self, *criterion):
-        """Apply a ``DISTINCT`` to the query and return the newly resulting
-        ``Query``.
-
-        .. note::
-
-            The :meth:`.distinct` call includes logic that will automatically
-            add columns from the ORDER BY of the query to the columns
-            clause of the SELECT statement, to satisfy the common need
-            of the database backend that ORDER BY columns be part of the
-            SELECT list when DISTINCT is used.  These columns *are not*
-            added to the list of columns actually fetched by the
-            :class:`.Query`, however, so would not affect results.
-            They are passed through when using the
-            :attr:`.Query.statement` accessor.
-
-        :param \*criterion: optional column expressions.  When present,
-         the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>)``
-         construct.
-
-        """
-        if not criterion:
-            self._distinct = True
-        else:
-            criterion = self._adapt_col_list(criterion)
-            if isinstance(self._distinct, list):
-                self._distinct += criterion
-            else:
-                self._distinct = criterion
-
-    @_generative()
-    def prefix_with(self, *prefixes):
-        """Apply the prefixes to the query and return the newly resulting
-        ``Query``.
-
-        :param \*prefixes: optional prefixes, typically strings,
-         not using any commas.  In particular it is useful for MySQL
-         keywords.
-
-        e.g.::
-
-            query = sess.query(User.name).\\
-                prefix_with('HIGH_PRIORITY').\\
-                prefix_with('SQL_SMALL_RESULT', 'ALL')
-
-        Would render::
-
-            SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
-            FROM users
-
-        .. versionadded:: 0.7.7
-
-        .. seealso::
-
-            :meth:`.HasPrefixes.prefix_with`
-
-        """
-        if self._prefixes:
-            self._prefixes += prefixes
-        else:
-            self._prefixes = prefixes
-
-    @_generative()
-    def suffix_with(self, *suffixes):
-        """Apply the suffix to the query and return the newly resulting
-        ``Query``.
-
-        :param \*suffixes: optional suffixes, typically strings,
-         not using any commas.
-
-        .. versionadded:: 1.0.0
-
-        .. seealso::
-
-            :meth:`.Query.prefix_with`
-
-            :meth:`.HasSuffixes.suffix_with`
-
-        """
-        if self._suffixes:
-            self._suffixes += suffixes
-        else:
-            self._suffixes = suffixes
-
-    def all(self):
-        """Return the results represented by this ``Query`` as a list.
-
-        This results in an execution of the underlying query.
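        The slicing behaviors documented above can be summarized in a short
        sketch (hypothetical ``User`` query; all three forms emit
        ``LIMIT 10 OFFSET 20`` on most backends)::

            q = session.query(User).order_by(User.id)

            page = q[20:30]                       # __getitem__ with a slice
            page = q.slice(20, 30).all()          # explicit equivalent
            page = q.offset(20).limit(10).all()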
- - """ - return list(self) - - @_generative(_no_clauseelement_condition) - def from_statement(self, statement): - """Execute the given SELECT statement and return results. - - This method bypasses all internal statement compilation, and the - statement is executed without modification. - - The statement is typically either a :func:`~.expression.text` - or :func:`~.expression.select` construct, and should return the set - of columns - appropriate to the entity class represented by this :class:`.Query`. - - .. seealso:: - - :ref:`orm_tutorial_literal_sql` - usage examples in the - ORM tutorial - - """ - statement = expression._expression_literal_as_text(statement) - - if not isinstance(statement, - (expression.TextClause, - expression.SelectBase)): - raise sa_exc.ArgumentError( - "from_statement accepts text(), select(), " - "and union() objects only.") - - self._statement = statement - - def first(self): - """Return the first result of this ``Query`` or - None if the result doesn't contain any row. - - first() applies a limit of one within the generated SQL, so that - only one primary entity row is generated on the server side - (note this may consist of multiple result rows if join-loaded - collections are present). - - Calling :meth:`.Query.first` results in an execution of the underlying query. - - """ - if self._statement is not None: - ret = list(self)[0:1] - else: - ret = list(self[0:1]) - if len(ret) > 0: - return ret[0] - else: - return None - - def one_or_none(self): - """Return at most one result or raise an exception. - - Returns ``None`` if the query selects - no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` - if multiple object identities are returned, or if multiple - rows are returned for a query that returns only scalar values - as opposed to full identity-mapped entities. - - Calling :meth:`.Query.one_or_none` results in an execution of the underlying - query. - - .. versionadded:: 1.0.9 - - Added :meth:`.Query.one_or_none` - - .. seealso:: - - :meth:`.Query.first` - - :meth:`.Query.one` - - - """ - ret = list(self) - - l = len(ret) - if l == 1: - return ret[0] - elif l == 0: - return None - else: - raise orm_exc.MultipleResultsFound( - "Multiple rows were found for one_or_none()") - - def one(self): - """Return exactly one result or raise an exception. - - Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects - no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` - if multiple object identities are returned, or if multiple - rows are returned for a query that returns only scalar values - as opposed to full identity-mapped entities. - - Calling :meth:`.one` results in an execution of the underlying query. - - .. seealso:: - - :meth:`.Query.first` - - :meth:`.Query.one_or_none` - - """ - ret = list(self) - - l = len(ret) - if l == 1: - return ret[0] - elif l == 0: - raise orm_exc.NoResultFound("No row was found for one()") - else: - raise orm_exc.MultipleResultsFound( - "Multiple rows were found for one()") - - def scalar(self): - """Return the first element of the first result or None - if no rows present. If multiple rows are returned, - raises MultipleResultsFound. - - >>> session.query(Item).scalar() - - >>> session.query(Item.id).scalar() - 1 - >>> session.query(Item.id).filter(Item.id < 0).scalar() - None - >>> session.query(Item.id, Item.name).scalar() - 1 - >>> session.query(func.count(Parent.id)).scalar() - 20 - - This results in an execution of the underlying query. 
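        A compact sketch of how the row-count outcomes differ across these
        methods, assuming a hypothetical ``User`` query::

            q = session.query(User).filter(User.name == 'ed')

            q.first()        # first row or None; applies a LIMIT of one
            q.one()          # exactly one row, else NoResultFound or
                             # MultipleResultsFound
            q.one_or_none()  # one row; None for zero rows;
                             # MultipleResultsFound for several
            q.scalar()       # first column of one(), or None if no row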
-
-        """
-        try:
-            ret = self.one()
-            if not isinstance(ret, tuple):
-                return ret
-            return ret[0]
-        except orm_exc.NoResultFound:
-            return None
-
-    def __iter__(self):
-        context = self._compile_context()
-        context.statement.use_labels = True
-        if self._autoflush and not self._populate_existing:
-            self.session._autoflush()
-        return self._execute_and_instances(context)
-
-    def _connection_from_session(self, **kw):
-        conn = self.session.connection(**kw)
-        if self._execution_options:
-            conn = conn.execution_options(**self._execution_options)
-        return conn
-
-    def _execute_and_instances(self, querycontext):
-        conn = self._connection_from_session(
-            mapper=self._bind_mapper(),
-            clause=querycontext.statement,
-            close_with_result=True)
-
-        result = conn.execute(querycontext.statement, self._params)
-        return loading.instances(querycontext.query, result, querycontext)
-
-    @property
-    def column_descriptions(self):
-        """Return metadata about the columns which would be
-        returned by this :class:`.Query`.
-
-        Format is a list of dictionaries::
-
-            user_alias = aliased(User, name='user2')
-            q = sess.query(User, User.id, user_alias)
-
-            # this expression:
-            q.column_descriptions
-
-            # would return:
-            [
-                {
-                    'name': 'User',
-                    'type': User,
-                    'aliased': False,
-                    'expr': User,
-                    'entity': User
-                },
-                {
-                    'name': 'id',
-                    'type': Integer(),
-                    'aliased': False,
-                    'expr': User.id,
-                    'entity': User
-                },
-                {
-                    'name': 'user2',
-                    'type': User,
-                    'aliased': True,
-                    'expr': user_alias,
-                    'entity': user_alias
-                }
-            ]
-
-        """
-
-        return [
-            {
-                'name': ent._label_name,
-                'type': ent.type,
-                'aliased': getattr(insp_ent, 'is_aliased_class', False),
-                'expr': ent.expr,
-                'entity':
-                    getattr(insp_ent, "entity", None)
-                    if ent.entity_zero is not None
-                    and not insp_ent.is_clause_element
-                    else None
-            }
-            for ent, insp_ent in [
-                (
-                    _ent,
-                    (inspect(_ent.entity_zero)
-                     if _ent.entity_zero is not None else None)
-                )
-                for _ent in self._entities
-            ]
-        ]
-
-    def instances(self, cursor, __context=None):
-        """Given a ResultProxy cursor as returned by connection.execute(),
-        return an ORM result as an iterator.
-
-        e.g.::
-
-            result = engine.execute("select * from users")
-            for u in session.query(User).instances(result):
-                print(u)
-        """
-        context = __context
-        if context is None:
-            context = QueryContext(self)
-
-        return loading.instances(self, cursor, context)
-
-    def merge_result(self, iterator, load=True):
-        """Merge a result into this :class:`.Query` object's Session.
-
-        Given an iterator returned by a :class:`.Query` of the same structure
-        as this one, return an identical iterator of results, with all mapped
-        instances merged into the session using :meth:`.Session.merge`.  This
-        is an optimized method which will merge all mapped instances,
-        preserving the structure of the result rows and unmapped columns with
-        less method overhead than that of calling :meth:`.Session.merge`
-        explicitly for each value.
-
-        The structure of the results is determined based on the column list of
-        this :class:`.Query` - if these do not correspond, unchecked errors
-        will occur.
-
-        The 'load' argument is the same as that of :meth:`.Session.merge`.
-
-        For an example of how :meth:`~.Query.merge_result` is used, see
-        the source code for the example :ref:`examples_caching`, where
-        :meth:`~.Query.merge_result` is used to efficiently restore state
-        from a cache back into a target :class:`.Session`.
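        A sketch of the cache-restoring pattern referenced above;
        ``cached_rows`` is a hypothetical, externally stored result list
        previously produced by an identically structured query::

            q = session.query(User).filter(User.name == 'ed')

            # merge the cached rows into the current Session without
            # re-emitting SQL for each instance
            merged = list(q.merge_result(cached_rows, load=False))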
- - """ - - return loading.merge_result(self, iterator, load) - - @property - def _select_args(self): - return { - 'limit': self._limit, - 'offset': self._offset, - 'distinct': self._distinct, - 'prefixes': self._prefixes, - 'suffixes': self._suffixes, - 'group_by': self._group_by or None, - 'having': self._having - } - - @property - def _should_nest_selectable(self): - kwargs = self._select_args - return (kwargs.get('limit') is not None or - kwargs.get('offset') is not None or - kwargs.get('distinct', False)) - - def exists(self): - """A convenience method that turns a query into an EXISTS subquery - of the form EXISTS (SELECT 1 FROM ... WHERE ...). - - e.g.:: - - q = session.query(User).filter(User.name == 'fred') - session.query(q.exists()) - - Producing SQL similar to:: - - SELECT EXISTS ( - SELECT 1 FROM users WHERE users.name = :name_1 - ) AS anon_1 - - The EXISTS construct is usually used in the WHERE clause:: - - session.query(User.id).filter(q.exists()).scalar() - - Note that some databases such as SQL Server don't allow an - EXISTS expression to be present in the columns clause of a - SELECT. To select a simple boolean value based on the exists - as a WHERE, use :func:`.literal`:: - - from sqlalchemy import literal - - session.query(literal(True)).filter(q.exists()).scalar() - - .. versionadded:: 0.8.1 - - """ - - # .add_columns() for the case that we are a query().select_from(X), - # so that ".statement" can be produced (#2995) but also without - # omitting the FROM clause from a query(X) (#2818); - # .with_only_columns() after we have a core select() so that - # we get just "SELECT 1" without any entities. - return sql.exists(self.add_columns('1').with_labels(). - statement.with_only_columns([1])) - - def count(self): - """Return a count of rows this Query would return. - - This generates the SQL for this Query as follows:: - - SELECT count(1) AS count_1 FROM ( - SELECT - ) AS anon_1 - - .. versionchanged:: 0.7 - The above scheme is newly refined as of 0.7b3. - - For fine grained control over specific columns - to count, to skip the usage of a subquery or - otherwise control of the FROM clause, - or to use other aggregate functions, - use :attr:`~sqlalchemy.sql.expression.func` - expressions in conjunction - with :meth:`~.Session.query`, i.e.:: - - from sqlalchemy import func - - # count User records, without - # using a subquery. - session.query(func.count(User.id)) - - # return count of user "id" grouped - # by "name" - session.query(func.count(User.id)).\\ - group_by(User.name) - - from sqlalchemy import distinct - - # count distinct "name" values - session.query(func.count(distinct(User.name))) - - """ - col = sql.func.count(sql.literal_column('*')) - return self.from_self(col).scalar() - - def delete(self, synchronize_session='evaluate'): - """Perform a bulk delete query. - - Deletes rows matched by this query from the database. - - E.g.:: - - sess.query(User).filter(User.age == 25).\\ - delete(synchronize_session=False) - - sess.query(User).filter(User.age == 25).\\ - delete(synchronize_session='evaluate') - - .. warning:: The :meth:`.Query.delete` method is a "bulk" operation, - which bypasses ORM unit-of-work automation in favor of greater - performance. **Please read all caveats and warnings below.** - - :param synchronize_session: chooses the strategy for the removal of - matched objects from the session. Valid values are: - - ``False`` - don't synchronize the session. 
This option is the most - efficient and is reliable once the session is expired, which - typically occurs after a commit(), or explicitly using - expire_all(). Before the expiration, objects may still remain in - the session which were in fact deleted which can lead to confusing - results if they are accessed via get() or already loaded - collections. - - ``'fetch'`` - performs a select query before the delete to find - objects that are matched by the delete query and need to be - removed from the session. Matched objects are removed from the - session. - - ``'evaluate'`` - Evaluate the query's criteria in Python straight - on the objects in the session. If evaluation of the criteria isn't - implemented, an error is raised. - - The expression evaluator currently doesn't account for differing - string collations between the database and Python. - - :return: the count of rows matched as returned by the database's - "row count" feature. - - .. warning:: **Additional Caveats for bulk query deletes** - - * The method does **not** offer in-Python cascading of - relationships - it is assumed that ON DELETE CASCADE/SET - NULL/etc. is configured for any foreign key references - which require it, otherwise the database may emit an - integrity violation if foreign key references are being - enforced. - - After the DELETE, dependent objects in the - :class:`.Session` which were impacted by an ON DELETE - may not contain the current state, or may have been - deleted. This issue is resolved once the - :class:`.Session` is expired, which normally occurs upon - :meth:`.Session.commit` or can be forced by using - :meth:`.Session.expire_all`. Accessing an expired - object whose row has been deleted will invoke a SELECT - to locate the row; when the row is not found, an - :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is - raised. - - * The ``'fetch'`` strategy results in an additional - SELECT statement emitted and will significantly reduce - performance. - - * The ``'evaluate'`` strategy performs a scan of - all matching objects within the :class:`.Session`; if the - contents of the :class:`.Session` are expired, such as - via a proceeding :meth:`.Session.commit` call, **this will - result in SELECT queries emitted for every matching object**. - - * The :meth:`.MapperEvents.before_delete` and - :meth:`.MapperEvents.after_delete` - events **are not invoked** from this method. Instead, the - :meth:`.SessionEvents.after_bulk_delete` method is provided to - act upon a mass DELETE of entity rows. - - .. seealso:: - - :meth:`.Query.update` - - :ref:`inserts_and_updates` - Core SQL tutorial - - """ - # TODO: cascades need handling. - - delete_op = persistence.BulkDelete.factory( - self, synchronize_session) - delete_op.exec_() - return delete_op.rowcount - - def update(self, values, synchronize_session='evaluate', update_args=None): - """Perform a bulk update query. - - Updates rows matched by this query in the database. - - E.g.:: - - sess.query(User).filter(User.age == 25).\\ - update({User.age: User.age - 10}, synchronize_session=False) - - sess.query(User).filter(User.age == 25).\\ - update({"age": User.age - 10}, synchronize_session='evaluate') - - - .. warning:: The :meth:`.Query.update` method is a "bulk" operation, - which bypasses ORM unit-of-work automation in favor of greater - performance. 
**Please read all caveats and warnings below.** - - - :param values: a dictionary with attributes names, or alternatively - mapped attributes or SQL expressions, as keys, and literal - values or sql expressions as values. - - .. versionchanged:: 1.0.0 - string names in the values dictionary - are now resolved against the mapped entity; previously, these - strings were passed as literal column names with no mapper-level - translation. - - :param synchronize_session: chooses the strategy to update the - attributes on objects in the session. Valid values are: - - ``False`` - don't synchronize the session. This option is the most - efficient and is reliable once the session is expired, which - typically occurs after a commit(), or explicitly using - expire_all(). Before the expiration, updated objects may still - remain in the session with stale values on their attributes, which - can lead to confusing results. - - ``'fetch'`` - performs a select query before the update to find - objects that are matched by the update query. The updated - attributes are expired on matched objects. - - ``'evaluate'`` - Evaluate the Query's criteria in Python straight - on the objects in the session. If evaluation of the criteria isn't - implemented, an exception is raised. - - The expression evaluator currently doesn't account for differing - string collations between the database and Python. - - :param update_args: Optional dictionary, if present will be passed - to the underlying :func:`.update` construct as the ``**kw`` for - the object. May be used to pass dialect-specific arguments such - as ``mysql_limit``. - - .. versionadded:: 1.0.0 - - :return: the count of rows matched as returned by the database's - "row count" feature. - - .. warning:: **Additional Caveats for bulk query updates** - - * The method does **not** offer in-Python cascading of - relationships - it is assumed that ON UPDATE CASCADE is - configured for any foreign key references which require - it, otherwise the database may emit an integrity - violation if foreign key references are being enforced. - - After the UPDATE, dependent objects in the - :class:`.Session` which were impacted by an ON UPDATE - CASCADE may not contain the current state; this issue is - resolved once the :class:`.Session` is expired, which - normally occurs upon :meth:`.Session.commit` or can be - forced by using :meth:`.Session.expire_all`. - - * The ``'fetch'`` strategy results in an additional - SELECT statement emitted and will significantly reduce - performance. - - * The ``'evaluate'`` strategy performs a scan of - all matching objects within the :class:`.Session`; if the - contents of the :class:`.Session` are expired, such as - via a proceeding :meth:`.Session.commit` call, **this will - result in SELECT queries emitted for every matching object**. - - * The method supports multiple table updates, as detailed - in :ref:`multi_table_updates`, and this behavior does - extend to support updates of joined-inheritance and - other multiple table mappings. However, the **join - condition of an inheritance mapper is not - automatically rendered**. Care must be taken in any - multiple-table update to explicitly include the joining - condition between those tables, even in mappings where - this is normally automatic. E.g. 
if a class ``Engineer`` - subclasses ``Employee``, an UPDATE of the ``Engineer`` - local table using criteria against the ``Employee`` - local table might look like:: - - session.query(Engineer).\\ - filter(Engineer.id == Employee.id).\\ - filter(Employee.name == 'dilbert').\\ - update({"engineer_type": "programmer"}) - - * The :meth:`.MapperEvents.before_update` and - :meth:`.MapperEvents.after_update` - events **are not invoked from this method**. Instead, the - :meth:`.SessionEvents.after_bulk_update` method is provided to - act upon a mass UPDATE of entity rows. - - .. seealso:: - - :meth:`.Query.delete` - - :ref:`inserts_and_updates` - Core SQL tutorial - - """ - - update_args = update_args or {} - update_op = persistence.BulkUpdate.factory( - self, synchronize_session, values, update_args) - update_op.exec_() - return update_op.rowcount - - def _compile_context(self, labels=True): - if self.dispatch.before_compile: - for fn in self.dispatch.before_compile: - new_query = fn(self) - if new_query is not None: - self = new_query - - context = QueryContext(self) - - if context.statement is not None: - return context - - context.labels = labels - - context._for_update_arg = self._for_update_arg - - for entity in self._entities: - entity.setup_context(self, context) - - for rec in context.create_eager_joins: - strategy = rec[0] - strategy(*rec[1:]) - - if context.from_clause: - # "load from explicit FROMs" mode, - # i.e. when select_from() or join() is used - context.froms = list(context.from_clause) - # else "load from discrete FROMs" mode, - # i.e. when each _MappedEntity has its own FROM - - if self._enable_single_crit: - self._adjust_for_single_inheritance(context) - - if not context.primary_columns: - if self._only_load_props: - raise sa_exc.InvalidRequestError( - "No column-based properties specified for " - "refresh operation. 
Use session.expire() " - "to reload collections and related items.") - else: - raise sa_exc.InvalidRequestError( - "Query contains no columns with which to " - "SELECT from.") - - if context.multi_row_eager_loaders and self._should_nest_selectable: - context.statement = self._compound_eager_statement(context) - else: - context.statement = self._simple_statement(context) - - return context - - def _compound_eager_statement(self, context): - # for eager joins present and LIMIT/OFFSET/DISTINCT, - # wrap the query inside a select, - # then append eager joins onto that - - if context.order_by: - order_by_col_expr = list( - chain(*[ - sql_util.unwrap_order_by(o) - for o in context.order_by - ]) - ) - else: - context.order_by = None - order_by_col_expr = [] - - inner = sql.select( - context.primary_columns + order_by_col_expr, - context.whereclause, - from_obj=context.froms, - use_labels=context.labels, - # TODO: this order_by is only needed if - # LIMIT/OFFSET is present in self._select_args, - # else the application on the outside is enough - order_by=context.order_by, - **self._select_args - ) - - for hint in self._with_hints: - inner = inner.with_hint(*hint) - - if self._correlate: - inner = inner.correlate(*self._correlate) - - inner = inner.alias() - - equivs = self.__all_equivs() - - context.adapter = sql_util.ColumnAdapter(inner, equivs) - - statement = sql.select( - [inner] + context.secondary_columns, - use_labels=context.labels) - - statement._for_update_arg = context._for_update_arg - - from_clause = inner - for eager_join in context.eager_joins.values(): - # EagerLoader places a 'stop_on' attribute on the join, - # giving us a marker as to where the "splice point" of - # the join should be - from_clause = sql_util.splice_joins( - from_clause, - eager_join, eager_join.stop_on) - - statement.append_from(from_clause) - - if context.order_by: - statement.append_order_by( - *context.adapter.copy_and_process( - context.order_by - ) - ) - - statement.append_order_by(*context.eager_order_by) - return statement - - def _simple_statement(self, context): - if not context.order_by: - context.order_by = None - - if self._distinct and context.order_by: - order_by_col_expr = list( - chain(*[ - sql_util.unwrap_order_by(o) - for o in context.order_by - ]) - ) - context.primary_columns += order_by_col_expr - - context.froms += tuple(context.eager_joins.values()) - - statement = sql.select( - context.primary_columns + - context.secondary_columns, - context.whereclause, - from_obj=context.froms, - use_labels=context.labels, - order_by=context.order_by, - **self._select_args - ) - statement._for_update_arg = context._for_update_arg - - for hint in self._with_hints: - statement = statement.with_hint(*hint) - - if self._correlate: - statement = statement.correlate(*self._correlate) - - if context.eager_order_by: - statement.append_order_by(*context.eager_order_by) - return statement - - def _adjust_for_single_inheritance(self, context): - """Apply single-table-inheritance filtering. - - For all distinct single-table-inheritance mappers represented in - the columns clause of this query, add criterion to the WHERE - clause of the given QueryContext such that only the appropriate - subtypes are selected from the total results. 
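        To make the effect concrete, a sketch assuming a hypothetical
        single-table mapping in which ``Manager`` subclasses ``Employee``
        with a ``type`` discriminator column::

            q = session.query(Manager)

            # _adjust_for_single_inheritance then augments the WHERE clause
            # so that only rows of the requested subtype are returned,
            # roughly:
            #
            #     SELECT ... FROM employee WHERE employee.type IN ('manager')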
- - """ - - for (ext_info, adapter) in set(self._mapper_adapter_map.values()): - if ext_info in self._join_entities: - continue - single_crit = ext_info.mapper._single_table_criterion - if single_crit is not None: - if adapter: - single_crit = adapter.traverse(single_crit) - single_crit = self._adapt_clause(single_crit, False, False) - context.whereclause = sql.and_( - sql.True_._ifnone(context.whereclause), - single_crit) - - def __str__(self): - return str(self._compile_context().statement) - -from ..sql.selectable import ForUpdateArg - - -class LockmodeArg(ForUpdateArg): - @classmethod - def parse_legacy_query(self, mode): - if mode in (None, False): - return None - - if mode == "read": - read = True - nowait = False - elif mode == "update": - read = nowait = False - elif mode == "update_nowait": - nowait = True - read = False - else: - raise sa_exc.ArgumentError( - "Unknown with_lockmode argument: %r" % mode) - - return LockmodeArg(read=read, nowait=nowait) - - -class _QueryEntity(object): - """represent an entity column returned within a Query result.""" - - def __new__(cls, *args, **kwargs): - if cls is _QueryEntity: - entity = args[1] - if not isinstance(entity, util.string_types) and \ - _is_mapped_class(entity): - cls = _MapperEntity - elif isinstance(entity, Bundle): - cls = _BundleEntity - else: - cls = _ColumnEntity - return object.__new__(cls) - - def _clone(self): - q = self.__class__.__new__(self.__class__) - q.__dict__ = self.__dict__.copy() - return q - - -class _MapperEntity(_QueryEntity): - """mapper/class/AliasedClass entity""" - - def __init__(self, query, entity): - if not query._primary_entity: - query._primary_entity = self - query._entities.append(self) - - self.entities = [entity] - self.expr = entity - - supports_single_entity = True - - def setup_entity(self, ext_info, aliased_adapter): - self.mapper = ext_info.mapper - self.aliased_adapter = aliased_adapter - self.selectable = ext_info.selectable - self.is_aliased_class = ext_info.is_aliased_class - self._with_polymorphic = ext_info.with_polymorphic_mappers - self._polymorphic_discriminator = \ - ext_info.polymorphic_on - self.entity_zero = ext_info - if ext_info.is_aliased_class: - self._label_name = self.entity_zero.name - else: - self._label_name = self.mapper.class_.__name__ - self.path = self.entity_zero._path_registry - - def set_with_polymorphic(self, query, cls_or_mappers, - selectable, polymorphic_on): - """Receive an update from a call to query.with_polymorphic(). - - Note the newer style of using a free standing with_polymporphic() - construct doesn't make use of this method. - - - """ - if self.is_aliased_class: - # TODO: invalidrequest ? 
- raise NotImplementedError( - "Can't use with_polymorphic() against " - "an Aliased object" - ) - - if cls_or_mappers is None: - query._reset_polymorphic_adapter(self.mapper) - return - - mappers, from_obj = self.mapper._with_polymorphic_args( - cls_or_mappers, selectable) - self._with_polymorphic = mappers - self._polymorphic_discriminator = polymorphic_on - - self.selectable = from_obj - query._mapper_loads_polymorphically_with( - self.mapper, sql_util.ColumnAdapter( - from_obj, self.mapper._equivalent_columns)) - - filter_fn = id - - @property - def type(self): - return self.mapper.class_ - - @property - def entity_zero_or_selectable(self): - return self.entity_zero - - def corresponds_to(self, entity): - if entity.is_aliased_class: - if self.is_aliased_class: - if entity._base_alias is self.entity_zero._base_alias: - return True - return False - elif self.is_aliased_class: - if self.entity_zero._use_mapper_path: - return entity in self._with_polymorphic - else: - return entity is self.entity_zero - - return entity.common_parent(self.entity_zero) - - def adapt_to_selectable(self, query, sel): - query._entities.append(self) - - def _get_entity_clauses(self, query, context): - - adapter = None - - if not self.is_aliased_class: - if query._polymorphic_adapters: - adapter = query._polymorphic_adapters.get(self.mapper, None) - else: - adapter = self.aliased_adapter - - if adapter: - if query._from_obj_alias: - ret = adapter.wrap(query._from_obj_alias) - else: - ret = adapter - else: - ret = query._from_obj_alias - - return ret - - def row_processor(self, query, context, result): - adapter = self._get_entity_clauses(query, context) - - if context.adapter and adapter: - adapter = adapter.wrap(context.adapter) - elif not adapter: - adapter = context.adapter - - # polymorphic mappers which have concrete tables in - # their hierarchy usually - # require row aliasing unconditionally. - if not adapter and self.mapper._requires_row_aliasing: - adapter = sql_util.ColumnAdapter( - self.selectable, - self.mapper._equivalent_columns) - - if query._primary_entity is self: - only_load_props = query._only_load_props - refresh_state = context.refresh_state - else: - only_load_props = refresh_state = None - - _instance = loading._instance_processor( - self.mapper, - context, - result, - self.path, - adapter, - only_load_props=only_load_props, - refresh_state=refresh_state, - polymorphic_discriminator=self._polymorphic_discriminator - ) - - return _instance, self._label_name - - def setup_context(self, query, context): - adapter = self._get_entity_clauses(query, context) - - # if self._adapted_selectable is None: - context.froms += (self.selectable,) - - if context.order_by is False and self.mapper.order_by: - context.order_by = self.mapper.order_by - - # apply adaptation to the mapper's order_by if needed. - if adapter: - context.order_by = adapter.adapt_list( - util.to_list( - context.order_by - ) - ) - - loading._setup_entity_query( - context, self.mapper, self, - self.path, adapter, context.primary_columns, - with_polymorphic=self._with_polymorphic, - only_load_props=query._only_load_props, - polymorphic_discriminator=self._polymorphic_discriminator) - - def __str__(self): - return str(self.mapper) - - -@inspection._self_inspects -class Bundle(InspectionAttr): - """A grouping of SQL expressions that are returned by a :class:`.Query` - under one namespace. - - The :class:`.Bundle` essentially allows nesting of the tuple-based - results returned by a column-oriented :class:`.Query` object. 
It also - is extensible via simple subclassing, where the primary capability - to override is that of how the set of expressions should be returned, - allowing post-processing as well as custom return types, without - involving ORM identity-mapped classes. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :ref:`bundles` - - """ - - single_entity = False - """If True, queries for a single Bundle will be returned as a single - entity, rather than an element within a keyed tuple.""" - - is_clause_element = False - - is_mapper = False - - is_aliased_class = False - - def __init__(self, name, *exprs, **kw): - """Construct a new :class:`.Bundle`. - - e.g.:: - - bn = Bundle("mybundle", MyClass.x, MyClass.y) - - for row in session.query(bn).filter( - bn.c.x == 5).filter(bn.c.y == 4): - print(row.mybundle.x, row.mybundle.y) - - :param name: name of the bundle. - :param \*exprs: columns or SQL expressions comprising the bundle. - :param single_entity=False: if True, rows for this :class:`.Bundle` - can be returned as a "single entity" outside of any enclosing tuple - in the same manner as a mapped entity. - - """ - self.name = self._label = name - self.exprs = exprs - self.c = self.columns = ColumnCollection() - self.columns.update((getattr(col, "key", col._label), col) - for col in exprs) - self.single_entity = kw.pop('single_entity', self.single_entity) - - columns = None - """A namespace of SQL expressions referred to by this :class:`.Bundle`. - - e.g.:: - - bn = Bundle("mybundle", MyClass.x, MyClass.y) - - q = sess.query(bn).filter(bn.c.x == 5) - - Nesting of bundles is also supported:: - - b1 = Bundle("b1", - Bundle('b2', MyClass.a, MyClass.b), - Bundle('b3', MyClass.x, MyClass.y) - ) - - q = sess.query(b1).filter( - b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9) - - .. seealso:: - - :attr:`.Bundle.c` - - """ - - c = None - """An alias for :attr:`.Bundle.columns`.""" - - def _clone(self): - cloned = self.__class__.__new__(self.__class__) - cloned.__dict__.update(self.__dict__) - return cloned - - def __clause_element__(self): - return expression.ClauseList(group=False, *self.c) - - @property - def clauses(self): - return self.__clause_element__().clauses - - def label(self, name): - """Provide a copy of this :class:`.Bundle` passing a new label.""" - - cloned = self._clone() - cloned.name = name - return cloned - - def create_row_processor(self, query, procs, labels): - """Produce the "row processing" function for this :class:`.Bundle`. - - May be overridden by subclasses. - - .. seealso:: - - :ref:`bundles` - includes an example of subclassing. 
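[editor's note] Since ``create_row_processor()`` is the override point named above, a minimal subclassing sketch; the ``DictBundle`` name is hypothetical::

    from sqlalchemy.orm import Bundle

    class DictBundle(Bundle):
        def create_row_processor(self, query, procs, labels):
            """Return rows for this bundle as dicts rather than tuples."""
            def proc(row):
                # procs are the per-expression getters; labels their keys
                return dict(zip(labels, (p(row) for p in procs)))
            return proc

Usage mirrors the plain :class:`.Bundle` examples above; each result row then carries a dict under the bundle's label.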
- - """ - keyed_tuple = util.lightweight_named_tuple('result', labels) - - def proc(row): - return keyed_tuple([proc(row) for proc in procs]) - return proc - - -class _BundleEntity(_QueryEntity): - def __init__(self, query, bundle, setup_entities=True): - query._entities.append(self) - self.bundle = self.expr = bundle - self.type = type(bundle) - self._label_name = bundle.name - self._entities = [] - - if setup_entities: - for expr in bundle.exprs: - if isinstance(expr, Bundle): - _BundleEntity(self, expr) - else: - _ColumnEntity(self, expr, namespace=self) - - self.entities = () - - self.filter_fn = lambda item: item - - self.supports_single_entity = self.bundle.single_entity - - @property - def entity_zero(self): - for ent in self._entities: - ezero = ent.entity_zero - if ezero is not None: - return ezero - else: - return None - - def corresponds_to(self, entity): - # TODO: this seems to have no effect for - # _ColumnEntity either - return False - - @property - def entity_zero_or_selectable(self): - for ent in self._entities: - ezero = ent.entity_zero_or_selectable - if ezero is not None: - return ezero - else: - return None - - def adapt_to_selectable(self, query, sel): - c = _BundleEntity(query, self.bundle, setup_entities=False) - # c._label_name = self._label_name - # c.entity_zero = self.entity_zero - # c.entities = self.entities - - for ent in self._entities: - ent.adapt_to_selectable(c, sel) - - def setup_entity(self, ext_info, aliased_adapter): - for ent in self._entities: - ent.setup_entity(ext_info, aliased_adapter) - - def setup_context(self, query, context): - for ent in self._entities: - ent.setup_context(query, context) - - def row_processor(self, query, context, result): - procs, labels = zip( - *[ent.row_processor(query, context, result) - for ent in self._entities] - ) - - proc = self.bundle.create_row_processor(query, procs, labels) - - return proc, self._label_name - - -class _ColumnEntity(_QueryEntity): - """Column/expression based entity.""" - - def __init__(self, query, column, namespace=None): - self.expr = column - self.namespace = namespace - search_entities = True - check_column = False - - if isinstance(column, util.string_types): - column = sql.literal_column(column) - self._label_name = column.name - search_entities = False - check_column = True - _entity = None - elif isinstance(column, ( - attributes.QueryableAttribute, - interfaces.PropComparator - )): - _entity = getattr(column, '_parententity', None) - if _entity is not None: - search_entities = False - self._label_name = column.key - column = column._query_clause_element() - check_column = True - if isinstance(column, Bundle): - _BundleEntity(query, column) - return - - if not isinstance(column, sql.ColumnElement): - if hasattr(column, '_select_iterable'): - # break out an object like Table into - # individual columns - for c in column._select_iterable: - if c is column: - break - _ColumnEntity(query, c, namespace=column) - else: - return - - raise sa_exc.InvalidRequestError( - "SQL expression, column, or mapped entity " - "expected - got '%r'" % (column, ) - ) - elif not check_column: - self._label_name = getattr(column, 'key', None) - search_entities = True - - self.type = type_ = column.type - if type_.hashable: - self.filter_fn = lambda item: item - else: - counter = util.counter() - self.filter_fn = lambda item: counter() - - # If the Column is unnamed, give it a - # label() so that mutable column expressions - # can be located in the result even - # if the expression's identity has been changed - 
# due to adaption. - - if not column._label and not getattr(column, 'is_literal', False): - column = column.label(self._label_name) - - query._entities.append(self) - - self.column = column - self.froms = set() - - # look for ORM entities represented within the - # given expression. Try to count only entities - # for columns whose FROM object is in the actual list - # of FROMs for the overall expression - this helps - # subqueries which were built from ORM constructs from - # leaking out their entities into the main select construct - self.actual_froms = actual_froms = set(column._from_objects) - - if not search_entities: - self.entity_zero = _entity - if _entity: - self.entities = [_entity] - else: - self.entities = [] - self._from_entities = set(self.entities) - else: - all_elements = [ - elem for elem in visitors.iterate(column, {}) - if 'parententity' in elem._annotations - ] - - self.entities = util.unique_list([ - elem._annotations['parententity'] - for elem in all_elements - if 'parententity' in elem._annotations - ]) - - self._from_entities = set([ - elem._annotations['parententity'] - for elem in all_elements - if 'parententity' in elem._annotations - and actual_froms.intersection(elem._from_objects) - ]) - if self.entities: - self.entity_zero = self.entities[0] - elif self.namespace is not None: - self.entity_zero = self.namespace - else: - self.entity_zero = None - - supports_single_entity = False - - @property - def entity_zero_or_selectable(self): - if self.entity_zero is not None: - return self.entity_zero - elif self.actual_froms: - return list(self.actual_froms)[0] - else: - return None - - def adapt_to_selectable(self, query, sel): - c = _ColumnEntity(query, sel.corresponding_column(self.column)) - c._label_name = self._label_name - c.entity_zero = self.entity_zero - c.entities = self.entities - - def setup_entity(self, ext_info, aliased_adapter): - if 'selectable' not in self.__dict__: - self.selectable = ext_info.selectable - - if self.actual_froms.intersection(ext_info.selectable._from_objects): - self.froms.add(ext_info.selectable) - - def corresponds_to(self, entity): - # TODO: just returning False here, - # no tests fail - if self.entity_zero is None: - return False - elif _is_aliased_class(entity): - # TODO: polymorphic subclasses ? 
- return entity is self.entity_zero - else: - return not _is_aliased_class(self.entity_zero) and \ - entity.common_parent(self.entity_zero) - - def row_processor(self, query, context, result): - if ('fetch_column', self) in context.attributes: - column = context.attributes[('fetch_column', self)] - else: - column = query._adapt_clause(self.column, False, True) - - if context.adapter: - column = context.adapter.columns[column] - - getter = result._getter(column) - return getter, self._label_name - - def setup_context(self, query, context): - column = query._adapt_clause(self.column, False, True) - context.froms += tuple(self.froms) - context.primary_columns.append(column) - - context.attributes[('fetch_column', self)] = column - - def __str__(self): - return str(self.column) - - -class QueryContext(object): - __slots__ = ( - 'multi_row_eager_loaders', 'adapter', 'froms', 'for_update', - 'query', 'session', 'autoflush', 'populate_existing', - 'invoke_all_eagers', 'version_check', 'refresh_state', - 'primary_columns', 'secondary_columns', 'eager_order_by', - 'eager_joins', 'create_eager_joins', 'propagate_options', - 'attributes', 'statement', 'from_clause', 'whereclause', - 'order_by', 'labels', '_for_update_arg', 'runid', 'partials' - ) - - def __init__(self, query): - - if query._statement is not None: - if isinstance(query._statement, expression.SelectBase) and \ - not query._statement._textual and \ - not query._statement.use_labels: - self.statement = query._statement.apply_labels() - else: - self.statement = query._statement - else: - self.statement = None - self.from_clause = query._from_obj - self.whereclause = query._criterion - self.order_by = query._order_by - - self.multi_row_eager_loaders = False - self.adapter = None - self.froms = () - self.for_update = None - self.query = query - self.session = query.session - self.autoflush = query._autoflush - self.populate_existing = query._populate_existing - self.invoke_all_eagers = query._invoke_all_eagers - self.version_check = query._version_check - self.refresh_state = query._refresh_state - self.primary_columns = [] - self.secondary_columns = [] - self.eager_order_by = [] - self.eager_joins = {} - self.create_eager_joins = [] - self.propagate_options = set(o for o in query._with_options if - o.propagate_to_loaders) - self.attributes = query._attributes.copy() - - -class AliasOption(interfaces.MapperOption): - - def __init__(self, alias): - """Return a :class:`.MapperOption` that will indicate to the :class:`.Query` - that the main table has been aliased. - - This is a seldom-used option to suit the - very rare case that :func:`.contains_eager` - is being used in conjunction with a user-defined SELECT - statement that aliases the parent table. E.g.:: - - # define an aliased UNION called 'ulist' - ulist = users.select(users.c.user_id==7).\\ - union(users.select(users.c.user_id>7)).\\ - alias('ulist') - - # add on an eager load of "addresses" - statement = ulist.outerjoin(addresses).\\ - select().apply_labels() - - # create query, indicating "ulist" will be an - # alias for the main table, "addresses" - # property should be eager loaded - query = session.query(User).options( - contains_alias(ulist), - contains_eager(User.addresses)) - - # then get results via the statement - results = query.from_statement(statement).all() - - :param alias: is the string name of an alias, or a - :class:`~.sql.expression.Alias` object representing - the alias. 
- - """ - self.alias = alias - - def process_query(self, query): - if isinstance(self.alias, util.string_types): - alias = query._mapper_zero().mapped_table.alias(self.alias) - else: - alias = self.alias - query._from_obj_alias = sql_util.ColumnAdapter(alias) diff --git a/python/sqlalchemy/orm/relationships.py b/python/sqlalchemy/orm/relationships.py deleted file mode 100644 index 552ce8b6..00000000 --- a/python/sqlalchemy/orm/relationships.py +++ /dev/null @@ -1,2845 +0,0 @@ -# orm/relationships.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Heuristics related to join conditions as used in -:func:`.relationship`. - -Provides the :class:`.JoinCondition` object, which encapsulates -SQL annotation and aliasing behavior focused on the `primaryjoin` -and `secondaryjoin` aspects of :func:`.relationship`. - -""" -from __future__ import absolute_import -from .. import sql, util, exc as sa_exc, schema, log - -import weakref -from .util import CascadeOptions, _orm_annotate, _orm_deannotate -from . import dependency -from . import attributes -from ..sql.util import ( - ClauseAdapter, - join_condition, _shallow_annotate, visit_binary_product, - _deep_deannotate, selectables_overlap, adapt_criterion_to_null -) -from ..sql import operators, expression, visitors -from .interfaces import (MANYTOMANY, MANYTOONE, ONETOMANY, - StrategizedProperty, PropComparator) -from ..inspection import inspect -from . import mapper as mapperlib -import collections - - -def remote(expr): - """Annotate a portion of a primaryjoin expression - with a 'remote' annotation. - - See the section :ref:`relationship_custom_foreign` for a - description of use. - - .. versionadded:: 0.8 - - .. seealso:: - - :ref:`relationship_custom_foreign` - - :func:`.foreign` - - """ - return _annotate_columns(expression._clause_element_as_expr(expr), - {"remote": True}) - - -def foreign(expr): - """Annotate a portion of a primaryjoin expression - with a 'foreign' annotation. - - See the section :ref:`relationship_custom_foreign` for a - description of use. - - .. versionadded:: 0.8 - - .. seealso:: - - :ref:`relationship_custom_foreign` - - :func:`.remote` - - """ - - return _annotate_columns(expression._clause_element_as_expr(expr), - {"foreign": True}) - - -@log.class_logger -@util.langhelpers.dependency_for("sqlalchemy.orm.properties") -class RelationshipProperty(StrategizedProperty): - """Describes an object property that holds a single item or list - of items that correspond to a related database table. - - Public constructor is the :func:`.orm.relationship` function. 
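[editor's note] The ``remote()`` and ``foreign()`` annotations defined above are typically combined inside a custom ``primaryjoin``. A minimal self-referential sketch, assuming a declarative ``Base``; the ``Node`` mapping is hypothetical and deliberately omits a :class:`.ForeignKey` so that the annotations are required::

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship, foreign, remote

    Base = declarative_base()

    class Node(Base):
        __tablename__ = 'node'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer)   # no ForeignKey, on purpose

        # annotate which side is "remote" and which is "foreign",
        # since there is no ForeignKey to infer the direction from
        parent = relationship(
            'Node',
            primaryjoin=remote(id) == foreign(parent_id),
            viewonly=True,
        )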
- - See also: - - :ref:`relationship_config_toplevel` - - """ - - strategy_wildcard_key = 'relationship' - - _dependency_processor = None - - def __init__(self, argument, - secondary=None, primaryjoin=None, - secondaryjoin=None, - foreign_keys=None, - uselist=None, - order_by=False, - backref=None, - back_populates=None, - post_update=False, - cascade=False, extension=None, - viewonly=False, lazy=True, - collection_class=None, passive_deletes=False, - passive_updates=True, remote_side=None, - enable_typechecks=True, join_depth=None, - comparator_factory=None, - single_parent=False, innerjoin=False, - distinct_target_key=None, - doc=None, - active_history=False, - cascade_backrefs=True, - load_on_pending=False, - bake_queries=True, - strategy_class=None, _local_remote_pairs=None, - query_class=None, - info=None): - """Provide a relationship between two mapped classes. - - This corresponds to a parent-child or associative table relationship. - The constructed class is an instance of - :class:`.RelationshipProperty`. - - A typical :func:`.relationship`, used in a classical mapping:: - - mapper(Parent, properties={ - 'children': relationship(Child) - }) - - Some arguments accepted by :func:`.relationship` optionally accept a - callable function, which when called produces the desired value. - The callable is invoked by the parent :class:`.Mapper` at "mapper - initialization" time, which happens only when mappers are first used, - and is assumed to be after all mappings have been constructed. This - can be used to resolve order-of-declaration and other dependency - issues, such as if ``Child`` is declared below ``Parent`` in the same - file:: - - mapper(Parent, properties={ - "children":relationship(lambda: Child, - order_by=lambda: Child.id) - }) - - When using the :ref:`declarative_toplevel` extension, the Declarative - initializer allows string arguments to be passed to - :func:`.relationship`. These string arguments are converted into - callables that evaluate the string as Python code, using the - Declarative class-registry as a namespace. This allows the lookup of - related classes to be automatic via their string name, and removes the - need to import related classes at all into the local module space:: - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - - class Parent(Base): - __tablename__ = 'parent' - id = Column(Integer, primary_key=True) - children = relationship("Child", order_by="Child.id") - - .. seealso:: - - :ref:`relationship_config_toplevel` - Full introductory and - reference documentation for :func:`.relationship`. - - :ref:`orm_tutorial_relationship` - ORM tutorial introduction. - - :param argument: - a mapped class, or actual :class:`.Mapper` instance, representing - the target of the relationship. - - :paramref:`~.relationship.argument` may also be passed as a callable - function which is evaluated at mapper initialization time, and may - be passed as a Python-evaluable string when using Declarative. - - .. seealso:: - - :ref:`declarative_configuring_relationships` - further detail - on relationship configuration when using Declarative. - - :param secondary: - for a many-to-many relationship, specifies the intermediary - table, and is typically an instance of :class:`.Table`. - In less common circumstances, the argument may also be specified - as an :class:`.Alias` construct, or even a :class:`.Join` construct. 
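[editor's note] Before the finer points of :paramref:`~.relationship.secondary` below, a minimal many-to-many sketch; table and class names are illustrative, and a declarative ``Base`` is assumed::

    from sqlalchemy import Table, Column, Integer, ForeignKey
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    association = Table(
        'association', Base.metadata,
        Column('left_id', Integer, ForeignKey('left.id')),
        Column('right_id', Integer, ForeignKey('right.id')),
    )

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship('Child', secondary=association)

    class Child(Base):
        __tablename__ = 'right'
        id = Column(Integer, primary_key=True)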
- - :paramref:`~.relationship.secondary` may - also be passed as a callable function which is evaluated at - mapper initialization time. When using Declarative, it may also - be a string argument noting the name of a :class:`.Table` that is - present in the :class:`.MetaData` collection associated with the - parent-mapped :class:`.Table`. - - The :paramref:`~.relationship.secondary` keyword argument is - typically applied in the case where the intermediary :class:`.Table` - is not otherwise expressed in any direct class mapping. If the - "secondary" table is also explicitly mapped elsewhere (e.g. as in - :ref:`association_pattern`), one should consider applying the - :paramref:`~.relationship.viewonly` flag so that this - :func:`.relationship` is not used for persistence operations which - may conflict with those of the association object pattern. - - .. seealso:: - - :ref:`relationships_many_to_many` - Reference example of "many - to many". - - :ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to - many-to-many relationships. - - :ref:`self_referential_many_to_many` - Specifics on using - many-to-many in a self-referential case. - - :ref:`declarative_many_to_many` - Additional options when using - Declarative. - - :ref:`association_pattern` - an alternative to - :paramref:`~.relationship.secondary` when composing association - table relationships, allowing additional attributes to be - specified on the association table. - - :ref:`composite_secondary_join` - a lesser-used pattern which - in some cases can enable complex :func:`.relationship` SQL - conditions to be used. - - .. versionadded:: 0.9.2 :paramref:`~.relationship.secondary` works - more effectively when referring to a :class:`.Join` instance. - - :param active_history=False: - When ``True``, indicates that the "previous" value for a - many-to-one reference should be loaded when replaced, if - not already loaded. Normally, history tracking logic for - simple many-to-ones only needs to be aware of the "new" - value in order to perform a flush. This flag is available - for applications that make use of - :func:`.attributes.get_history` which also need to know - the "previous" value of the attribute. - - :param backref: - indicates the string name of a property to be placed on the related - mapper's class that will handle this relationship in the other - direction. The other property will be created automatically - when the mappers are configured. Can also be passed as a - :func:`.backref` object to control the configuration of the - new relationship. - - .. seealso:: - - :ref:`relationships_backref` - Introductory documentation and - examples. - - :paramref:`~.relationship.back_populates` - alternative form - of backref specification. - - :func:`.backref` - allows control over :func:`.relationship` - configuration when using :paramref:`~.relationship.backref`. - - - :param back_populates: - Takes a string name and has the same meaning as - :paramref:`~.relationship.backref`, except the complementing - property is **not** created automatically, and instead must be - configured explicitly on the other mapper. The complementing - property should also indicate - :paramref:`~.relationship.back_populates` to this relationship to - ensure proper functioning. - - .. seealso:: - - :ref:`relationships_backref` - Introductory documentation and - examples. - - :paramref:`~.relationship.backref` - alternative form - of backref specification. 
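[editor's note] A sketch of the explicit form: both mappers name each other via :paramref:`~.relationship.back_populates`, whereas ``backref='parent'`` on ``Parent.children`` alone would create ``Child.parent`` implicitly. Class names are hypothetical; a declarative ``Base`` is assumed::

    from sqlalchemy import Column, Integer, ForeignKey
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship('Child', back_populates='parent')

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
        parent = relationship('Parent', back_populates='children')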
- - :param bake_queries: - Use the :class:`.BakedQuery` cache to cache queries used in lazy - loads. True by default, as this typically improves performance - significantly. Set to False to reduce ORM memory use, or - if unresolved stability issues are observed with the baked query - cache system. - - .. versionadded:: 1.0.0 - - :param cascade: - a comma-separated list of cascade rules which determines how - Session operations should be "cascaded" from parent to child. - This defaults to ``False``, which means the default cascade - should be used - this default cascade is ``"save-update, merge"``. - - The available cascades are ``save-update``, ``merge``, - ``expunge``, ``delete``, ``delete-orphan``, and ``refresh-expire``. - An additional option, ``all`` indicates shorthand for - ``"save-update, merge, refresh-expire, - expunge, delete"``, and is often used as in ``"all, delete-orphan"`` - to indicate that related objects should follow along with the - parent object in all cases, and be deleted when de-associated. - - .. seealso:: - - :ref:`unitofwork_cascades` - Full detail on each of the available - cascade options. - - :ref:`tutorial_delete_cascade` - Tutorial example describing - a delete cascade. - - :param cascade_backrefs=True: - a boolean value indicating if the ``save-update`` cascade should - operate along an assignment event intercepted by a backref. - When set to ``False``, the attribute managed by this relationship - will not cascade an incoming transient object into the session of a - persistent parent, if the event is received via backref. - - .. seealso:: - - :ref:`backref_cascade` - Full discussion and examples on how - the :paramref:`~.relationship.cascade_backrefs` option is used. - - :param collection_class: - a class or callable that returns a new list-holding object. will - be used in place of a plain list for storing elements. - - .. seealso:: - - :ref:`custom_collections` - Introductory documentation and - examples. - - :param comparator_factory: - a class which extends :class:`.RelationshipProperty.Comparator` - which provides custom SQL clause generation for comparison - operations. - - .. seealso:: - - :class:`.PropComparator` - some detail on redefining comparators - at this level. - - :ref:`custom_comparators` - Brief intro to this feature. - - - :param distinct_target_key=None: - Indicate if a "subquery" eager load should apply the DISTINCT - keyword to the innermost SELECT statement. When left as ``None``, - the DISTINCT keyword will be applied in those cases when the target - columns do not comprise the full primary key of the target table. - When set to ``True``, the DISTINCT keyword is applied to the - innermost SELECT unconditionally. - - It may be desirable to set this flag to False when the DISTINCT is - reducing performance of the innermost subquery beyond that of what - duplicate innermost rows may be causing. - - .. versionadded:: 0.8.3 - - :paramref:`~.relationship.distinct_target_key` allows the - subquery eager loader to apply a DISTINCT modifier to the - innermost SELECT. - - .. versionchanged:: 0.9.0 - - :paramref:`~.relationship.distinct_target_key` now defaults to - ``None``, so that the feature enables itself automatically for - those cases where the innermost query targets a non-unique - key. - - .. seealso:: - - :ref:`loading_toplevel` - includes an introduction to subquery - eager loading. - - :param doc: - docstring which will be applied to the resulting descriptor. 
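[editor's note] The :paramref:`~.relationship.cascade` rules described earlier in context: ``'all, delete-orphan'`` is the common choice for strictly-owned children. A minimal sketch with hypothetical classes::

    from sqlalchemy import Column, Integer, ForeignKey
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        # children follow the parent through all Session operations
        # and are deleted when removed from the collection
        children = relationship('Child', cascade='all, delete-orphan')

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))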
- - :param extension: - an :class:`.AttributeExtension` instance, or list of extensions, - which will be prepended to the list of attribute listeners for - the resulting descriptor placed on the class. - - .. deprecated:: 0.7 Please see :class:`.AttributeEvents`. - - :param foreign_keys: - - a list of columns which are to be used as "foreign key" - columns, or columns which refer to the value in a remote - column, within the context of this :func:`.relationship` - object's :paramref:`~.relationship.primaryjoin` condition. - That is, if the :paramref:`~.relationship.primaryjoin` - condition of this :func:`.relationship` is ``a.id == - b.a_id``, and the values in ``b.a_id`` are required to be - present in ``a.id``, then the "foreign key" column of this - :func:`.relationship` is ``b.a_id``. - - In normal cases, the :paramref:`~.relationship.foreign_keys` - parameter is **not required.** :func:`.relationship` will - automatically determine which columns in the - :paramref:`~.relationship.primaryjoin` conditition are to be - considered "foreign key" columns based on those - :class:`.Column` objects that specify :class:`.ForeignKey`, - or are otherwise listed as referencing columns in a - :class:`.ForeignKeyConstraint` construct. - :paramref:`~.relationship.foreign_keys` is only needed when: - - 1. There is more than one way to construct a join from the local - table to the remote table, as there are multiple foreign key - references present. Setting ``foreign_keys`` will limit the - :func:`.relationship` to consider just those columns specified - here as "foreign". - - .. versionchanged:: 0.8 - A multiple-foreign key join ambiguity can be resolved by - setting the :paramref:`~.relationship.foreign_keys` - parameter alone, without the need to explicitly set - :paramref:`~.relationship.primaryjoin` as well. - - 2. The :class:`.Table` being mapped does not actually have - :class:`.ForeignKey` or :class:`.ForeignKeyConstraint` - constructs present, often because the table - was reflected from a database that does not support foreign key - reflection (MySQL MyISAM). - - 3. The :paramref:`~.relationship.primaryjoin` argument is used to - construct a non-standard join condition, which makes use of - columns or expressions that do not normally refer to their - "parent" column, such as a join condition expressed by a - complex comparison using a SQL function. - - The :func:`.relationship` construct will raise informative - error messages that suggest the use of the - :paramref:`~.relationship.foreign_keys` parameter when - presented with an ambiguous condition. In typical cases, - if :func:`.relationship` doesn't raise any exceptions, the - :paramref:`~.relationship.foreign_keys` parameter is usually - not needed. - - :paramref:`~.relationship.foreign_keys` may also be passed as a - callable function which is evaluated at mapper initialization time, - and may be passed as a Python-evaluable string when using - Declarative. - - .. seealso:: - - :ref:`relationship_foreign_keys` - - :ref:`relationship_custom_foreign` - - :func:`.foreign` - allows direct annotation of the "foreign" - columns within a :paramref:`~.relationship.primaryjoin` condition. - - .. versionadded:: 0.8 - The :func:`.foreign` annotation can also be applied - directly to the :paramref:`~.relationship.primaryjoin` - expression, which is an alternate, more specific system of - describing which columns in a particular - :paramref:`~.relationship.primaryjoin` should be considered - "foreign". 
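[editor's note] The first case above (multiple foreign key paths to the same table) is the usual reason to set :paramref:`~.relationship.foreign_keys`; a minimal sketch with hypothetical ``Customer``/``Address`` classes, declarative ``Base`` assumed::

    from sqlalchemy import Column, Integer, ForeignKey
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Address(Base):
        __tablename__ = 'address'
        id = Column(Integer, primary_key=True)

    class Customer(Base):
        __tablename__ = 'customer'
        id = Column(Integer, primary_key=True)
        billing_address_id = Column(Integer, ForeignKey('address.id'))
        shipping_address_id = Column(Integer, ForeignKey('address.id'))

        # two join paths exist; name the FK column for each relationship
        billing_address = relationship(
            'Address', foreign_keys=[billing_address_id])
        shipping_address = relationship(
            'Address', foreign_keys=[shipping_address_id])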
- - :param info: Optional data dictionary which will be populated into the - :attr:`.MapperProperty.info` attribute of this object. - - .. versionadded:: 0.8 - - :param innerjoin=False: - when ``True``, joined eager loads will use an inner join to join - against related tables instead of an outer join. The purpose - of this option is generally one of performance, as inner joins - generally perform better than outer joins. - - This flag can be set to ``True`` when the relationship references an - object via many-to-one using local foreign keys that are not - nullable, or when the reference is one-to-one or a collection that - is guaranteed to have one or at least one entry. - - The option supports the same "nested" and "unnested" options as - that of :paramref:`.joinedload.innerjoin`. See that flag - for details on nested / unnested behaviors. - - .. seealso:: - - :paramref:`.joinedload.innerjoin` - the option as specified by - loader option, including detail on nesting behavior. - - :ref:`what_kind_of_loading` - Discussion of some details of - various loader options. - - - :param join_depth: - when non-``None``, an integer value indicating how many levels - deep "eager" loaders should join on a self-referring or cyclical - relationship. The number counts how many times the same Mapper - shall be present in the loading condition along a particular join - branch. When left at its default of ``None``, eager loaders - will stop chaining when they encounter a the same target mapper - which is already higher up in the chain. This option applies - both to joined- and subquery- eager loaders. - - .. seealso:: - - :ref:`self_referential_eager_loading` - Introductory documentation - and examples. - - :param lazy='select': specifies - how the related items should be loaded. Default value is - ``select``. Values include: - - * ``select`` - items should be loaded lazily when the property is - first accessed, using a separate SELECT statement, or identity map - fetch for simple many-to-one references. - - * ``immediate`` - items should be loaded as the parents are loaded, - using a separate SELECT statement, or identity map fetch for - simple many-to-one references. - - * ``joined`` - items should be loaded "eagerly" in the same query as - that of the parent, using a JOIN or LEFT OUTER JOIN. Whether - the join is "outer" or not is determined by the - :paramref:`~.relationship.innerjoin` parameter. - - * ``subquery`` - items should be loaded "eagerly" as the parents are - loaded, using one additional SQL statement, which issues a JOIN to - a subquery of the original statement, for each collection - requested. - - * ``noload`` - no loading should occur at any time. This is to - support "write-only" attributes, or attributes which are - populated in some manner specific to the application. - - * ``dynamic`` - the attribute will return a pre-configured - :class:`.Query` object for all read - operations, onto which further filtering operations can be - applied before iterating the results. See - the section :ref:`dynamic_relationship` for more details. - - * True - a synonym for 'select' - - * False - a synonym for 'joined' - - * None - a synonym for 'noload' - - .. seealso:: - - :doc:`/orm/loading_relationships` - Full documentation on relationship loader - configuration. - - :ref:`dynamic_relationship` - detail on the ``dynamic`` option. - - :param load_on_pending=False: - Indicates loading behavior for transient or pending parent objects. 
- - When set to ``True``, causes the lazy-loader to - issue a query for a parent object that is not persistent, meaning it - has never been flushed. This may take effect for a pending object - when autoflush is disabled, or for a transient object that has been - "attached" to a :class:`.Session` but is not part of its pending - collection. - - The :paramref:`~.relationship.load_on_pending` flag does not improve - behavior when the ORM is used normally - object references should be - constructed at the object level, not at the foreign key level, so - that they are present in an ordinary way before a flush proceeds. - This flag is not not intended for general use. - - .. seealso:: - - :meth:`.Session.enable_relationship_loading` - this method - establishes "load on pending" behavior for the whole object, and - also allows loading on objects that remain transient or - detached. - - :param order_by: - indicates the ordering that should be applied when loading these - items. :paramref:`~.relationship.order_by` is expected to refer to - one of the :class:`.Column` objects to which the target class is - mapped, or the attribute itself bound to the target class which - refers to the column. - - :paramref:`~.relationship.order_by` may also be passed as a callable - function which is evaluated at mapper initialization time, and may - be passed as a Python-evaluable string when using Declarative. - - :param passive_deletes=False: - Indicates loading behavior during delete operations. - - A value of True indicates that unloaded child items should not - be loaded during a delete operation on the parent. Normally, - when a parent item is deleted, all child items are loaded so - that they can either be marked as deleted, or have their - foreign key to the parent set to NULL. Marking this flag as - True usually implies an ON DELETE rule is in - place which will handle updating/deleting child rows on the - database side. - - Additionally, setting the flag to the string value 'all' will - disable the "nulling out" of the child foreign keys, when there - is no delete or delete-orphan cascade enabled. This is - typically used when a triggering or error raise scenario is in - place on the database side. Note that the foreign key - attributes on in-session child objects will not be changed - after a flush occurs so this is a very special use-case - setting. - - .. seealso:: - - :ref:`passive_deletes` - Introductory documentation - and examples. - - :param passive_updates=True: - Indicates the persistence behavior to take when a referenced - primary key value changes in place, indicating that the referencing - foreign key columns will also need their value changed. - - When True, it is assumed that ``ON UPDATE CASCADE`` is configured on - the foreign key in the database, and that the database will - handle propagation of an UPDATE from a source column to - dependent rows. When False, the SQLAlchemy :func:`.relationship` - construct will attempt to emit its own UPDATE statements to - modify related targets. However note that SQLAlchemy **cannot** - emit an UPDATE for more than one level of cascade. Also, - setting this flag to False is not compatible in the case where - the database is in fact enforcing referential integrity, unless - those constraints are explicitly "deferred", if the target backend - supports it. 
- - It is highly advised that an application which is employing - mutable primary keys keeps ``passive_updates`` set to True, - and instead uses the referential integrity features of the database - itself in order to handle the change efficiently and fully. - - .. seealso:: - - :ref:`passive_updates` - Introductory documentation and - examples. - - :paramref:`.mapper.passive_updates` - a similar flag which - takes effect for joined-table inheritance mappings. - - :param post_update: - this indicates that the relationship should be handled by a - second UPDATE statement after an INSERT or before a - DELETE. Currently, it also will issue an UPDATE after the - instance was UPDATEd as well, although this technically should - be improved. This flag is used to handle saving bi-directional - dependencies between two individual rows (i.e. each row - references the other), where it would otherwise be impossible to - INSERT or DELETE both rows fully since one row exists before the - other. Use this flag when a particular mapping arrangement will - incur two rows that are dependent on each other, such as a table - that has a one-to-many relationship to a set of child rows, and - also has a column that references a single child row within that - list (i.e. both tables contain a foreign key to each other). If - a flush operation returns an error that a "cyclical - dependency" was detected, this is a cue that you might want to - use :paramref:`~.relationship.post_update` to "break" the cycle. - - .. seealso:: - - :ref:`post_update` - Introductory documentation and examples. - - :param primaryjoin: - a SQL expression that will be used as the primary - join of this child object against the parent object, or in a - many-to-many relationship the join of the primary object to the - association table. By default, this value is computed based on the - foreign key relationships of the parent and child tables (or - association table). - - :paramref:`~.relationship.primaryjoin` may also be passed as a - callable function which is evaluated at mapper initialization time, - and may be passed as a Python-evaluable string when using - Declarative. - - .. seealso:: - - :ref:`relationship_primaryjoin` - - :param remote_side: - used for self-referential relationships, indicates the column or - list of columns that form the "remote side" of the relationship. - - :paramref:`.relationship.remote_side` may also be passed as a - callable function which is evaluated at mapper initialization time, - and may be passed as a Python-evaluable string when using - Declarative. - - .. versionchanged:: 0.8 - The :func:`.remote` annotation can also be applied - directly to the ``primaryjoin`` expression, which is an - alternate, more specific system of describing which columns in a - particular ``primaryjoin`` should be considered "remote". - - .. seealso:: - - :ref:`self_referential` - in-depth explanation of how - :paramref:`~.relationship.remote_side` - is used to configure self-referential relationships. - - :func:`.remote` - an annotation function that accomplishes the - same purpose as :paramref:`~.relationship.remote_side`, typically - when a custom :paramref:`~.relationship.primaryjoin` condition - is used. - - :param query_class: - a :class:`.Query` subclass that will be used as the base of the - "appender query" returned by a "dynamic" relationship, that - is, a relationship that specifies ``lazy="dynamic"`` or was - otherwise constructed using the :func:`.orm.dynamic_loader` - function. - - .. 
seealso:: - - :ref:`dynamic_relationship` - Introduction to "dynamic" - relationship loaders. - - :param secondaryjoin: - a SQL expression that will be used as the join of - an association table to the child object. By default, this value is - computed based on the foreign key relationships of the association - and child tables. - - :paramref:`~.relationship.secondaryjoin` may also be passed as a - callable function which is evaluated at mapper initialization time, - and may be passed as a Python-evaluable string when using - Declarative. - - .. seealso:: - - :ref:`relationship_primaryjoin` - - :param single_parent: - when True, installs a validator which will prevent objects - from being associated with more than one parent at a time. - This is used for many-to-one or many-to-many relationships that - should be treated either as one-to-one or one-to-many. Its usage - is optional, except for :func:`.relationship` constructs which - are many-to-one or many-to-many and also - specify the ``delete-orphan`` cascade option. The - :func:`.relationship` construct itself will raise an error - instructing when this option is required. - - .. seealso:: - - :ref:`unitofwork_cascades` - includes detail on when the - :paramref:`~.relationship.single_parent` flag may be appropriate. - - :param uselist: - a boolean that indicates if this property should be loaded as a - list or a scalar. In most cases, this value is determined - automatically by :func:`.relationship` at mapper configuration - time, based on the type and direction - of the relationship - one to many forms a list, many to one - forms a scalar, many to many is a list. If a scalar is desired - where normally a list would be present, such as a bi-directional - one-to-one relationship, set :paramref:`~.relationship.uselist` to - False. - - The :paramref:`~.relationship.uselist` flag is also available on an - existing :func:`.relationship` construct as a read-only attribute, - which can be used to determine if this :func:`.relationship` deals - with collections or scalar attributes:: - - >>> User.addresses.property.uselist - True - - .. seealso:: - - :ref:`relationships_one_to_one` - Introduction to the "one to - one" relationship pattern, which is typically when the - :paramref:`~.relationship.uselist` flag is needed. - - :param viewonly=False: - when set to True, the relationship is used only for loading objects, - and not for any persistence operation. A :func:`.relationship` - which specifies :paramref:`~.relationship.viewonly` can work - with a wider range of SQL operations within the - :paramref:`~.relationship.primaryjoin` condition, including - operations that feature the use of a variety of comparison operators - as well as SQL functions such as :func:`~.sql.expression.cast`. The - :paramref:`~.relationship.viewonly` flag is also of general use when - defining any kind of :func:`~.relationship` that doesn't represent - the full set of related objects, to prevent modifications of the - collection from resulting in persistence operations. 
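[editor's note] As noted under :paramref:`~.relationship.uselist`, a bi-directional one-to-one sets the flag explicitly on the side that would otherwise be a collection. A minimal sketch with hypothetical classes, declarative ``Base`` assumed::

    from sqlalchemy import Column, Integer, ForeignKey
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        # scalar instead of list on what would otherwise be one-to-many
        child = relationship('Child', uselist=False,
                             back_populates='parent')

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))
        parent = relationship('Parent', back_populates='child')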
- - - """ - super(RelationshipProperty, self).__init__() - - self.uselist = uselist - self.argument = argument - self.secondary = secondary - self.primaryjoin = primaryjoin - self.secondaryjoin = secondaryjoin - self.post_update = post_update - self.direction = None - self.viewonly = viewonly - self.lazy = lazy - self.single_parent = single_parent - self._user_defined_foreign_keys = foreign_keys - self.collection_class = collection_class - self.passive_deletes = passive_deletes - self.cascade_backrefs = cascade_backrefs - self.passive_updates = passive_updates - self.remote_side = remote_side - self.enable_typechecks = enable_typechecks - self.query_class = query_class - self.innerjoin = innerjoin - self.distinct_target_key = distinct_target_key - self.doc = doc - self.active_history = active_history - self.join_depth = join_depth - self.local_remote_pairs = _local_remote_pairs - self.extension = extension - self.bake_queries = bake_queries - self.load_on_pending = load_on_pending - self.comparator_factory = comparator_factory or \ - RelationshipProperty.Comparator - self.comparator = self.comparator_factory(self, None) - util.set_creation_order(self) - - if info is not None: - self.info = info - - if strategy_class: - self.strategy_class = strategy_class - else: - self.strategy_class = self._strategy_lookup(("lazy", self.lazy)) - - self._reverse_property = set() - - self.cascade = cascade if cascade is not False \ - else "save-update, merge" - - self.order_by = order_by - - self.back_populates = back_populates - - if self.back_populates: - if backref: - raise sa_exc.ArgumentError( - "backref and back_populates keyword arguments " - "are mutually exclusive") - self.backref = None - else: - self.backref = backref - - def instrument_class(self, mapper): - attributes.register_descriptor( - mapper.class_, - self.key, - comparator=self.comparator_factory(self, mapper), - parententity=mapper, - doc=self.doc, - ) - - class Comparator(PropComparator): - """Produce boolean, comparison, and other operators for - :class:`.RelationshipProperty` attributes. - - See the documentation for :class:`.PropComparator` for a brief - overview of ORM level operator definition. - - See also: - - :class:`.PropComparator` - - :class:`.ColumnProperty.Comparator` - - :class:`.ColumnOperators` - - :ref:`types_operators` - - :attr:`.TypeEngine.comparator_factory` - - """ - - _of_type = None - - def __init__( - self, prop, parentmapper, adapt_to_entity=None, of_type=None): - """Construction of :class:`.RelationshipProperty.Comparator` - is internal to the ORM's attribute mechanics. - - """ - self.prop = prop - self._parententity = parentmapper - self._adapt_to_entity = adapt_to_entity - if of_type: - self._of_type = of_type - - def adapt_to_entity(self, adapt_to_entity): - return self.__class__(self.property, self._parententity, - adapt_to_entity=adapt_to_entity, - of_type=self._of_type) - - @util.memoized_property - def mapper(self): - """The target :class:`.Mapper` referred to by this - :class:`.RelationshipProperty.Comparator`. - - This is the "target" or "remote" side of the - :func:`.relationship`. 
- - """ - return self.property.mapper - - @util.memoized_property - def _parententity(self): - return self.property.parent - - def _source_selectable(self): - if self._adapt_to_entity: - return self._adapt_to_entity.selectable - else: - return self.property.parent._with_polymorphic_selectable - - def __clause_element__(self): - adapt_from = self._source_selectable() - if self._of_type: - of_type = inspect(self._of_type).mapper - else: - of_type = None - - pj, sj, source, dest, \ - secondary, target_adapter = self.property._create_joins( - source_selectable=adapt_from, - source_polymorphic=True, - of_type=of_type) - if sj is not None: - return pj & sj - else: - return pj - - def of_type(self, cls): - """Produce a construct that represents a particular 'subtype' of - attribute for the parent class. - - Currently this is usable in conjunction with :meth:`.Query.join` - and :meth:`.Query.outerjoin`. - - """ - return RelationshipProperty.Comparator( - self.property, - self._parententity, - adapt_to_entity=self._adapt_to_entity, - of_type=cls) - - def in_(self, other): - """Produce an IN clause - this is not implemented - for :func:`~.orm.relationship`-based attributes at this time. - - """ - raise NotImplementedError('in_() not yet supported for ' - 'relationships. For a simple ' - 'many-to-one, use in_() against ' - 'the set of foreign key values.') - - __hash__ = None - - def __eq__(self, other): - """Implement the ``==`` operator. - - In a many-to-one context, such as:: - - MyClass.some_prop == - - this will typically produce a - clause such as:: - - mytable.related_id == - - Where ```` is the primary key of the given - object. - - The ``==`` operator provides partial functionality for non- - many-to-one comparisons: - - * Comparisons against collections are not supported. - Use :meth:`~.RelationshipProperty.Comparator.contains`. - * Compared to a scalar one-to-many, will produce a - clause that compares the target columns in the parent to - the given target. - * Compared to a scalar many-to-many, an alias - of the association table will be rendered as - well, forming a natural join that is part of the - main body of the query. This will not work for - queries that go beyond simple AND conjunctions of - comparisons, such as those which use OR. Use - explicit joins, outerjoins, or - :meth:`~.RelationshipProperty.Comparator.has` for - more comprehensive non-many-to-one scalar - membership tests. - * Comparisons against ``None`` given in a one-to-many - or many-to-many context produce a NOT EXISTS clause. 
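[editor's note] The ``==`` semantics above in query form, reusing the hypothetical ``Parent``/``Child`` one-to-many mapping sketched earlier; ``session`` and ``some_parent`` are assumed to exist::

    # many-to-one: compares the foreign key to the given
    # object's primary key
    session.query(Child).filter(Child.parent == some_parent)

    # one-to-many compared against None: renders NOT EXISTS
    session.query(Parent).filter(Parent.children == None)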
- - """ - if isinstance(other, (util.NoneType, expression.Null)): - if self.property.direction in [ONETOMANY, MANYTOMANY]: - return ~self._criterion_exists() - else: - return _orm_annotate(self.property._optimized_compare( - None, adapt_source=self.adapter)) - elif self.property.uselist: - raise sa_exc.InvalidRequestError( - "Can't compare a collection to an object or collection; " - "use contains() to test for membership.") - else: - return _orm_annotate( - self.property._optimized_compare( - other, adapt_source=self.adapter)) - - def _criterion_exists(self, criterion=None, **kwargs): - if getattr(self, '_of_type', None): - info = inspect(self._of_type) - target_mapper, to_selectable, is_aliased_class = \ - info.mapper, info.selectable, info.is_aliased_class - if self.property._is_self_referential and not \ - is_aliased_class: - to_selectable = to_selectable.alias() - - single_crit = target_mapper._single_table_criterion - if single_crit is not None: - if criterion is not None: - criterion = single_crit & criterion - else: - criterion = single_crit - else: - is_aliased_class = False - to_selectable = None - - if self.adapter: - source_selectable = self._source_selectable() - else: - source_selectable = None - - pj, sj, source, dest, secondary, target_adapter = \ - self.property._create_joins( - dest_polymorphic=True, - dest_selectable=to_selectable, - source_selectable=source_selectable) - - for k in kwargs: - crit = getattr(self.property.mapper.class_, k) == kwargs[k] - if criterion is None: - criterion = crit - else: - criterion = criterion & crit - - # annotate the *local* side of the join condition, in the case - # of pj + sj this is the full primaryjoin, in the case of just - # pj its the local side of the primaryjoin. - if sj is not None: - j = _orm_annotate(pj) & sj - else: - j = _orm_annotate(pj, exclude=self.property.remote_side) - - if criterion is not None and target_adapter and not \ - is_aliased_class: - # limit this adapter to annotated only? - criterion = target_adapter.traverse(criterion) - - # only have the "joined left side" of what we - # return be subject to Query adaption. The right - # side of it is used for an exists() subquery and - # should not correlate or otherwise reach out - # to anything in the enclosing query. - if criterion is not None: - criterion = criterion._annotate( - {'no_replacement_traverse': True}) - - crit = j & sql.True_._ifnone(criterion) - - ex = sql.exists([1], crit, from_obj=dest).correlate_except(dest) - if secondary is not None: - ex = ex.correlate_except(secondary) - return ex - - def any(self, criterion=None, **kwargs): - """Produce an expression that tests a collection against - particular criterion, using EXISTS. - - An expression like:: - - session.query(MyClass).filter( - MyClass.somereference.any(SomeRelated.x==2) - ) - - - Will produce a query like:: - - SELECT * FROM my_table WHERE - EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id - AND related.x=2) - - Because :meth:`~.RelationshipProperty.Comparator.any` uses - a correlated subquery, its performance is not nearly as - good when compared against large target tables as that of - using a join. 
-
-            :meth:`~.RelationshipProperty.Comparator.any` is particularly
-            useful for testing for empty collections::
-
-                session.query(MyClass).filter(
-                    ~MyClass.somereference.any()
-                )
-
-            will produce::
-
-                SELECT * FROM my_table WHERE
-                NOT EXISTS (SELECT 1 FROM related WHERE
-                related.my_id=my_table.id)
-
-            :meth:`~.RelationshipProperty.Comparator.any` is only
-            valid for collections, i.e. a :func:`.relationship`
-            that has ``uselist=True``. For scalar references,
-            use :meth:`~.RelationshipProperty.Comparator.has`.
-
-            """
-            if not self.property.uselist:
-                raise sa_exc.InvalidRequestError(
-                    "'any()' not implemented for scalar "
-                    "attributes. Use has()."
-                )
-
-            return self._criterion_exists(criterion, **kwargs)
-
-        def has(self, criterion=None, **kwargs):
-            """Produce an expression that tests a scalar reference against
-            particular criterion, using EXISTS.
-
-            An expression like::
-
-                session.query(MyClass).filter(
-                    MyClass.somereference.has(SomeRelated.x==2)
-                )
-
-
-            Will produce a query like::
-
-                SELECT * FROM my_table WHERE
-                EXISTS (SELECT 1 FROM related WHERE
-                related.id==my_table.related_id AND related.x=2)
-
-            Because :meth:`~.RelationshipProperty.Comparator.has` uses
-            a correlated subquery, its performance is not nearly as
-            good when compared against large target tables as that of
-            using a join.
-
-            :meth:`~.RelationshipProperty.Comparator.has` is only
-            valid for scalar references, i.e. a :func:`.relationship`
-            that has ``uselist=False``. For collection references,
-            use :meth:`~.RelationshipProperty.Comparator.any`.
-
-            """
-            if self.property.uselist:
-                raise sa_exc.InvalidRequestError(
-                    "'has()' not implemented for collections. "
-                    "Use any().")
-            return self._criterion_exists(criterion, **kwargs)
-
-        def contains(self, other, **kwargs):
-            """Return a simple expression that tests a collection for
-            containment of a particular item.
-
-            :meth:`~.RelationshipProperty.Comparator.contains` is
-            only valid for a collection, i.e. a
-            :func:`~.orm.relationship` that implements
-            one-to-many or many-to-many with ``uselist=True``.
-
-            When used in a simple one-to-many context, an
-            expression like::
-
-                MyClass.contains(other)
-
-            Produces a clause like::
-
-                mytable.id == <some id>
-
-            Where ``<some id>`` is the value of the foreign key
-            attribute on ``other`` which refers to the primary
-            key of its parent object. From this it follows that
-            :meth:`~.RelationshipProperty.Comparator.contains` is
-            very useful when used with simple one-to-many
-            operations.
-
-            For many-to-many operations, the behavior of
-            :meth:`~.RelationshipProperty.Comparator.contains`
-            has more caveats. The association table will be
-            rendered in the statement, producing an "implicit"
-            join, that is, includes multiple tables in the FROM
-            clause which are equated in the WHERE clause::
-
-                query(MyClass).filter(MyClass.contains(other))
-
-            Produces a query like::
-
-                SELECT * FROM my_table, my_association_table AS
-                my_association_table_1 WHERE
-                my_table.id = my_association_table_1.parent_id
-                AND my_association_table_1.child_id = <some id>
-
-            Where ``<some id>`` would be the primary key of
-            ``other``. From the above, it is clear that
-            :meth:`~.RelationshipProperty.Comparator.contains`
-            will **not** work with many-to-many collections when
-            used in queries that move beyond simple AND
-            conjunctions, such as multiple
-            :meth:`~.RelationshipProperty.Comparator.contains`
-            expressions joined by OR. In such cases subqueries or
-            explicit "outer joins" will need to be used instead.
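Continuing the hypothetical ``Parent``/``Child`` sketch from above (the ``contains()`` docstring continues below), the collection/scalar split between ``any()`` and ``has()`` comes out as::

    # collection side: EXISTS with an optional criterion
    session.query(Parent).filter(Parent.children.any(Child.id == 5))

    # the empty-collection test described in the docstring
    session.query(Parent).filter(~Parent.children.any())

    # keyword shorthand for simple equality criteria
    session.query(Parent).filter(Parent.children.any(id=5))

    # scalar (many-to-one) side uses has() instead
    session.query(Child).filter(Child.parent.has(Parent.id == 1))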
-            See :meth:`~.RelationshipProperty.Comparator.any` for
-            a less-performant alternative using EXISTS, or refer
-            to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
-            for more details on constructing outer joins.
-
-            """
-            if not self.property.uselist:
-                raise sa_exc.InvalidRequestError(
-                    "'contains' not implemented for scalar "
-                    "attributes. Use ==")
-            clause = self.property._optimized_compare(
-                other, adapt_source=self.adapter)
-
-            if self.property.secondaryjoin is not None:
-                clause.negation_clause = \
-                    self.__negated_contains_or_equals(other)
-
-            return clause
-
-        def __negated_contains_or_equals(self, other):
-            if self.property.direction == MANYTOONE:
-                state = attributes.instance_state(other)
-
-                def state_bindparam(x, state, col):
-                    dict_ = state.dict
-                    return sql.bindparam(
-                        x, unique=True,
-                        callable_=self.property._get_attr_w_warn_on_none(
-                            col,
-                            self.property.mapper._get_state_attr_by_column,
-                            state, dict_, col, passive=attributes.PASSIVE_OFF
-                        )
-                    )
-
-                def adapt(col):
-                    if self.adapter:
-                        return self.adapter(col)
-                    else:
-                        return col
-
-                if self.property._use_get:
-                    return sql.and_(*[
-                        sql.or_(
-                            adapt(x) != state_bindparam(adapt(x), state, y),
-                            adapt(x) == None)
-                        for (x, y) in self.property.local_remote_pairs])
-
-            criterion = sql.and_(*[
-                x == y for (x, y) in
-                zip(
-                    self.property.mapper.primary_key,
-                    self.property.mapper.primary_key_from_instance(other)
-                )
-            ])
-
-            return ~self._criterion_exists(criterion)
-
-        def __ne__(self, other):
-            """Implement the ``!=`` operator.
-
-            In a many-to-one context, such as::
-
-              MyClass.some_prop != <some object>
-
-            This will typically produce a clause such as::
-
-              mytable.related_id != <some id>
-
-            Where ``<some id>`` is the primary key of the
-            given object.
-
-            The ``!=`` operator provides partial functionality for non-
-            many-to-one comparisons:
-
-            * Comparisons against collections are not supported.
-              Use
-              :meth:`~.RelationshipProperty.Comparator.contains`
-              in conjunction with :func:`~.expression.not_`.
-            * Compared to a scalar one-to-many, will produce a
-              clause that compares the target columns in the parent to
-              the given target.
-            * Compared to a scalar many-to-many, an alias
-              of the association table will be rendered as
-              well, forming a natural join that is part of the
-              main body of the query. This will not work for
-              queries that go beyond simple AND conjunctions of
-              comparisons, such as those which use OR. Use
-              explicit joins, outerjoins, or
-              :meth:`~.RelationshipProperty.Comparator.has` in
-              conjunction with :func:`~.expression.not_` for
-              more comprehensive non-many-to-one scalar
-              membership tests.
-            * Comparisons against ``None`` given in a one-to-many
-              or many-to-many context produce an EXISTS clause.
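Rounding out the same hypothetical sketch, ``contains()`` and the negated comparisons described just above come out as::

    some_child = Child(parent=some_parent)
    session.add(some_child)
    session.commit()

    # one-to-many containment: compares parent.id to the foreign
    # key value carried by some_child
    session.query(Parent).filter(Parent.children.contains(some_child))

    # negated many-to-one comparison
    session.query(Child).filter(Child.parent != some_parent)

    # != None in a one-to-many context renders an EXISTS clause
    session.query(Parent).filter(Parent.children != None)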
- - """ - if isinstance(other, (util.NoneType, expression.Null)): - if self.property.direction == MANYTOONE: - return _orm_annotate(~self.property._optimized_compare( - None, adapt_source=self.adapter)) - - else: - return self._criterion_exists() - elif self.property.uselist: - raise sa_exc.InvalidRequestError( - "Can't compare a collection" - " to an object or collection; use " - "contains() to test for membership.") - else: - return _orm_annotate(self.__negated_contains_or_equals(other)) - - @util.memoized_property - def property(self): - if mapperlib.Mapper._new_mappers: - mapperlib.Mapper._configure_all() - return self.prop - - def _with_parent(self, instance, alias_secondary=True): - assert instance is not None - return self._optimized_compare( - instance, value_is_parent=True, alias_secondary=alias_secondary) - - def _optimized_compare(self, state, value_is_parent=False, - adapt_source=None, - alias_secondary=True): - if state is not None: - state = attributes.instance_state(state) - - reverse_direction = not value_is_parent - - if state is None: - return self._lazy_none_clause( - reverse_direction, - adapt_source=adapt_source) - - if not reverse_direction: - criterion, bind_to_col = \ - self._lazy_strategy._lazywhere, \ - self._lazy_strategy._bind_to_col - else: - criterion, bind_to_col = \ - self._lazy_strategy._rev_lazywhere, \ - self._lazy_strategy._rev_bind_to_col - - if reverse_direction: - mapper = self.mapper - else: - mapper = self.parent - - dict_ = attributes.instance_dict(state.obj()) - - def visit_bindparam(bindparam): - if bindparam._identifying_key in bind_to_col: - bindparam.callable = self._get_attr_w_warn_on_none( - bind_to_col[bindparam._identifying_key], - mapper._get_state_attr_by_column, - state, dict_, - bind_to_col[bindparam._identifying_key], - passive=attributes.PASSIVE_OFF) - - if self.secondary is not None and alias_secondary: - criterion = ClauseAdapter( - self.secondary.alias()).\ - traverse(criterion) - - criterion = visitors.cloned_traverse( - criterion, {}, {'bindparam': visit_bindparam}) - - if adapt_source: - criterion = adapt_source(criterion) - return criterion - - def _get_attr_w_warn_on_none(self, column, fn, *arg, **kw): - def _go(): - value = fn(*arg, **kw) - if value is None: - util.warn( - "Got None for value of column %s; this is unsupported " - "for a relationship comparison and will not " - "currently produce an IS comparison " - "(but may in a future release)" % column) - return value - return _go - - def _lazy_none_clause(self, reverse_direction=False, adapt_source=None): - if not reverse_direction: - criterion, bind_to_col = \ - self._lazy_strategy._lazywhere, \ - self._lazy_strategy._bind_to_col - else: - criterion, bind_to_col = \ - self._lazy_strategy._rev_lazywhere, \ - self._lazy_strategy._rev_bind_to_col - - criterion = adapt_criterion_to_null(criterion, bind_to_col) - - if adapt_source: - criterion = adapt_source(criterion) - return criterion - - def __str__(self): - return str(self.parent.class_.__name__) + "." 
+ self.key - - def merge(self, - session, - source_state, - source_dict, - dest_state, - dest_dict, - load, _recursive): - - if load: - for r in self._reverse_property: - if (source_state, r) in _recursive: - return - - if "merge" not in self._cascade: - return - - if self.key not in source_dict: - return - - if self.uselist: - instances = source_state.get_impl(self.key).\ - get(source_state, source_dict) - if hasattr(instances, '_sa_adapter'): - # convert collections to adapters to get a true iterator - instances = instances._sa_adapter - - if load: - # for a full merge, pre-load the destination collection, - # so that individual _merge of each item pulls from identity - # map for those already present. - # also assumes CollectionAttrbiuteImpl behavior of loading - # "old" list in any case - dest_state.get_impl(self.key).get(dest_state, dest_dict) - - dest_list = [] - for current in instances: - current_state = attributes.instance_state(current) - current_dict = attributes.instance_dict(current) - _recursive[(current_state, self)] = True - obj = session._merge(current_state, current_dict, - load=load, _recursive=_recursive) - if obj is not None: - dest_list.append(obj) - - if not load: - coll = attributes.init_state_collection(dest_state, - dest_dict, self.key) - for c in dest_list: - coll.append_without_event(c) - else: - dest_state.get_impl(self.key)._set_iterable( - dest_state, dest_dict, dest_list) - else: - current = source_dict[self.key] - if current is not None: - current_state = attributes.instance_state(current) - current_dict = attributes.instance_dict(current) - _recursive[(current_state, self)] = True - obj = session._merge(current_state, current_dict, - load=load, _recursive=_recursive) - else: - obj = None - - if not load: - dest_dict[self.key] = obj - else: - dest_state.get_impl(self.key).set(dest_state, - dest_dict, obj, None) - - def _value_as_iterable(self, state, dict_, key, - passive=attributes.PASSIVE_OFF): - """Return a list of tuples (state, obj) for the given - key. - - returns an empty list if the value is None/empty/PASSIVE_NO_RESULT - """ - - impl = state.manager[key].impl - x = impl.get(state, dict_, passive=passive) - if x is attributes.PASSIVE_NO_RESULT or x is None: - return [] - elif hasattr(impl, 'get_collection'): - return [ - (attributes.instance_state(o), o) for o in - impl.get_collection(state, dict_, x, passive=passive) - ] - else: - return [(attributes.instance_state(x), x)] - - def cascade_iterator(self, type_, state, dict_, - visited_states, halt_on=None): - # assert type_ in self._cascade - - # only actively lazy load on the 'delete' cascade - if type_ != 'delete' or self.passive_deletes: - passive = attributes.PASSIVE_NO_INITIALIZE - else: - passive = attributes.PASSIVE_OFF - - if type_ == 'save-update': - tuples = state.manager[self.key].impl.\ - get_all_pending(state, dict_) - - else: - tuples = self._value_as_iterable(state, dict_, self.key, - passive=passive) - - skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \ - not in self._cascade - - for instance_state, c in tuples: - if instance_state in visited_states: - continue - - if c is None: - # would like to emit a warning here, but - # would not be consistent with collection.append(None) - # current behavior of silently skipping. 
- # see [ticket:2229] - continue - - instance_dict = attributes.instance_dict(c) - - if halt_on and halt_on(instance_state): - continue - - if skip_pending and not instance_state.key: - continue - - instance_mapper = instance_state.manager.mapper - - if not instance_mapper.isa(self.mapper.class_manager.mapper): - raise AssertionError("Attribute '%s' on class '%s' " - "doesn't handle objects " - "of type '%s'" % ( - self.key, - self.parent.class_, - c.__class__ - )) - - visited_states.add(instance_state) - - yield c, instance_mapper, instance_state, instance_dict - - def _add_reverse_property(self, key): - other = self.mapper.get_property(key, _configure_mappers=False) - self._reverse_property.add(other) - other._reverse_property.add(self) - - if not other.mapper.common_parent(self.parent): - raise sa_exc.ArgumentError( - 'reverse_property %r on ' - 'relationship %s references relationship %s, which ' - 'does not reference mapper %s' % - (key, self, other, self.parent)) - - if self.direction in (ONETOMANY, MANYTOONE) and self.direction \ - == other.direction: - raise sa_exc.ArgumentError( - '%s and back-reference %s are ' - 'both of the same direction %r. Did you mean to ' - 'set remote_side on the many-to-one side ?' % - (other, self, self.direction)) - - @util.memoized_property - def mapper(self): - """Return the targeted :class:`.Mapper` for this - :class:`.RelationshipProperty`. - - This is a lazy-initializing static attribute. - - """ - if util.callable(self.argument) and \ - not isinstance(self.argument, (type, mapperlib.Mapper)): - argument = self.argument() - else: - argument = self.argument - - if isinstance(argument, type): - mapper_ = mapperlib.class_mapper(argument, - configure=False) - elif isinstance(self.argument, mapperlib.Mapper): - mapper_ = argument - else: - raise sa_exc.ArgumentError( - "relationship '%s' expects " - "a class or a mapper argument (received: %s)" - % (self.key, type(argument))) - return mapper_ - - @util.memoized_property - @util.deprecated("0.7", "Use .target") - def table(self): - """Return the selectable linked to this - :class:`.RelationshipProperty` object's target - :class:`.Mapper`. - """ - return self.target - - def do_init(self): - self._check_conflicts() - self._process_dependent_arguments() - self._setup_join_conditions() - self._check_cascade_settings(self._cascade) - self._post_init() - self._generate_backref() - self._join_condition._warn_for_conflicting_sync_targets() - super(RelationshipProperty, self).do_init() - self._lazy_strategy = self._get_strategy((("lazy", "select"),)) - - def _process_dependent_arguments(self): - """Convert incoming configuration arguments to their - proper form. - - Callables are resolved, ORM annotations removed. - - """ - # accept callables for other attributes which may require - # deferred initialization. This technique is used - # by declarative "string configs" and some recipes. - for attr in ( - 'order_by', 'primaryjoin', 'secondaryjoin', - 'secondary', '_user_defined_foreign_keys', 'remote_side', - ): - attr_value = getattr(self, attr) - if util.callable(attr_value): - setattr(self, attr, attr_value()) - - # remove "annotations" which are present if mapped class - # descriptors are used to create the join expression. - for attr in 'primaryjoin', 'secondaryjoin': - val = getattr(self, attr) - if val is not None: - setattr(self, attr, _orm_deannotate( - expression._only_column_elements(val, attr)) - ) - - # ensure expressions in self.order_by, foreign_keys, - # remote_side are all columns, not strings. 
- if self.order_by is not False and self.order_by is not None: - self.order_by = [ - expression._only_column_elements(x, "order_by") - for x in - util.to_list(self.order_by)] - - self._user_defined_foreign_keys = \ - util.column_set( - expression._only_column_elements(x, "foreign_keys") - for x in util.to_column_set( - self._user_defined_foreign_keys - )) - - self.remote_side = \ - util.column_set( - expression._only_column_elements(x, "remote_side") - for x in - util.to_column_set(self.remote_side)) - - self.target = self.mapper.mapped_table - - def _setup_join_conditions(self): - self._join_condition = jc = JoinCondition( - parent_selectable=self.parent.mapped_table, - child_selectable=self.mapper.mapped_table, - parent_local_selectable=self.parent.local_table, - child_local_selectable=self.mapper.local_table, - primaryjoin=self.primaryjoin, - secondary=self.secondary, - secondaryjoin=self.secondaryjoin, - parent_equivalents=self.parent._equivalent_columns, - child_equivalents=self.mapper._equivalent_columns, - consider_as_foreign_keys=self._user_defined_foreign_keys, - local_remote_pairs=self.local_remote_pairs, - remote_side=self.remote_side, - self_referential=self._is_self_referential, - prop=self, - support_sync=not self.viewonly, - can_be_synced_fn=self._columns_are_mapped - ) - self.primaryjoin = jc.deannotated_primaryjoin - self.secondaryjoin = jc.deannotated_secondaryjoin - self.direction = jc.direction - self.local_remote_pairs = jc.local_remote_pairs - self.remote_side = jc.remote_columns - self.local_columns = jc.local_columns - self.synchronize_pairs = jc.synchronize_pairs - self._calculated_foreign_keys = jc.foreign_key_columns - self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs - - def _check_conflicts(self): - """Test that this relationship is legal, warn about - inheritance conflicts.""" - - if self.parent.non_primary and not mapperlib.class_mapper( - self.parent.class_, - configure=False).has_property(self.key): - raise sa_exc.ArgumentError( - "Attempting to assign a new " - "relationship '%s' to a non-primary mapper on " - "class '%s'. New relationships can only be added " - "to the primary mapper, i.e. the very first mapper " - "created for class '%s' " % - (self.key, self.parent.class_.__name__, - self.parent.class_.__name__)) - - # check for conflicting relationship() on superclass - if not self.parent.concrete: - for inheriting in self.parent.iterate_to_root(): - if inheriting is not self.parent \ - and inheriting.has_property(self.key): - util.warn("Warning: relationship '%s' on mapper " - "'%s' supersedes the same relationship " - "on inherited mapper '%s'; this can " - "cause dependency issues during flush" - % (self.key, self.parent, inheriting)) - - def _get_cascade(self): - """Return the current cascade setting for this - :class:`.RelationshipProperty`. - """ - return self._cascade - - def _set_cascade(self, cascade): - cascade = CascadeOptions(cascade) - if 'mapper' in self.__dict__: - self._check_cascade_settings(cascade) - self._cascade = cascade - - if self._dependency_processor: - self._dependency_processor.cascade = cascade - - cascade = property(_get_cascade, _set_cascade) - - def _check_cascade_settings(self, cascade): - if cascade.delete_orphan and not self.single_parent \ - and (self.direction is MANYTOMANY or self.direction - is MANYTOONE): - raise sa_exc.ArgumentError( - 'On %s, delete-orphan cascade is not supported ' - 'on a many-to-many or many-to-one relationship ' - 'when single_parent is not set. 
Set ' - 'single_parent=True on the relationship().' - % self) - if self.direction is MANYTOONE and self.passive_deletes: - util.warn("On %s, 'passive_deletes' is normally configured " - "on one-to-many, one-to-one, many-to-many " - "relationships only." - % self) - - if self.passive_deletes == 'all' and \ - ("delete" in cascade or - "delete-orphan" in cascade): - raise sa_exc.ArgumentError( - "On %s, can't set passive_deletes='all' in conjunction " - "with 'delete' or 'delete-orphan' cascade" % self) - - if cascade.delete_orphan: - self.mapper.primary_mapper()._delete_orphans.append( - (self.key, self.parent.class_) - ) - - def _columns_are_mapped(self, *cols): - """Return True if all columns in the given collection are - mapped by the tables referenced by this :class:`.Relationship`. - - """ - for c in cols: - if self.secondary is not None \ - and self.secondary.c.contains_column(c): - continue - if not self.parent.mapped_table.c.contains_column(c) and \ - not self.target.c.contains_column(c): - return False - return True - - def _generate_backref(self): - """Interpret the 'backref' instruction to create a - :func:`.relationship` complementary to this one.""" - - if self.parent.non_primary: - return - if self.backref is not None and not self.back_populates: - if isinstance(self.backref, util.string_types): - backref_key, kwargs = self.backref, {} - else: - backref_key, kwargs = self.backref - mapper = self.mapper.primary_mapper() - - check = set(mapper.iterate_to_root()).\ - union(mapper.self_and_descendants) - for m in check: - if m.has_property(backref_key): - raise sa_exc.ArgumentError( - "Error creating backref " - "'%s' on relationship '%s': property of that " - "name exists on mapper '%s'" % - (backref_key, self, m)) - - # determine primaryjoin/secondaryjoin for the - # backref. Use the one we had, so that - # a custom join doesn't have to be specified in - # both directions. - if self.secondary is not None: - # for many to many, just switch primaryjoin/ - # secondaryjoin. use the annotated - # pj/sj on the _join_condition. - pj = kwargs.pop( - 'primaryjoin', - self._join_condition.secondaryjoin_minus_local) - sj = kwargs.pop( - 'secondaryjoin', - self._join_condition.primaryjoin_minus_local) - else: - pj = kwargs.pop( - 'primaryjoin', - self._join_condition.primaryjoin_reverse_remote) - sj = kwargs.pop('secondaryjoin', None) - if sj: - raise sa_exc.InvalidRequestError( - "Can't assign 'secondaryjoin' on a backref " - "against a non-secondary relationship." 
- ) - - foreign_keys = kwargs.pop('foreign_keys', - self._user_defined_foreign_keys) - parent = self.parent.primary_mapper() - kwargs.setdefault('viewonly', self.viewonly) - kwargs.setdefault('post_update', self.post_update) - kwargs.setdefault('passive_updates', self.passive_updates) - self.back_populates = backref_key - relationship = RelationshipProperty( - parent, self.secondary, - pj, sj, - foreign_keys=foreign_keys, - back_populates=self.key, - **kwargs) - mapper._configure_property(backref_key, relationship) - - if self.back_populates: - self._add_reverse_property(self.back_populates) - - def _post_init(self): - if self.uselist is None: - self.uselist = self.direction is not MANYTOONE - if not self.viewonly: - self._dependency_processor = \ - dependency.DependencyProcessor.from_relationship(self) - - @util.memoized_property - def _use_get(self): - """memoize the 'use_get' attribute of this RelationshipLoader's - lazyloader.""" - - strategy = self._lazy_strategy - return strategy.use_get - - @util.memoized_property - def _is_self_referential(self): - return self.mapper.common_parent(self.parent) - - def _create_joins(self, source_polymorphic=False, - source_selectable=None, dest_polymorphic=False, - dest_selectable=None, of_type=None): - if source_selectable is None: - if source_polymorphic and self.parent.with_polymorphic: - source_selectable = self.parent._with_polymorphic_selectable - - aliased = False - if dest_selectable is None: - if dest_polymorphic and self.mapper.with_polymorphic: - dest_selectable = self.mapper._with_polymorphic_selectable - aliased = True - else: - dest_selectable = self.mapper.mapped_table - - if self._is_self_referential and source_selectable is None: - dest_selectable = dest_selectable.alias() - aliased = True - else: - aliased = True - - dest_mapper = of_type or self.mapper - - single_crit = dest_mapper._single_table_criterion - aliased = aliased or (source_selectable is not None) - - primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable = \ - self._join_condition.join_targets( - source_selectable, dest_selectable, aliased, single_crit - ) - if source_selectable is None: - source_selectable = self.parent.local_table - if dest_selectable is None: - dest_selectable = self.mapper.local_table - return (primaryjoin, secondaryjoin, source_selectable, - dest_selectable, secondary, target_adapter) - - -def _annotate_columns(element, annotations): - def clone(elem): - if isinstance(elem, expression.ColumnClause): - elem = elem._annotate(annotations.copy()) - elem._copy_internals(clone=clone) - return elem - - if element is not None: - element = clone(element) - return element - - -class JoinCondition(object): - def __init__(self, - parent_selectable, - child_selectable, - parent_local_selectable, - child_local_selectable, - primaryjoin=None, - secondary=None, - secondaryjoin=None, - parent_equivalents=None, - child_equivalents=None, - consider_as_foreign_keys=None, - local_remote_pairs=None, - remote_side=None, - self_referential=False, - prop=None, - support_sync=True, - can_be_synced_fn=lambda *c: True - ): - self.parent_selectable = parent_selectable - self.parent_local_selectable = parent_local_selectable - self.child_selectable = child_selectable - self.child_local_selectable = child_local_selectable - self.parent_equivalents = parent_equivalents - self.child_equivalents = child_equivalents - self.primaryjoin = primaryjoin - self.secondaryjoin = secondaryjoin - self.secondary = secondary - self.consider_as_foreign_keys = 
consider_as_foreign_keys - self._local_remote_pairs = local_remote_pairs - self._remote_side = remote_side - self.prop = prop - self.self_referential = self_referential - self.support_sync = support_sync - self.can_be_synced_fn = can_be_synced_fn - self._determine_joins() - self._annotate_fks() - self._annotate_remote() - self._annotate_local() - self._setup_pairs() - self._check_foreign_cols(self.primaryjoin, True) - if self.secondaryjoin is not None: - self._check_foreign_cols(self.secondaryjoin, False) - self._determine_direction() - self._check_remote_side() - self._log_joins() - - def _log_joins(self): - if self.prop is None: - return - log = self.prop.logger - log.info('%s setup primary join %s', self.prop, - self.primaryjoin) - log.info('%s setup secondary join %s', self.prop, - self.secondaryjoin) - log.info('%s synchronize pairs [%s]', self.prop, - ','.join('(%s => %s)' % (l, r) for (l, r) in - self.synchronize_pairs)) - log.info('%s secondary synchronize pairs [%s]', self.prop, - ','.join('(%s => %s)' % (l, r) for (l, r) in - self.secondary_synchronize_pairs or [])) - log.info('%s local/remote pairs [%s]', self.prop, - ','.join('(%s / %s)' % (l, r) for (l, r) in - self.local_remote_pairs)) - log.info('%s remote columns [%s]', self.prop, - ','.join('%s' % col for col in self.remote_columns) - ) - log.info('%s local columns [%s]', self.prop, - ','.join('%s' % col for col in self.local_columns) - ) - log.info('%s relationship direction %s', self.prop, - self.direction) - - def _determine_joins(self): - """Determine the 'primaryjoin' and 'secondaryjoin' attributes, - if not passed to the constructor already. - - This is based on analysis of the foreign key relationships - between the parent and target mapped selectables. - - """ - if self.secondaryjoin is not None and self.secondary is None: - raise sa_exc.ArgumentError( - "Property %s specified with secondary " - "join condition but " - "no secondary argument" % self.prop) - - # find a join between the given mapper's mapped table and - # the given table. will try the mapper's local table first - # for more specificity, then if not found will try the more - # general mapped table, which in the case of inheritance is - # a join. - try: - consider_as_foreign_keys = self.consider_as_foreign_keys or None - if self.secondary is not None: - if self.secondaryjoin is None: - self.secondaryjoin = \ - join_condition( - self.child_selectable, - self.secondary, - a_subset=self.child_local_selectable, - consider_as_foreign_keys=consider_as_foreign_keys - ) - if self.primaryjoin is None: - self.primaryjoin = \ - join_condition( - self.parent_selectable, - self.secondary, - a_subset=self.parent_local_selectable, - consider_as_foreign_keys=consider_as_foreign_keys - ) - else: - if self.primaryjoin is None: - self.primaryjoin = \ - join_condition( - self.parent_selectable, - self.child_selectable, - a_subset=self.parent_local_selectable, - consider_as_foreign_keys=consider_as_foreign_keys - ) - except sa_exc.NoForeignKeysError: - if self.secondary is not None: - raise sa_exc.NoForeignKeysError( - "Could not determine join " - "condition between parent/child tables on " - "relationship %s - there are no foreign keys " - "linking these tables via secondary table '%s'. " - "Ensure that referencing columns are associated " - "with a ForeignKey or ForeignKeyConstraint, or " - "specify 'primaryjoin' and 'secondaryjoin' " - "expressions." 
% (self.prop, self.secondary)) - else: - raise sa_exc.NoForeignKeysError( - "Could not determine join " - "condition between parent/child tables on " - "relationship %s - there are no foreign keys " - "linking these tables. " - "Ensure that referencing columns are associated " - "with a ForeignKey or ForeignKeyConstraint, or " - "specify a 'primaryjoin' expression." % self.prop) - except sa_exc.AmbiguousForeignKeysError: - if self.secondary is not None: - raise sa_exc.AmbiguousForeignKeysError( - "Could not determine join " - "condition between parent/child tables on " - "relationship %s - there are multiple foreign key " - "paths linking the tables via secondary table '%s'. " - "Specify the 'foreign_keys' " - "argument, providing a list of those columns which " - "should be counted as containing a foreign key " - "reference from the secondary table to each of the " - "parent and child tables." - % (self.prop, self.secondary)) - else: - raise sa_exc.AmbiguousForeignKeysError( - "Could not determine join " - "condition between parent/child tables on " - "relationship %s - there are multiple foreign key " - "paths linking the tables. Specify the " - "'foreign_keys' argument, providing a list of those " - "columns which should be counted as containing a " - "foreign key reference to the parent table." - % self.prop) - - @property - def primaryjoin_minus_local(self): - return _deep_deannotate(self.primaryjoin, values=("local", "remote")) - - @property - def secondaryjoin_minus_local(self): - return _deep_deannotate(self.secondaryjoin, - values=("local", "remote")) - - @util.memoized_property - def primaryjoin_reverse_remote(self): - """Return the primaryjoin condition suitable for the - "reverse" direction. - - If the primaryjoin was delivered here with pre-existing - "remote" annotations, the local/remote annotations - are reversed. Otherwise, the local/remote annotations - are removed. - - """ - if self._has_remote_annotations: - def replace(element): - if "remote" in element._annotations: - v = element._annotations.copy() - del v['remote'] - v['local'] = True - return element._with_annotations(v) - elif "local" in element._annotations: - v = element._annotations.copy() - del v['local'] - v['remote'] = True - return element._with_annotations(v) - return visitors.replacement_traverse( - self.primaryjoin, {}, replace) - else: - if self._has_foreign_annotations: - # TODO: coverage - return _deep_deannotate(self.primaryjoin, - values=("local", "remote")) - else: - return _deep_deannotate(self.primaryjoin) - - def _has_annotation(self, clause, annotation): - for col in visitors.iterate(clause, {}): - if annotation in col._annotations: - return True - else: - return False - - @util.memoized_property - def _has_foreign_annotations(self): - return self._has_annotation(self.primaryjoin, "foreign") - - @util.memoized_property - def _has_remote_annotations(self): - return self._has_annotation(self.primaryjoin, "remote") - - def _annotate_fks(self): - """Annotate the primaryjoin and secondaryjoin - structures with 'foreign' annotations marking columns - considered as foreign. 
- - """ - if self._has_foreign_annotations: - return - - if self.consider_as_foreign_keys: - self._annotate_from_fk_list() - else: - self._annotate_present_fks() - - def _annotate_from_fk_list(self): - def check_fk(col): - if col in self.consider_as_foreign_keys: - return col._annotate({"foreign": True}) - self.primaryjoin = visitors.replacement_traverse( - self.primaryjoin, - {}, - check_fk - ) - if self.secondaryjoin is not None: - self.secondaryjoin = visitors.replacement_traverse( - self.secondaryjoin, - {}, - check_fk - ) - - def _annotate_present_fks(self): - if self.secondary is not None: - secondarycols = util.column_set(self.secondary.c) - else: - secondarycols = set() - - def is_foreign(a, b): - if isinstance(a, schema.Column) and \ - isinstance(b, schema.Column): - if a.references(b): - return a - elif b.references(a): - return b - - if secondarycols: - if a in secondarycols and b not in secondarycols: - return a - elif b in secondarycols and a not in secondarycols: - return b - - def visit_binary(binary): - if not isinstance(binary.left, sql.ColumnElement) or \ - not isinstance(binary.right, sql.ColumnElement): - return - - if "foreign" not in binary.left._annotations and \ - "foreign" not in binary.right._annotations: - col = is_foreign(binary.left, binary.right) - if col is not None: - if col.compare(binary.left): - binary.left = binary.left._annotate( - {"foreign": True}) - elif col.compare(binary.right): - binary.right = binary.right._annotate( - {"foreign": True}) - - self.primaryjoin = visitors.cloned_traverse( - self.primaryjoin, - {}, - {"binary": visit_binary} - ) - if self.secondaryjoin is not None: - self.secondaryjoin = visitors.cloned_traverse( - self.secondaryjoin, - {}, - {"binary": visit_binary} - ) - - def _refers_to_parent_table(self): - """Return True if the join condition contains column - comparisons where both columns are in both tables. - - """ - pt = self.parent_selectable - mt = self.child_selectable - result = [False] - - def visit_binary(binary): - c, f = binary.left, binary.right - if ( - isinstance(c, expression.ColumnClause) and - isinstance(f, expression.ColumnClause) and - pt.is_derived_from(c.table) and - pt.is_derived_from(f.table) and - mt.is_derived_from(c.table) and - mt.is_derived_from(f.table) - ): - result[0] = True - visitors.traverse( - self.primaryjoin, - {}, - {"binary": visit_binary} - ) - return result[0] - - def _tables_overlap(self): - """Return True if parent/child tables have some overlap.""" - - return selectables_overlap( - self.parent_selectable, self.child_selectable) - - def _annotate_remote(self): - """Annotate the primaryjoin and secondaryjoin - structures with 'remote' annotations marking columns - considered as part of the 'remote' side. - - """ - if self._has_remote_annotations: - return - - if self.secondary is not None: - self._annotate_remote_secondary() - elif self._local_remote_pairs or self._remote_side: - self._annotate_remote_from_args() - elif self._refers_to_parent_table(): - self._annotate_selfref(lambda col: "foreign" in col._annotations, False) - elif self._tables_overlap(): - self._annotate_remote_with_overlap() - else: - self._annotate_remote_distinct_selectables() - - def _annotate_remote_secondary(self): - """annotate 'remote' in primaryjoin, secondaryjoin - when 'secondary' is present. 
- - """ - def repl(element): - if self.secondary.c.contains_column(element): - return element._annotate({"remote": True}) - self.primaryjoin = visitors.replacement_traverse( - self.primaryjoin, {}, repl) - self.secondaryjoin = visitors.replacement_traverse( - self.secondaryjoin, {}, repl) - - def _annotate_selfref(self, fn, remote_side_given): - """annotate 'remote' in primaryjoin, secondaryjoin - when the relationship is detected as self-referential. - - """ - def visit_binary(binary): - equated = binary.left.compare(binary.right) - if isinstance(binary.left, expression.ColumnClause) and \ - isinstance(binary.right, expression.ColumnClause): - # assume one to many - FKs are "remote" - if fn(binary.left): - binary.left = binary.left._annotate({"remote": True}) - if fn(binary.right) and not equated: - binary.right = binary.right._annotate( - {"remote": True}) - elif not remote_side_given: - self._warn_non_column_elements() - - self.primaryjoin = visitors.cloned_traverse( - self.primaryjoin, {}, - {"binary": visit_binary}) - - def _annotate_remote_from_args(self): - """annotate 'remote' in primaryjoin, secondaryjoin - when the 'remote_side' or '_local_remote_pairs' - arguments are used. - - """ - if self._local_remote_pairs: - if self._remote_side: - raise sa_exc.ArgumentError( - "remote_side argument is redundant " - "against more detailed _local_remote_side " - "argument.") - - remote_side = [r for (l, r) in self._local_remote_pairs] - else: - remote_side = self._remote_side - - if self._refers_to_parent_table(): - self._annotate_selfref(lambda col: col in remote_side, True) - else: - def repl(element): - if element in remote_side: - return element._annotate({"remote": True}) - self.primaryjoin = visitors.replacement_traverse( - self.primaryjoin, {}, repl) - - def _annotate_remote_with_overlap(self): - """annotate 'remote' in primaryjoin, secondaryjoin - when the parent/child tables have some set of - tables in common, though is not a fully self-referential - relationship. - - """ - def visit_binary(binary): - binary.left, binary.right = proc_left_right(binary.left, - binary.right) - binary.right, binary.left = proc_left_right(binary.right, - binary.left) - - check_entities = self.prop is not None and \ - self.prop.mapper is not self.prop.parent - - def proc_left_right(left, right): - if isinstance(left, expression.ColumnClause) and \ - isinstance(right, expression.ColumnClause): - if self.child_selectable.c.contains_column(right) and \ - self.parent_selectable.c.contains_column(left): - right = right._annotate({"remote": True}) - elif check_entities and \ - right._annotations.get('parentmapper') is self.prop.mapper: - right = right._annotate({"remote": True}) - elif check_entities and \ - left._annotations.get('parentmapper') is self.prop.mapper: - left = left._annotate({"remote": True}) - else: - self._warn_non_column_elements() - - return left, right - - self.primaryjoin = visitors.cloned_traverse( - self.primaryjoin, {}, - {"binary": visit_binary}) - - def _annotate_remote_distinct_selectables(self): - """annotate 'remote' in primaryjoin, secondaryjoin - when the parent/child tables are entirely - separate. - - """ - def repl(element): - if self.child_selectable.c.contains_column(element) and \ - (not self.parent_local_selectable.c. - contains_column(element) or - self.child_local_selectable.c. 
- contains_column(element)): - return element._annotate({"remote": True}) - self.primaryjoin = visitors.replacement_traverse( - self.primaryjoin, {}, repl) - - def _warn_non_column_elements(self): - util.warn( - "Non-simple column elements in primary " - "join condition for property %s - consider using " - "remote() annotations to mark the remote side." - % self.prop - ) - - def _annotate_local(self): - """Annotate the primaryjoin and secondaryjoin - structures with 'local' annotations. - - This annotates all column elements found - simultaneously in the parent table - and the join condition that don't have a - 'remote' annotation set up from - _annotate_remote() or user-defined. - - """ - if self._has_annotation(self.primaryjoin, "local"): - return - - if self._local_remote_pairs: - local_side = util.column_set([l for (l, r) - in self._local_remote_pairs]) - else: - local_side = util.column_set(self.parent_selectable.c) - - def locals_(elem): - if "remote" not in elem._annotations and \ - elem in local_side: - return elem._annotate({"local": True}) - self.primaryjoin = visitors.replacement_traverse( - self.primaryjoin, {}, locals_ - ) - - def _check_remote_side(self): - if not self.local_remote_pairs: - raise sa_exc.ArgumentError( - 'Relationship %s could ' - 'not determine any unambiguous local/remote column ' - 'pairs based on join condition and remote_side ' - 'arguments. ' - 'Consider using the remote() annotation to ' - 'accurately mark those elements of the join ' - 'condition that are on the remote side of ' - 'the relationship.' % (self.prop, )) - - def _check_foreign_cols(self, join_condition, primary): - """Check the foreign key columns collected and emit error - messages.""" - - can_sync = False - - foreign_cols = self._gather_columns_with_annotation( - join_condition, "foreign") - - has_foreign = bool(foreign_cols) - - if primary: - can_sync = bool(self.synchronize_pairs) - else: - can_sync = bool(self.secondary_synchronize_pairs) - - if self.support_sync and can_sync or \ - (not self.support_sync and has_foreign): - return - - # from here below is just determining the best error message - # to report. Check for a join condition using any operator - # (not just ==), perhaps they need to turn on "viewonly=True". - if self.support_sync and has_foreign and not can_sync: - err = "Could not locate any simple equality expressions "\ - "involving locally mapped foreign key columns for "\ - "%s join condition "\ - "'%s' on relationship %s." % ( - primary and 'primary' or 'secondary', - join_condition, - self.prop - ) - err += \ - " Ensure that referencing columns are associated "\ - "with a ForeignKey or ForeignKeyConstraint, or are "\ - "annotated in the join condition with the foreign() "\ - "annotation. To allow comparison operators other than "\ - "'==', the relationship can be marked as viewonly=True." - - raise sa_exc.ArgumentError(err) - else: - err = "Could not locate any relevant foreign key columns "\ - "for %s join condition '%s' on relationship %s." % ( - primary and 'primary' or 'secondary', - join_condition, - self.prop - ) - err += \ - ' Ensure that referencing columns are associated '\ - 'with a ForeignKey or ForeignKeyConstraint, or are '\ - 'annotated in the join condition with the foreign() '\ - 'annotation.' - raise sa_exc.ArgumentError(err) - - def _determine_direction(self): - """Determine if this relationship is one to many, many to one, - many to many. 
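All of the annotation heuristics above can be bypassed by marking the join condition explicitly with the public :func:`.foreign` and :func:`.remote` functions that this code consumes; a hedged sketch for a self-referential case (``Node`` is a hypothetical class, reusing the declarative ``Base`` from the earlier sketch)::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.orm import foreign, relationship, remote

    class Node(Base):
        __tablename__ = 'node'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('node.id'))

        # remote() and foreign() settle the remote side and the
        # direction explicitly instead of relying on the heuristics
        parent = relationship(
            "Node",
            primaryjoin=remote(id) == foreign(parent_id),
        )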
- - """ - if self.secondaryjoin is not None: - self.direction = MANYTOMANY - else: - parentcols = util.column_set(self.parent_selectable.c) - targetcols = util.column_set(self.child_selectable.c) - - # fk collection which suggests ONETOMANY. - onetomany_fk = targetcols.intersection( - self.foreign_key_columns) - - # fk collection which suggests MANYTOONE. - - manytoone_fk = parentcols.intersection( - self.foreign_key_columns) - - if onetomany_fk and manytoone_fk: - # fks on both sides. test for overlap of local/remote - # with foreign key. - # we will gather columns directly from their annotations - # without deannotating, so that we can distinguish on a column - # that refers to itself. - - # 1. columns that are both remote and FK suggest - # onetomany. - onetomany_local = self._gather_columns_with_annotation( - self.primaryjoin, "remote", "foreign") - - # 2. columns that are FK but are not remote (e.g. local) - # suggest manytoone. - manytoone_local = set([c for c in - self._gather_columns_with_annotation( - self.primaryjoin, - "foreign") - if "remote" not in c._annotations]) - - # 3. if both collections are present, remove columns that - # refer to themselves. This is for the case of - # and_(Me.id == Me.remote_id, Me.version == Me.version) - if onetomany_local and manytoone_local: - self_equated = self.remote_columns.intersection( - self.local_columns - ) - onetomany_local = onetomany_local.difference(self_equated) - manytoone_local = manytoone_local.difference(self_equated) - - # at this point, if only one or the other collection is - # present, we know the direction, otherwise it's still - # ambiguous. - - if onetomany_local and not manytoone_local: - self.direction = ONETOMANY - elif manytoone_local and not onetomany_local: - self.direction = MANYTOONE - else: - raise sa_exc.ArgumentError( - "Can't determine relationship" - " direction for relationship '%s' - foreign " - "key columns within the join condition are present " - "in both the parent and the child's mapped tables. " - "Ensure that only those columns referring " - "to a parent column are marked as foreign, " - "either via the foreign() annotation or " - "via the foreign_keys argument." % self.prop) - elif onetomany_fk: - self.direction = ONETOMANY - elif manytoone_fk: - self.direction = MANYTOONE - else: - raise sa_exc.ArgumentError( - "Can't determine relationship " - "direction for relationship '%s' - foreign " - "key columns are present in neither the parent " - "nor the child's mapped tables" % self.prop) - - def _deannotate_pairs(self, collection): - """provide deannotation for the various lists of - pairs, so that using them in hashes doesn't incur - high-overhead __eq__() comparisons against - original columns mapped. 
- - """ - return [(x._deannotate(), y._deannotate()) - for x, y in collection] - - def _setup_pairs(self): - sync_pairs = [] - lrp = util.OrderedSet([]) - secondary_sync_pairs = [] - - def go(joincond, collection): - def visit_binary(binary, left, right): - if "remote" in right._annotations and \ - "remote" not in left._annotations and \ - self.can_be_synced_fn(left): - lrp.add((left, right)) - elif "remote" in left._annotations and \ - "remote" not in right._annotations and \ - self.can_be_synced_fn(right): - lrp.add((right, left)) - if binary.operator is operators.eq and \ - self.can_be_synced_fn(left, right): - if "foreign" in right._annotations: - collection.append((left, right)) - elif "foreign" in left._annotations: - collection.append((right, left)) - visit_binary_product(visit_binary, joincond) - - for joincond, collection in [ - (self.primaryjoin, sync_pairs), - (self.secondaryjoin, secondary_sync_pairs) - ]: - if joincond is None: - continue - go(joincond, collection) - - self.local_remote_pairs = self._deannotate_pairs(lrp) - self.synchronize_pairs = self._deannotate_pairs(sync_pairs) - self.secondary_synchronize_pairs = \ - self._deannotate_pairs(secondary_sync_pairs) - - _track_overlapping_sync_targets = weakref.WeakKeyDictionary() - - def _warn_for_conflicting_sync_targets(self): - if not self.support_sync: - return - - # we would like to detect if we are synchronizing any column - # pairs in conflict with another relationship that wishes to sync - # an entirely different column to the same target. This is a - # very rare edge case so we will try to minimize the memory/overhead - # impact of this check - for from_, to_ in [ - (from_, to_) for (from_, to_) in self.synchronize_pairs - ] + [ - (from_, to_) for (from_, to_) in self.secondary_synchronize_pairs - ]: - # save ourselves a ton of memory and overhead by only - # considering columns that are subject to a overlapping - # FK constraints at the core level. This condition can arise - # if multiple relationships overlap foreign() directly, but - # we're going to assume it's typically a ForeignKeyConstraint- - # level configuration that benefits from this warning. - if len(to_.foreign_keys) < 2: - continue - - if to_ not in self._track_overlapping_sync_targets: - self._track_overlapping_sync_targets[to_] = \ - weakref.WeakKeyDictionary({self.prop: from_}) - else: - other_props = [] - prop_to_from = self._track_overlapping_sync_targets[to_] - for pr, fr_ in prop_to_from.items(): - if pr.mapper in mapperlib._mapper_registry and \ - fr_ is not from_ and \ - pr not in self.prop._reverse_property: - other_props.append((pr, fr_)) - - if other_props: - util.warn( - "relationship '%s' will copy column %s to column %s, " - "which conflicts with relationship(s): %s. " - "Consider applying " - "viewonly=True to read-only relationships, or provide " - "a primaryjoin condition marking writable columns " - "with the foreign() annotation." 
% ( - self.prop, - from_, to_, - ", ".join( - "'%s' (copies %s to %s)" % (pr, fr_, to_) - for (pr, fr_) in other_props) - ) - ) - self._track_overlapping_sync_targets[to_][self.prop] = from_ - - @util.memoized_property - def remote_columns(self): - return self._gather_join_annotations("remote") - - @util.memoized_property - def local_columns(self): - return self._gather_join_annotations("local") - - @util.memoized_property - def foreign_key_columns(self): - return self._gather_join_annotations("foreign") - - @util.memoized_property - def deannotated_primaryjoin(self): - return _deep_deannotate(self.primaryjoin) - - @util.memoized_property - def deannotated_secondaryjoin(self): - if self.secondaryjoin is not None: - return _deep_deannotate(self.secondaryjoin) - else: - return None - - def _gather_join_annotations(self, annotation): - s = set( - self._gather_columns_with_annotation( - self.primaryjoin, annotation) - ) - if self.secondaryjoin is not None: - s.update( - self._gather_columns_with_annotation( - self.secondaryjoin, annotation) - ) - return set([x._deannotate() for x in s]) - - def _gather_columns_with_annotation(self, clause, *annotation): - annotation = set(annotation) - return set([ - col for col in visitors.iterate(clause, {}) - if annotation.issubset(col._annotations) - ]) - - def join_targets(self, source_selectable, - dest_selectable, - aliased, - single_crit=None): - """Given a source and destination selectable, create a - join between them. - - This takes into account aliasing the join clause - to reference the appropriate corresponding columns - in the target objects, as well as the extra child - criterion, equivalent column sets, etc. - - """ - - # place a barrier on the destination such that - # replacement traversals won't ever dig into it. - # its internal structure remains fixed - # regardless of context. - dest_selectable = _shallow_annotate( - dest_selectable, - {'no_replacement_traverse': True}) - - primaryjoin, secondaryjoin, secondary = self.primaryjoin, \ - self.secondaryjoin, self.secondary - - # adjust the join condition for single table inheritance, - # in the case that the join is to a subclass - # this is analogous to the - # "_adjust_for_single_table_inheritance()" method in Query. 
- - if single_crit is not None: - if secondaryjoin is not None: - secondaryjoin = secondaryjoin & single_crit - else: - primaryjoin = primaryjoin & single_crit - - if aliased: - if secondary is not None: - secondary = secondary.alias(flat=True) - primary_aliasizer = ClauseAdapter(secondary) - secondary_aliasizer = \ - ClauseAdapter(dest_selectable, - equivalents=self.child_equivalents).\ - chain(primary_aliasizer) - if source_selectable is not None: - primary_aliasizer = \ - ClauseAdapter(secondary).\ - chain(ClauseAdapter( - source_selectable, - equivalents=self.parent_equivalents)) - secondaryjoin = \ - secondary_aliasizer.traverse(secondaryjoin) - else: - primary_aliasizer = ClauseAdapter( - dest_selectable, - exclude_fn=_ColInAnnotations("local"), - equivalents=self.child_equivalents) - if source_selectable is not None: - primary_aliasizer.chain( - ClauseAdapter(source_selectable, - exclude_fn=_ColInAnnotations("remote"), - equivalents=self.parent_equivalents)) - secondary_aliasizer = None - - primaryjoin = primary_aliasizer.traverse(primaryjoin) - target_adapter = secondary_aliasizer or primary_aliasizer - target_adapter.exclude_fn = None - else: - target_adapter = None - return primaryjoin, secondaryjoin, secondary, \ - target_adapter, dest_selectable - - def create_lazy_clause(self, reverse_direction=False): - binds = util.column_dict() - equated_columns = util.column_dict() - - has_secondary = self.secondaryjoin is not None - - if has_secondary: - lookup = collections.defaultdict(list) - for l, r in self.local_remote_pairs: - lookup[l].append((l, r)) - equated_columns[r] = l - elif not reverse_direction: - for l, r in self.local_remote_pairs: - equated_columns[r] = l - else: - for l, r in self.local_remote_pairs: - equated_columns[l] = r - - def col_to_bind(col): - - if ( - (not reverse_direction and 'local' in col._annotations) or - reverse_direction and ( - (has_secondary and col in lookup) or - (not has_secondary and 'remote' in col._annotations) - ) - ): - if col not in binds: - binds[col] = sql.bindparam( - None, None, type_=col.type, unique=True) - return binds[col] - return None - - lazywhere = self.primaryjoin - if self.secondaryjoin is None or not reverse_direction: - lazywhere = visitors.replacement_traverse( - lazywhere, {}, col_to_bind) - - if self.secondaryjoin is not None: - secondaryjoin = self.secondaryjoin - if reverse_direction: - secondaryjoin = visitors.replacement_traverse( - secondaryjoin, {}, col_to_bind) - lazywhere = sql.and_(lazywhere, secondaryjoin) - - bind_to_col = dict((binds[col].key, col) for col in binds) - - # this is probably not necessary - lazywhere = _deep_deannotate(lazywhere) - - return lazywhere, bind_to_col, equated_columns - - -class _ColInAnnotations(object): - """Seralizable equivalent to: - - lambda c: "name" in c._annotations - """ - - def __init__(self, name): - self.name = name - - def __call__(self, c): - return self.name in c._annotations diff --git a/python/sqlalchemy/orm/scoping.py b/python/sqlalchemy/orm/scoping.py deleted file mode 100644 index b3f2fa5d..00000000 --- a/python/sqlalchemy/orm/scoping.py +++ /dev/null @@ -1,177 +0,0 @@ -# orm/scoping.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .. import exc as sa_exc -from ..util import ScopedRegistry, ThreadLocalRegistry, warn -from . 
import class_mapper, exc as orm_exc -from .session import Session - - -__all__ = ['scoped_session'] - - -class scoped_session(object): - """Provides scoped management of :class:`.Session` objects. - - See :ref:`unitofwork_contextual` for a tutorial. - - """ - - def __init__(self, session_factory, scopefunc=None): - """Construct a new :class:`.scoped_session`. - - :param session_factory: a factory to create new :class:`.Session` - instances. This is usually, but not necessarily, an instance - of :class:`.sessionmaker`. - :param scopefunc: optional function which defines - the current scope. If not passed, the :class:`.scoped_session` - object assumes "thread-local" scope, and will use - a Python ``threading.local()`` in order to maintain the current - :class:`.Session`. If passed, the function should return - a hashable token; this token will be used as the key in a - dictionary in order to store and retrieve the current - :class:`.Session`. - - """ - self.session_factory = session_factory - if scopefunc: - self.registry = ScopedRegistry(session_factory, scopefunc) - else: - self.registry = ThreadLocalRegistry(session_factory) - - def __call__(self, **kw): - """Return the current :class:`.Session`, creating it - using the session factory if not present. - - :param \**kw: Keyword arguments will be passed to the - session factory callable, if an existing :class:`.Session` - is not present. If the :class:`.Session` is present and - keyword arguments have been passed, - :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. - - """ - if kw: - scope = kw.pop('scope', False) - if scope is not None: - if self.registry.has(): - raise sa_exc.InvalidRequestError( - "Scoped session is already present; " - "no new arguments may be specified.") - else: - sess = self.session_factory(**kw) - self.registry.set(sess) - return sess - else: - return self.session_factory(**kw) - else: - return self.registry() - - def remove(self): - """Dispose of the current :class:`.Session`, if present. - - This will first call :meth:`.Session.close` method - on the current :class:`.Session`, which releases any existing - transactional/connection resources still being held; transactions - specifically are rolled back. The :class:`.Session` is then - discarded. Upon next usage within the same scope, - the :class:`.scoped_session` will produce a new - :class:`.Session` object. - - """ - - if self.registry.has(): - self.registry().close() - self.registry.clear() - - def configure(self, **kwargs): - """reconfigure the :class:`.sessionmaker` used by this - :class:`.scoped_session`. - - See :meth:`.sessionmaker.configure`. - - """ - - if self.registry.has(): - warn('At least one scoped session is already present. ' - ' configure() can not affect sessions that have ' - 'already been created.') - - self.session_factory.configure(**kwargs) - - def query_property(self, query_cls=None): - """return a class property which produces a :class:`.Query` object - against the class and the current :class:`.Session` when called. - - e.g.:: - - Session = scoped_session(sessionmaker()) - - class MyClass(object): - query = Session.query_property() - - # after mappers are defined - result = MyClass.query.filter(MyClass.name=='foo').all() - - Produces instances of the session's configured query class by - default. To override and use a custom implementation, provide - a ``query_cls`` callable. The callable will be invoked with - the class's mapper as a positional argument and a session - keyword argument. 
- - There is no limit to the number of query properties placed on - a class. - - """ - class query(object): - def __get__(s, instance, owner): - try: - mapper = class_mapper(owner) - if mapper: - if query_cls: - # custom query class - return query_cls(mapper, session=self.registry()) - else: - # session's configured query class - return self.registry().query(mapper) - except orm_exc.UnmappedClassError: - return None - return query() - -ScopedSession = scoped_session -"""Old name for backwards compatibility.""" - - -def instrument(name): - def do(self, *args, **kwargs): - return getattr(self.registry(), name)(*args, **kwargs) - return do - -for meth in Session.public_methods: - setattr(scoped_session, meth, instrument(meth)) - - -def makeprop(name): - def set(self, attr): - setattr(self.registry(), name, attr) - - def get(self): - return getattr(self.registry(), name) - - return property(get, set) - -for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map', - 'is_active', 'autoflush', 'no_autoflush', 'info'): - setattr(scoped_session, prop, makeprop(prop)) - - -def clslevel(name): - def do(cls, *args, **kwargs): - return getattr(Session, name)(*args, **kwargs) - return classmethod(do) - -for prop in ('close_all', 'object_session', 'identity_key'): - setattr(scoped_session, prop, clslevel(prop)) diff --git a/python/sqlalchemy/orm/session.py b/python/sqlalchemy/orm/session.py deleted file mode 100644 index 6c3f392b..00000000 --- a/python/sqlalchemy/orm/session.py +++ /dev/null @@ -1,2780 +0,0 @@ -# orm/session.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -"""Provides the Session class and related utilities.""" - - -import weakref -from .. import util, sql, engine, exc as sa_exc -from ..sql import util as sql_util, expression -from . import ( - SessionExtension, attributes, exc, query, - loading, identity -) -from ..inspection import inspect -from .base import ( - object_mapper, class_mapper, - _class_to_mapper, _state_mapper, object_state, - _none_set, state_str, instance_str -) -import itertools -from . import persistence -from .unitofwork import UOWTransaction -from . import state as statelib -import sys - -__all__ = ['Session', 'SessionTransaction', - 'SessionExtension', 'sessionmaker'] - -_sessions = weakref.WeakValueDictionary() -"""Weak-referencing dictionary of :class:`.Session` objects. -""" - - -def _state_session(state): - """Given an :class:`.InstanceState`, return the :class:`.Session` - associated, if any. - """ - if state.session_id: - try: - return _sessions[state.session_id] - except KeyError: - pass - return None - - -class _SessionClassMethods(object): - """Class-level methods for :class:`.Session`, :class:`.sessionmaker`.""" - - @classmethod - def close_all(cls): - """Close *all* sessions in memory.""" - - for sess in _sessions.values(): - sess.close() - - @classmethod - @util.dependencies("sqlalchemy.orm.util") - def identity_key(cls, orm_util, *args, **kwargs): - """Return an identity key. - - This is an alias of :func:`.util.identity_key`. - - """ - return orm_util.identity_key(*args, **kwargs) - - @classmethod - def object_session(cls, instance): - """Return the :class:`.Session` to which an object belongs. - - This is an alias of :func:`.object_session`. 
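# [Editorial sketch, not part of the patch] The scoped_session registry whose
# removal ends above is SQLAlchemy's thread-local Session manager. A minimal,
# hedged usage sketch; the in-memory SQLite URL and the `User` model are
# illustrative assumptions, not taken from this repository.
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base

engine = create_engine("sqlite://")
Session = scoped_session(sessionmaker(bind=engine))  # one Session per thread
Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    query = Session.query_property()  # class-level Query against the current Session

Base.metadata.create_all(engine)
Session.add(User(name="alice"))   # instrumented proxy onto the thread's Session
Session.commit()
print(User.query.filter_by(name="alice").all())
Session.remove()                  # close and discard the thread-local Session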
- - """ - - return object_session(instance) - - -ACTIVE = util.symbol('ACTIVE') -PREPARED = util.symbol('PREPARED') -COMMITTED = util.symbol('COMMITTED') -DEACTIVE = util.symbol('DEACTIVE') -CLOSED = util.symbol('CLOSED') - - -class SessionTransaction(object): - """A :class:`.Session`-level transaction. - - :class:`.SessionTransaction` is a mostly behind-the-scenes object - not normally referenced directly by application code. It coordinates - among multiple :class:`.Connection` objects, maintaining a database - transaction for each one individually, committing or rolling them - back all at once. It also provides optional two-phase commit behavior - which can augment this coordination operation. - - The :attr:`.Session.transaction` attribute of :class:`.Session` - refers to the current :class:`.SessionTransaction` object in use, if any. - - - A :class:`.SessionTransaction` is associated with a :class:`.Session` - in its default mode of ``autocommit=False`` immediately, associated - with no database connections. As the :class:`.Session` is called upon - to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection` - objects, a corresponding :class:`.Connection` and associated - :class:`.Transaction` is added to a collection within the - :class:`.SessionTransaction` object, becoming one of the - connection/transaction pairs maintained by the - :class:`.SessionTransaction`. - - The lifespan of the :class:`.SessionTransaction` ends when the - :meth:`.Session.commit`, :meth:`.Session.rollback` or - :meth:`.Session.close` methods are called. At this point, the - :class:`.SessionTransaction` removes its association with its parent - :class:`.Session`. A :class:`.Session` that is in ``autocommit=False`` - mode will create a new :class:`.SessionTransaction` to replace it - immediately, whereas a :class:`.Session` that's in ``autocommit=True`` - mode will remain without a :class:`.SessionTransaction` until the - :meth:`.Session.begin` method is called. - - Another detail of :class:`.SessionTransaction` behavior is that it is - capable of "nesting". This means that the :meth:`.Session.begin` method - can be called while an existing :class:`.SessionTransaction` is already - present, producing a new :class:`.SessionTransaction` that temporarily - replaces the parent :class:`.SessionTransaction`. When a - :class:`.SessionTransaction` is produced as nested, it assigns itself to - the :attr:`.Session.transaction` attribute. When it is ended via - :meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its - parent :class:`.SessionTransaction` back onto the - :attr:`.Session.transaction` attribute. The behavior is effectively a - stack, where :attr:`.Session.transaction` refers to the current head of - the stack. - - The purpose of this stack is to allow nesting of - :meth:`.Session.rollback` or :meth:`.Session.commit` calls in context - with various flavors of :meth:`.Session.begin`. This nesting behavior - applies to when :meth:`.Session.begin_nested` is used to emit a - SAVEPOINT transaction, and is also used to produce a so-called - "subtransaction" which allows a block of code to use a - begin/rollback/commit sequence regardless of whether or not its enclosing - code block has begun a transaction. 
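# [Editorial sketch, not part of the patch] The SAVEPOINT nesting the
# SessionTransaction docstring above describes, shown against the illustrative
# Session/User from the previous sketch.
session = Session()
session.add(User(name="outer"))
session.begin_nested()   # emits SAVEPOINT, pushes a nested SessionTransaction
session.add(User(name="inner"))
session.rollback()       # rolls back to the SAVEPOINT, pops the nested transaction
session.commit()         # "outer" is persisted; "inner" is not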
The :meth:`.flush` method, whether
-    called explicitly or via autoflush, is the primary consumer of the
-    "subtransaction" feature, in that it wishes to guarantee that it works
-    within a transaction block regardless of whether or not the
-    :class:`.Session` is in transactional mode when the method is called.
-
-    See also:
-
-    :meth:`.Session.rollback`
-
-    :meth:`.Session.commit`
-
-    :meth:`.Session.begin`
-
-    :meth:`.Session.begin_nested`
-
-    :attr:`.Session.is_active`
-
-    :meth:`.SessionEvents.after_commit`
-
-    :meth:`.SessionEvents.after_rollback`
-
-    :meth:`.SessionEvents.after_soft_rollback`
-
-    """
-
-    _rollback_exception = None
-
-    def __init__(self, session, parent=None, nested=False):
-        self.session = session
-        self._connections = {}
-        self._parent = parent
-        self.nested = nested
-        self._state = ACTIVE
-        if not parent and nested:
-            raise sa_exc.InvalidRequestError(
-                "Can't start a SAVEPOINT transaction when no existing "
-                "transaction is in progress")
-
-        if self.session._enable_transaction_accounting:
-            self._take_snapshot()
-
-        if self.session.dispatch.after_transaction_create:
-            self.session.dispatch.after_transaction_create(self.session, self)
-
-    @property
-    def is_active(self):
-        return self.session is not None and self._state is ACTIVE
-
-    def _assert_active(self, prepared_ok=False,
-                       rollback_ok=False,
-                       deactive_ok=False,
-                       closed_msg="This transaction is closed"):
-        if self._state is COMMITTED:
-            raise sa_exc.InvalidRequestError(
-                "This session is in 'committed' state; no further "
-                "SQL can be emitted within this transaction."
-            )
-        elif self._state is PREPARED:
-            if not prepared_ok:
-                raise sa_exc.InvalidRequestError(
-                    "This session is in 'prepared' state; no further "
-                    "SQL can be emitted within this transaction."
-                )
-        elif self._state is DEACTIVE:
-            if not deactive_ok and not rollback_ok:
-                if self._rollback_exception:
-                    raise sa_exc.InvalidRequestError(
-                        "This Session's transaction has been rolled back "
-                        "due to a previous exception during flush."
-                        " To begin a new transaction with this Session, "
-                        "first issue Session.rollback()."
-                        " Original exception was: %s"
-                        % self._rollback_exception
-                    )
-                elif not deactive_ok:
-                    raise sa_exc.InvalidRequestError(
-                        "This Session's transaction has been rolled back "
-                        "by a nested rollback() call. To begin a new "
-                        "transaction, issue Session.rollback() first."
- ) - elif self._state is CLOSED: - raise sa_exc.ResourceClosedError(closed_msg) - - @property - def _is_transaction_boundary(self): - return self.nested or not self._parent - - def connection(self, bindkey, execution_options=None, **kwargs): - self._assert_active() - bind = self.session.get_bind(bindkey, **kwargs) - return self._connection_for_bind(bind, execution_options) - - def _begin(self, nested=False): - self._assert_active() - return SessionTransaction( - self.session, self, nested=nested) - - def _iterate_parents(self, upto=None): - - current = self - result = () - while current: - result += (current, ) - if current._parent is upto: - break - elif current._parent is None: - raise sa_exc.InvalidRequestError( - "Transaction %s is not on the active transaction list" % ( - upto)) - else: - current = current._parent - - return result - - def _take_snapshot(self): - if not self._is_transaction_boundary: - self._new = self._parent._new - self._deleted = self._parent._deleted - self._dirty = self._parent._dirty - self._key_switches = self._parent._key_switches - return - - if not self.session._flushing: - self.session.flush() - - self._new = weakref.WeakKeyDictionary() - self._deleted = weakref.WeakKeyDictionary() - self._dirty = weakref.WeakKeyDictionary() - self._key_switches = weakref.WeakKeyDictionary() - - def _restore_snapshot(self, dirty_only=False): - assert self._is_transaction_boundary - - for s in set(self._new).union(self.session._new): - self.session._expunge_state(s) - if s.key: - del s.key - - for s, (oldkey, newkey) in self._key_switches.items(): - self.session.identity_map.safe_discard(s) - s.key = oldkey - self.session.identity_map.replace(s) - - for s in set(self._deleted).union(self.session._deleted): - if s.deleted: - # assert s in self._deleted - del s.deleted - self.session._update_impl(s, discard_existing=True) - - assert not self.session._deleted - - for s in self.session.identity_map.all_states(): - if not dirty_only or s.modified or s in self._dirty: - s._expire(s.dict, self.session.identity_map._modified) - - def _remove_snapshot(self): - assert self._is_transaction_boundary - - if not self.nested and self.session.expire_on_commit: - for s in self.session.identity_map.all_states(): - s._expire(s.dict, self.session.identity_map._modified) - for s in list(self._deleted): - s._detach() - self._deleted.clear() - elif self.nested: - self._parent._new.update(self._new) - self._parent._dirty.update(self._dirty) - self._parent._deleted.update(self._deleted) - self._parent._key_switches.update(self._key_switches) - - def _connection_for_bind(self, bind, execution_options): - self._assert_active() - - if bind in self._connections: - if execution_options: - util.warn( - "Connection is already established for the " - "given bind; execution_options ignored") - return self._connections[bind][0] - - if self._parent: - conn = self._parent._connection_for_bind(bind, execution_options) - if not self.nested: - return conn - else: - if isinstance(bind, engine.Connection): - conn = bind - if conn.engine in self._connections: - raise sa_exc.InvalidRequestError( - "Session already has a Connection associated for the " - "given Connection's Engine") - else: - conn = bind.contextual_connect() - - if execution_options: - conn = conn.execution_options(**execution_options) - - if self.session.twophase and self._parent is None: - transaction = conn.begin_twophase() - elif self.nested: - transaction = conn.begin_nested() - else: - transaction = conn.begin() - - self._connections[conn] = 
self._connections[conn.engine] = \ - (conn, transaction, conn is not bind) - self.session.dispatch.after_begin(self.session, self, conn) - return conn - - def prepare(self): - if self._parent is not None or not self.session.twophase: - raise sa_exc.InvalidRequestError( - "'twophase' mode not enabled, or not root transaction; " - "can't prepare.") - self._prepare_impl() - - def _prepare_impl(self): - self._assert_active() - if self._parent is None or self.nested: - self.session.dispatch.before_commit(self.session) - - stx = self.session.transaction - if stx is not self: - for subtransaction in stx._iterate_parents(upto=self): - subtransaction.commit() - - if not self.session._flushing: - for _flush_guard in range(100): - if self.session._is_clean(): - break - self.session.flush() - else: - raise exc.FlushError( - "Over 100 subsequent flushes have occurred within " - "session.commit() - is an after_flush() hook " - "creating new objects?") - - if self._parent is None and self.session.twophase: - try: - for t in set(self._connections.values()): - t[1].prepare() - except: - with util.safe_reraise(): - self.rollback() - - self._state = PREPARED - - def commit(self): - self._assert_active(prepared_ok=True) - if self._state is not PREPARED: - self._prepare_impl() - - if self._parent is None or self.nested: - for t in set(self._connections.values()): - t[1].commit() - - self._state = COMMITTED - self.session.dispatch.after_commit(self.session) - - if self.session._enable_transaction_accounting: - self._remove_snapshot() - - self.close() - return self._parent - - def rollback(self, _capture_exception=False): - self._assert_active(prepared_ok=True, rollback_ok=True) - - stx = self.session.transaction - if stx is not self: - for subtransaction in stx._iterate_parents(upto=self): - subtransaction.close() - - boundary = self - if self._state in (ACTIVE, PREPARED): - for transaction in self._iterate_parents(): - if transaction._parent is None or transaction.nested: - transaction._rollback_impl() - transaction._state = DEACTIVE - boundary = transaction - break - else: - transaction._state = DEACTIVE - - sess = self.session - - if sess._enable_transaction_accounting and \ - not sess._is_clean(): - - # if items were added, deleted, or mutated - # here, we need to re-restore the snapshot - util.warn( - "Session's state has been changed on " - "a non-active transaction - this state " - "will be discarded.") - boundary._restore_snapshot(dirty_only=boundary.nested) - - self.close() - if self._parent and _capture_exception: - self._parent._rollback_exception = sys.exc_info()[1] - - sess.dispatch.after_soft_rollback(sess, self) - - return self._parent - - def _rollback_impl(self): - for t in set(self._connections.values()): - t[1].rollback() - - if self.session._enable_transaction_accounting: - self._restore_snapshot(dirty_only=self.nested) - - self.session.dispatch.after_rollback(self.session) - - def close(self, invalidate=False): - self.session.transaction = self._parent - if self._parent is None: - for connection, transaction, autoclose in \ - set(self._connections.values()): - if invalidate: - connection.invalidate() - if autoclose: - connection.close() - else: - transaction.close() - - self._state = CLOSED - if self.session.dispatch.after_transaction_end: - self.session.dispatch.after_transaction_end(self.session, self) - - if self._parent is None: - if not self.session.autocommit: - self.session.begin() - self.session = None - self._connections = None - - def __enter__(self): - return self - - def 
__exit__(self, type, value, traceback): - self._assert_active(deactive_ok=True, prepared_ok=True) - if self.session.transaction is None: - return - if type is None: - try: - self.commit() - except: - with util.safe_reraise(): - self.rollback() - else: - self.rollback() - - -class Session(_SessionClassMethods): - """Manages persistence operations for ORM-mapped objects. - - The Session's usage paradigm is described at :doc:`/orm/session`. - - - """ - - public_methods = ( - '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested', - 'close', 'commit', 'connection', 'delete', 'execute', 'expire', - 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', - 'is_modified', 'bulk_save_objects', 'bulk_insert_mappings', - 'bulk_update_mappings', - 'merge', 'query', 'refresh', 'rollback', - 'scalar') - - def __init__(self, bind=None, autoflush=True, expire_on_commit=True, - _enable_transaction_accounting=True, - autocommit=False, twophase=False, - weak_identity_map=True, binds=None, extension=None, - info=None, - query_cls=query.Query): - """Construct a new Session. - - See also the :class:`.sessionmaker` function which is used to - generate a :class:`.Session`-producing callable with a given - set of arguments. - - :param autocommit: - - .. warning:: - - The autocommit flag is **not for general use**, and if it is - used, queries should only be invoked within the span of a - :meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing - queries outside of a demarcated transaction is a legacy mode - of usage, and can in some cases lead to concurrent connection - checkouts. - - Defaults to ``False``. When ``True``, the - :class:`.Session` does not keep a persistent transaction running, - and will acquire connections from the engine on an as-needed basis, - returning them immediately after their use. Flushes will begin and - commit (or possibly rollback) their own transaction if no - transaction is present. When using this mode, the - :meth:`.Session.begin` method is used to explicitly start - transactions. - - .. seealso:: - - :ref:`session_autocommit` - - :param autoflush: When ``True``, all query operations will issue a - :meth:`~.Session.flush` call to this ``Session`` before proceeding. - This is a convenience feature so that :meth:`~.Session.flush` need - not be called repeatedly in order for database queries to retrieve - results. It's typical that ``autoflush`` is used in conjunction - with ``autocommit=False``. In this scenario, explicit calls to - :meth:`~.Session.flush` are rarely needed; you usually only need to - call :meth:`~.Session.commit` (which flushes) to finalize changes. - - :param bind: An optional :class:`.Engine` or :class:`.Connection` to - which this ``Session`` should be bound. When specified, all SQL - operations performed by this session will execute via this - connectable. - - :param binds: An optional dictionary which contains more granular - "bind" information than the ``bind`` parameter provides. This - dictionary can map individual :class`.Table` - instances as well as :class:`~.Mapper` instances to individual - :class:`.Engine` or :class:`.Connection` objects. Operations which - proceed relative to a particular :class:`.Mapper` will consult this - dictionary for the direct :class:`.Mapper` instance as - well as the mapper's ``mapped_table`` attribute in order to locate - a connectable to use. The full resolution is described in the - :meth:`.Session.get_bind`. 
- Usage looks like:: - - Session = sessionmaker(binds={ - SomeMappedClass: create_engine('postgresql://engine1'), - somemapper: create_engine('postgresql://engine2'), - some_table: create_engine('postgresql://engine3'), - }) - - Also see the :meth:`.Session.bind_mapper` - and :meth:`.Session.bind_table` methods. - - :param \class_: Specify an alternate class other than - ``sqlalchemy.orm.session.Session`` which should be used by the - returned class. This is the only argument that is local to the - :class:`.sessionmaker` function, and is not sent directly to the - constructor for ``Session``. - - :param _enable_transaction_accounting: Defaults to ``True``. A - legacy-only flag which when ``False`` disables *all* 0.5-style - object accounting on transaction boundaries, including auto-expiry - of instances on rollback and commit, maintenance of the "new" and - "deleted" lists upon rollback, and autoflush of pending changes - upon :meth:`~.Session.begin`, all of which are interdependent. - - :param expire_on_commit: Defaults to ``True``. When ``True``, all - instances will be fully expired after each :meth:`~.commit`, - so that all attribute/object access subsequent to a completed - transaction will load from the most recent database state. - - :param extension: An optional - :class:`~.SessionExtension` instance, or a list - of such instances, which will receive pre- and post- commit and - flush events, as well as a post-rollback event. **Deprecated.** - Please see :class:`.SessionEvents`. - - :param info: optional dictionary of arbitrary data to be associated - with this :class:`.Session`. Is available via the - :attr:`.Session.info` attribute. Note the dictionary is copied at - construction time so that modifications to the per- - :class:`.Session` dictionary will be local to that - :class:`.Session`. - - .. versionadded:: 0.9.0 - - :param query_cls: Class which should be used to create new Query - objects, as returned by the :meth:`~.Session.query` method. - Defaults to :class:`.Query`. - - :param twophase: When ``True``, all transactions will be started as - a "two phase" transaction, i.e. using the "two phase" semantics - of the database in use along with an XID. During a - :meth:`~.commit`, after :meth:`~.flush` has been issued for all - attached databases, the :meth:`~.TwoPhaseTransaction.prepare` - method on each database's :class:`.TwoPhaseTransaction` will be - called. This allows each database to roll back the entire - transaction, before each transaction is committed. - - :param weak_identity_map: Defaults to ``True`` - when set to - ``False``, objects placed in the :class:`.Session` will be - strongly referenced until explicitly removed or the - :class:`.Session` is closed. **Deprecated** - this option - is present to allow compatibility with older applications, but - it is recommended that strong references to objects - be maintained by the calling application - externally to the :class:`.Session` itself, - to the extent that is required by the application. - - """ - - if weak_identity_map: - self._identity_cls = identity.WeakInstanceDict - else: - util.warn_deprecated( - "weak_identity_map=False is deprecated. 
" - "It is present to allow compatibility with older " - "applications, but " - "it is recommended that strong references to " - "objects be maintained by the calling application " - "externally to the :class:`.Session` itself, " - "to the extent that is required by the application.") - - self._identity_cls = identity.StrongInstanceDict - self.identity_map = self._identity_cls() - - self._new = {} # InstanceState->object, strong refs object - self._deleted = {} # same - self.bind = bind - self.__binds = {} - self._flushing = False - self._warn_on_events = False - self.transaction = None - self.hash_key = _new_sessionid() - self.autoflush = autoflush - self.autocommit = autocommit - self.expire_on_commit = expire_on_commit - self._enable_transaction_accounting = _enable_transaction_accounting - self.twophase = twophase - self._query_cls = query_cls - if info: - self.info.update(info) - - if extension: - for ext in util.to_list(extension): - SessionExtension._adapt_listener(self, ext) - - if binds is not None: - for key, bind in binds.items(): - self._add_bind(key, bind) - - if not self.autocommit: - self.begin() - _sessions[self.hash_key] = self - - connection_callable = None - - transaction = None - """The current active or inactive :class:`.SessionTransaction`.""" - - @util.memoized_property - def info(self): - """A user-modifiable dictionary. - - The initial value of this dictioanry can be populated using the - ``info`` argument to the :class:`.Session` constructor or - :class:`.sessionmaker` constructor or factory methods. The dictionary - here is always local to this :class:`.Session` and can be modified - independently of all other :class:`.Session` objects. - - .. versionadded:: 0.9.0 - - """ - return {} - - def begin(self, subtransactions=False, nested=False): - """Begin a transaction on this :class:`.Session`. - - If this Session is already within a transaction, either a plain - transaction or nested transaction, an error is raised, unless - ``subtransactions=True`` or ``nested=True`` is specified. - - The ``subtransactions=True`` flag indicates that this - :meth:`~.Session.begin` can create a subtransaction if a transaction - is already in progress. For documentation on subtransactions, please - see :ref:`session_subtransactions`. - - The ``nested`` flag begins a SAVEPOINT transaction and is equivalent - to calling :meth:`~.Session.begin_nested`. For documentation on - SAVEPOINT transactions, please see :ref:`session_begin_nested`. - - """ - if self.transaction is not None: - if subtransactions or nested: - self.transaction = self.transaction._begin( - nested=nested) - else: - raise sa_exc.InvalidRequestError( - "A transaction is already begun. Use " - "subtransactions=True to allow subtransactions.") - else: - self.transaction = SessionTransaction( - self, nested=nested) - return self.transaction # needed for __enter__/__exit__ hook - - def begin_nested(self): - """Begin a `nested` transaction on this Session. - - The target database(s) must support SQL SAVEPOINTs or a - SQLAlchemy-supported vendor implementation of the idea. - - For documentation on SAVEPOINT - transactions, please see :ref:`session_begin_nested`. - - """ - return self.begin(nested=True) - - def rollback(self): - """Rollback the current transaction in progress. - - If no transaction is in progress, this method is a pass-through. - - This method rolls back the current transaction or nested transaction - regardless of subtransactions being in effect. 
All subtransactions up - to the first real transaction are closed. Subtransactions occur when - :meth:`.begin` is called multiple times. - - .. seealso:: - - :ref:`session_rollback` - - """ - if self.transaction is None: - pass - else: - self.transaction.rollback() - - def commit(self): - """Flush pending changes and commit the current transaction. - - If no transaction is in progress, this method raises an - :exc:`~sqlalchemy.exc.InvalidRequestError`. - - By default, the :class:`.Session` also expires all database - loaded state on all ORM-managed attributes after transaction commit. - This so that subsequent operations load the most recent - data from the database. This behavior can be disabled using - the ``expire_on_commit=False`` option to :class:`.sessionmaker` or - the :class:`.Session` constructor. - - If a subtransaction is in effect (which occurs when begin() is called - multiple times), the subtransaction will be closed, and the next call - to ``commit()`` will operate on the enclosing transaction. - - When using the :class:`.Session` in its default mode of - ``autocommit=False``, a new transaction will - be begun immediately after the commit, but note that the newly begun - transaction does *not* use any connection resources until the first - SQL is actually emitted. - - .. seealso:: - - :ref:`session_committing` - - """ - if self.transaction is None: - if not self.autocommit: - self.begin() - else: - raise sa_exc.InvalidRequestError("No transaction is begun.") - - self.transaction.commit() - - def prepare(self): - """Prepare the current transaction in progress for two phase commit. - - If no transaction is in progress, this method raises an - :exc:`~sqlalchemy.exc.InvalidRequestError`. - - Only root transactions of two phase sessions can be prepared. If the - current transaction is not such, an - :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. - - """ - if self.transaction is None: - if not self.autocommit: - self.begin() - else: - raise sa_exc.InvalidRequestError("No transaction is begun.") - - self.transaction.prepare() - - def connection(self, mapper=None, clause=None, - bind=None, - close_with_result=False, - execution_options=None, - **kw): - """Return a :class:`.Connection` object corresponding to this - :class:`.Session` object's transactional state. - - If this :class:`.Session` is configured with ``autocommit=False``, - either the :class:`.Connection` corresponding to the current - transaction is returned, or if no transaction is in progress, a new - one is begun and the :class:`.Connection` returned (note that no - transactional state is established with the DBAPI until the first - SQL statement is emitted). - - Alternatively, if this :class:`.Session` is configured with - ``autocommit=True``, an ad-hoc :class:`.Connection` is returned - using :meth:`.Engine.contextual_connect` on the underlying - :class:`.Engine`. - - Ambiguity in multi-bind or unbound :class:`.Session` objects can be - resolved through any of the optional keyword arguments. This - ultimately makes usage of the :meth:`.get_bind` method for resolution. - - :param bind: - Optional :class:`.Engine` to be used as the bind. If - this engine is already involved in an ongoing transaction, - that connection will be used. This argument takes precedence - over ``mapper``, ``clause``. - - :param mapper: - Optional :func:`.mapper` mapped class, used to identify - the appropriate bind. This argument takes precedence over - ``clause``. - - :param clause: - A :class:`.ClauseElement` (i.e. 
:func:`~.sql.expression.select`, - :func:`~.sql.expression.text`, - etc.) which will be used to locate a bind, if a bind - cannot otherwise be identified. - - :param close_with_result: Passed to :meth:`.Engine.connect`, - indicating the :class:`.Connection` should be considered - "single use", automatically closing when the first result set is - closed. This flag only has an effect if this :class:`.Session` is - configured with ``autocommit=True`` and does not already have a - transaction in progress. - - :param execution_options: a dictionary of execution options that will - be passed to :meth:`.Connection.execution_options`, **when the - connection is first procured only**. If the connection is already - present within the :class:`.Session`, a warning is emitted and - the arguments are ignored. - - .. versionadded:: 0.9.9 - - .. seealso:: - - :ref:`session_transaction_isolation` - - :param \**kw: - Additional keyword arguments are sent to :meth:`get_bind()`, - allowing additional arguments to be passed to custom - implementations of :meth:`get_bind`. - - """ - if bind is None: - bind = self.get_bind(mapper, clause=clause, **kw) - - return self._connection_for_bind(bind, - close_with_result=close_with_result, - execution_options=execution_options) - - def _connection_for_bind(self, engine, execution_options=None, **kw): - if self.transaction is not None: - return self.transaction._connection_for_bind( - engine, execution_options) - else: - conn = engine.contextual_connect(**kw) - if execution_options: - conn = conn.execution_options(**execution_options) - return conn - - def execute(self, clause, params=None, mapper=None, bind=None, **kw): - """Execute a SQL expression construct or string statement within - the current transaction. - - Returns a :class:`.ResultProxy` representing - results of the statement execution, in the same manner as that of an - :class:`.Engine` or - :class:`.Connection`. - - E.g.:: - - result = session.execute( - user_table.select().where(user_table.c.id == 5) - ) - - :meth:`~.Session.execute` accepts any executable clause construct, - such as :func:`~.sql.expression.select`, - :func:`~.sql.expression.insert`, - :func:`~.sql.expression.update`, - :func:`~.sql.expression.delete`, and - :func:`~.sql.expression.text`. Plain SQL strings can be passed - as well, which in the case of :meth:`.Session.execute` only - will be interpreted the same as if it were passed via a - :func:`~.expression.text` construct. That is, the following usage:: - - result = session.execute( - "SELECT * FROM user WHERE id=:param", - {"param":5} - ) - - is equivalent to:: - - from sqlalchemy import text - result = session.execute( - text("SELECT * FROM user WHERE id=:param"), - {"param":5} - ) - - The second positional argument to :meth:`.Session.execute` is an - optional parameter set. Similar to that of - :meth:`.Connection.execute`, whether this is passed as a single - dictionary, or a list of dictionaries, determines whether the DBAPI - cursor's ``execute()`` or ``executemany()`` is used to execute the - statement. An INSERT construct may be invoked for a single row:: - - result = session.execute( - users.insert(), {"id": 7, "name": "somename"}) - - or for multiple rows:: - - result = session.execute(users.insert(), [ - {"id": 7, "name": "somename7"}, - {"id": 8, "name": "somename8"}, - {"id": 9, "name": "somename9"} - ]) - - The statement is executed within the current transactional context of - this :class:`.Session`. 
The :class:`.Connection` which is used - to execute the statement can also be acquired directly by - calling the :meth:`.Session.connection` method. Both methods use - a rule-based resolution scheme in order to determine the - :class:`.Connection`, which in the average case is derived directly - from the "bind" of the :class:`.Session` itself, and in other cases - can be based on the :func:`.mapper` - and :class:`.Table` objects passed to the method; see the - documentation for :meth:`.Session.get_bind` for a full description of - this scheme. - - The :meth:`.Session.execute` method does *not* invoke autoflush. - - The :class:`.ResultProxy` returned by the :meth:`.Session.execute` - method is returned with the "close_with_result" flag set to true; - the significance of this flag is that if this :class:`.Session` is - autocommitting and does not have a transaction-dedicated - :class:`.Connection` available, a temporary :class:`.Connection` is - established for the statement execution, which is closed (meaning, - returned to the connection pool) when the :class:`.ResultProxy` has - consumed all available data. This applies *only* when the - :class:`.Session` is configured with autocommit=True and no - transaction has been started. - - :param clause: - An executable statement (i.e. an :class:`.Executable` expression - such as :func:`.expression.select`) or string SQL statement - to be executed. - - :param params: - Optional dictionary, or list of dictionaries, containing - bound parameter values. If a single dictionary, single-row - execution occurs; if a list of dictionaries, an - "executemany" will be invoked. The keys in each dictionary - must correspond to parameter names present in the statement. - - :param mapper: - Optional :func:`.mapper` or mapped class, used to identify - the appropriate bind. This argument takes precedence over - ``clause`` when locating a bind. See :meth:`.Session.get_bind` - for more details. - - :param bind: - Optional :class:`.Engine` to be used as the bind. If - this engine is already involved in an ongoing transaction, - that connection will be used. This argument takes - precedence over ``mapper`` and ``clause`` when locating - a bind. - - :param \**kw: - Additional keyword arguments are sent to :meth:`.Session.get_bind()` - to allow extensibility of "bind" schemes. - - .. seealso:: - - :ref:`sqlexpression_toplevel` - Tutorial on using Core SQL - constructs. - - :ref:`connections_toplevel` - Further information on direct - statement execution. - - :meth:`.Connection.execute` - core level statement execution - method, which is :meth:`.Session.execute` ultimately uses - in order to execute the statement. - - """ - clause = expression._literal_as_text(clause) - - if bind is None: - bind = self.get_bind(mapper, clause=clause, **kw) - - return self._connection_for_bind( - bind, close_with_result=True).execute(clause, params or {}) - - def scalar(self, clause, params=None, mapper=None, bind=None, **kw): - """Like :meth:`~.Session.execute` but return a scalar result.""" - - return self.execute( - clause, params=params, mapper=mapper, bind=bind, **kw).scalar() - - def close(self): - """Close this Session. - - This clears all items and ends any transaction in progress. - - If this session were created with ``autocommit=False``, a new - transaction is immediately begun. Note that this new transaction does - not use any connection resources until they are first needed. 
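# [Editorial sketch, not part of the patch] scalar(), defined above, is
# execute() reduced to the first column of the first row; plain strings are
# coerced through text() just as with execute().
n = session.scalar("SELECT count(*) FROM users")
session.close()  # releases connections; a new transaction begins lazily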
- - """ - self._close_impl(invalidate=False) - - def invalidate(self): - """Close this Session, using connection invalidation. - - This is a variant of :meth:`.Session.close` that will additionally - ensure that the :meth:`.Connection.invalidate` method will be called - on all :class:`.Connection` objects. This can be called when - the database is known to be in a state where the connections are - no longer safe to be used. - - E.g.:: - - try: - sess = Session() - sess.add(User()) - sess.commit() - except gevent.Timeout: - sess.invalidate() - raise - except: - sess.rollback() - raise - - This clears all items and ends any transaction in progress. - - If this session were created with ``autocommit=False``, a new - transaction is immediately begun. Note that this new transaction does - not use any connection resources until they are first needed. - - .. versionadded:: 0.9.9 - - """ - self._close_impl(invalidate=True) - - def _close_impl(self, invalidate): - self.expunge_all() - if self.transaction is not None: - for transaction in self.transaction._iterate_parents(): - transaction.close(invalidate) - - def expunge_all(self): - """Remove all object instances from this ``Session``. - - This is equivalent to calling ``expunge(obj)`` on all objects in this - ``Session``. - - """ - for state in self.identity_map.all_states() + list(self._new): - state._detach() - - self.identity_map = self._identity_cls() - self._new = {} - self._deleted = {} - - # TODO: need much more test coverage for bind_mapper() and similar ! - # TODO: + crystallize + document resolution order - # vis. bind_mapper/bind_table - - def _add_bind(self, key, bind): - try: - insp = inspect(key) - except sa_exc.NoInspectionAvailable: - if not isinstance(key, type): - raise exc.ArgumentError( - "Not acceptable bind target: %s" % - key) - else: - self.__binds[key] = bind - else: - if insp.is_selectable: - self.__binds[insp] = bind - elif insp.is_mapper: - self.__binds[insp.class_] = bind - for selectable in insp._all_tables: - self.__binds[selectable] = bind - else: - raise exc.ArgumentError( - "Not acceptable bind target: %s" % - key) - - def bind_mapper(self, mapper, bind): - """Associate a :class:`.Mapper` with a "bind", e.g. a :class:`.Engine` - or :class:`.Connection`. - - The given mapper is added to a lookup used by the - :meth:`.Session.get_bind` method. - - """ - self._add_bind(mapper, bind) - - def bind_table(self, table, bind): - """Associate a :class:`.Table` with a "bind", e.g. a :class:`.Engine` - or :class:`.Connection`. - - The given mapper is added to a lookup used by the - :meth:`.Session.get_bind` method. - - """ - self._add_bind(table, bind) - - def get_bind(self, mapper=None, clause=None): - """Return a "bind" to which this :class:`.Session` is bound. - - The "bind" is usually an instance of :class:`.Engine`, - except in the case where the :class:`.Session` has been - explicitly bound directly to a :class:`.Connection`. - - For a multiply-bound or unbound :class:`.Session`, the - ``mapper`` or ``clause`` arguments are used to determine the - appropriate bind to return. - - Note that the "mapper" argument is usually present - when :meth:`.Session.get_bind` is called via an ORM - operation such as a :meth:`.Session.query`, each - individual INSERT/UPDATE/DELETE operation within a - :meth:`.Session.flush`, call, etc. - - The order of resolution is: - - 1. if mapper given and session.binds is present, - locate a bind based on mapper. - 2. 
if clause given and session.binds is present, - locate a bind based on :class:`.Table` objects - found in the given clause present in session.binds. - 3. if session.bind is present, return that. - 4. if clause given, attempt to return a bind - linked to the :class:`.MetaData` ultimately - associated with the clause. - 5. if mapper given, attempt to return a bind - linked to the :class:`.MetaData` ultimately - associated with the :class:`.Table` or other - selectable to which the mapper is mapped. - 6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError` - is raised. - - :param mapper: - Optional :func:`.mapper` mapped class or instance of - :class:`.Mapper`. The bind can be derived from a :class:`.Mapper` - first by consulting the "binds" map associated with this - :class:`.Session`, and secondly by consulting the :class:`.MetaData` - associated with the :class:`.Table` to which the :class:`.Mapper` - is mapped for a bind. - - :param clause: - A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, - :func:`~.sql.expression.text`, - etc.). If the ``mapper`` argument is not present or could not - produce a bind, the given expression construct will be searched - for a bound element, typically a :class:`.Table` associated with - bound :class:`.MetaData`. - - """ - - if mapper is clause is None: - if self.bind: - return self.bind - else: - raise sa_exc.UnboundExecutionError( - "This session is not bound to a single Engine or " - "Connection, and no context was provided to locate " - "a binding.") - - if mapper is not None: - try: - mapper = inspect(mapper) - except sa_exc.NoInspectionAvailable: - if isinstance(mapper, type): - raise exc.UnmappedClassError(mapper) - else: - raise - - if self.__binds: - if mapper: - for cls in mapper.class_.__mro__: - if cls in self.__binds: - return self.__binds[cls] - if clause is None: - clause = mapper.mapped_table - - if clause is not None: - for t in sql_util.find_tables(clause, include_crud=True): - if t in self.__binds: - return self.__binds[t] - - if self.bind: - return self.bind - - if isinstance(clause, sql.expression.ClauseElement) and clause.bind: - return clause.bind - - if mapper and mapper.mapped_table.bind: - return mapper.mapped_table.bind - - context = [] - if mapper is not None: - context.append('mapper %s' % mapper) - if clause is not None: - context.append('SQL expression') - - raise sa_exc.UnboundExecutionError( - "Could not locate a bind configured on %s or this Session" % ( - ', '.join(context))) - - def query(self, *entities, **kwargs): - """Return a new :class:`.Query` object corresponding to this - :class:`.Session`.""" - - return self._query_cls(entities, self, **kwargs) - - @property - @util.contextmanager - def no_autoflush(self): - """Return a context manager that disables autoflush. - - e.g.:: - - with session.no_autoflush: - - some_object = SomeClass() - session.add(some_object) - # won't autoflush - some_object.related_thing = session.query(SomeRelated).first() - - Operations that proceed within the ``with:`` block - will not be subject to flushes occurring upon query - access. This is useful when initializing a series - of objects which involve existing database queries, - where the uncompleted object should not yet be flushed. - - .. 
versionadded:: 0.7.6 - - """ - autoflush = self.autoflush - self.autoflush = False - yield self - self.autoflush = autoflush - - def _autoflush(self): - if self.autoflush and not self._flushing: - try: - self.flush() - except sa_exc.StatementError as e: - # note we are reraising StatementError as opposed to - # raising FlushError with "chaining" to remain compatible - # with code that catches StatementError, IntegrityError, - # etc. - e.add_detail( - "raised as a result of Query-invoked autoflush; " - "consider using a session.no_autoflush block if this " - "flush is occurring prematurely") - util.raise_from_cause(e) - - def refresh(self, instance, attribute_names=None, lockmode=None): - """Expire and refresh the attributes on the given instance. - - A query will be issued to the database and all attributes will be - refreshed with their current database value. - - Lazy-loaded relational attributes will remain lazily loaded, so that - the instance-wide refresh operation will be followed immediately by - the lazy load of that attribute. - - Eagerly-loaded relational attributes will eagerly load within the - single refresh operation. - - Note that a highly isolated transaction will return the same values as - were previously read in that same transaction, regardless of changes - in database state outside of that transaction - usage of - :meth:`~Session.refresh` usually only makes sense if non-ORM SQL - statement were emitted in the ongoing transaction, or if autocommit - mode is turned on. - - :param attribute_names: optional. An iterable collection of - string attribute names indicating a subset of attributes to - be refreshed. - - :param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query` - as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`. - - .. seealso:: - - :ref:`session_expire` - introductory material - - :meth:`.Session.expire` - - :meth:`.Session.expire_all` - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - - self._expire_state(state, attribute_names) - - if loading.load_on_ident( - self.query(object_mapper(instance)), - state.key, refresh_state=state, - lockmode=lockmode, - only_load_props=attribute_names) is None: - raise sa_exc.InvalidRequestError( - "Could not refresh instance '%s'" % - instance_str(instance)) - - def expire_all(self): - """Expires all persistent instances within this Session. - - When any attributes on a persistent instance is next accessed, - a query will be issued using the - :class:`.Session` object's current transactional context in order to - load all expired attributes for the given instance. Note that - a highly isolated transaction will return the same values as were - previously read in that same transaction, regardless of changes - in database state outside of that transaction. - - To expire individual objects and individual attributes - on those objects, use :meth:`Session.expire`. - - The :class:`.Session` object's default behavior is to - expire all state whenever the :meth:`Session.rollback` - or :meth:`Session.commit` methods are called, so that new - state can be loaded for the new transaction. For this reason, - calling :meth:`Session.expire_all` should not be needed when - autocommit is ``False``, assuming the transaction is isolated. - - .. 
seealso:: - - :ref:`session_expire` - introductory material - - :meth:`.Session.expire` - - :meth:`.Session.refresh` - - """ - for state in self.identity_map.all_states(): - state._expire(state.dict, self.identity_map._modified) - - def expire(self, instance, attribute_names=None): - """Expire the attributes on an instance. - - Marks the attributes of an instance as out of date. When an expired - attribute is next accessed, a query will be issued to the - :class:`.Session` object's current transactional context in order to - load all expired attributes for the given instance. Note that - a highly isolated transaction will return the same values as were - previously read in that same transaction, regardless of changes - in database state outside of that transaction. - - To expire all objects in the :class:`.Session` simultaneously, - use :meth:`Session.expire_all`. - - The :class:`.Session` object's default behavior is to - expire all state whenever the :meth:`Session.rollback` - or :meth:`Session.commit` methods are called, so that new - state can be loaded for the new transaction. For this reason, - calling :meth:`Session.expire` only makes sense for the specific - case that a non-ORM SQL statement was emitted in the current - transaction. - - :param instance: The instance to be refreshed. - :param attribute_names: optional list of string attribute names - indicating a subset of attributes to be expired. - - .. seealso:: - - :ref:`session_expire` - introductory material - - :meth:`.Session.expire` - - :meth:`.Session.refresh` - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - self._expire_state(state, attribute_names) - - def _expire_state(self, state, attribute_names): - self._validate_persistent(state) - if attribute_names: - state._expire_attributes(state.dict, attribute_names) - else: - # pre-fetch the full cascade since the expire is going to - # remove associations - cascaded = list(state.manager.mapper.cascade_iterator( - 'refresh-expire', state)) - self._conditional_expire(state) - for o, m, st_, dct_ in cascaded: - self._conditional_expire(st_) - - def _conditional_expire(self, state): - """Expire a state if persistent, else expunge if pending""" - - if state.key: - state._expire(state.dict, self.identity_map._modified) - elif state in self._new: - self._new.pop(state) - state._detach() - - @util.deprecated("0.7", "The non-weak-referencing identity map " - "feature is no longer needed.") - def prune(self): - """Remove unreferenced instances cached in the identity map. - - Note that this method is only meaningful if "weak_identity_map" is set - to False. The default weak identity map is self-pruning. - - Removes any object in this Session's identity map that is not - referenced in user code, modified, new or scheduled for deletion. - Returns the number of objects pruned. - - """ - return self.identity_map.prune() - - def expunge(self, instance): - """Remove the `instance` from this ``Session``. - - This will free all internal references to the instance. Cascading - will be applied according to the *expunge* cascade rule. 
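# [Editorial sketch, not part of the patch] The expire()/refresh() semantics
# documented above, against the illustrative User model.
user = session.query(User).first()
session.expire(user, ["name"])  # "name" reloads from the database on next access
session.refresh(user)           # immediate SELECT refreshing all attributes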
-
-        """
-        try:
-            state = attributes.instance_state(instance)
-        except exc.NO_STATE:
-            raise exc.UnmappedInstanceError(instance)
-        if state.session_id is not self.hash_key:
-            raise sa_exc.InvalidRequestError(
-                "Instance %s is not present in this Session" %
-                state_str(state))
-
-        cascaded = list(state.manager.mapper.cascade_iterator(
-            'expunge', state))
-        self._expunge_state(state)
-        for o, m, st_, dct_ in cascaded:
-            self._expunge_state(st_)
-
-    def _expunge_state(self, state):
-        if state in self._new:
-            self._new.pop(state)
-            state._detach()
-        elif self.identity_map.contains_state(state):
-            self.identity_map.safe_discard(state)
-            self._deleted.pop(state, None)
-            state._detach()
-        elif self.transaction:
-            self.transaction._deleted.pop(state, None)
-            state._detach()
-
-    def _register_newly_persistent(self, states):
-        for state in states:
-            mapper = _state_mapper(state)
-
-            # prevent against last minute dereferences of the object
-            obj = state.obj()
-            if obj is not None:
-
-                instance_key = mapper._identity_key_from_state(state)
-
-                if _none_set.intersection(instance_key[1]) and \
-                        not mapper.allow_partial_pks or \
-                        _none_set.issuperset(instance_key[1]):
-                    raise exc.FlushError(
-                        "Instance %s has a NULL identity key. If this is an "
-                        "auto-generated value, check that the database table "
-                        "allows generation of new primary key values, and "
-                        "that the mapped Column object is configured to "
-                        "expect these generated values. Ensure also that "
-                        "this flush() is not occurring at an inappropriate "
-                        "time, such as within a load() event."
-                        % state_str(state)
-                    )
-
-                if state.key is None:
-                    state.key = instance_key
-                elif state.key != instance_key:
-                    # primary key switch. use safe_discard() in case another
-                    # state has already replaced this one in the identity
-                    # map (see test/orm/test_naturalpks.py ReversePKsTest)
-                    self.identity_map.safe_discard(state)
-                    if state in self.transaction._key_switches:
-                        orig_key = self.transaction._key_switches[state][0]
-                    else:
-                        orig_key = state.key
-                    self.transaction._key_switches[state] = (
-                        orig_key, instance_key)
-                    state.key = instance_key
-
-                self.identity_map.replace(state)
-
-        statelib.InstanceState._commit_all_states(
-            ((state, state.dict) for state in states),
-            self.identity_map
-        )
-
-        self._register_altered(states)
-        # remove from new last, might be the last strong ref
-        for state in set(states).intersection(self._new):
-            self._new.pop(state)
-
-    def _register_altered(self, states):
-        if self._enable_transaction_accounting and self.transaction:
-            for state in states:
-                if state in self._new:
-                    self.transaction._new[state] = True
-                else:
-                    self.transaction._dirty[state] = True
-
-    def _remove_newly_deleted(self, states):
-        for state in states:
-            if self._enable_transaction_accounting and self.transaction:
-                self.transaction._deleted[state] = True
-
-            self.identity_map.safe_discard(state)
-            self._deleted.pop(state, None)
-            state.deleted = True
-
-    def add(self, instance, _warn=True):
-        """Place an object in the ``Session``.
-
-        Its state will be persisted to the database on the next flush
-        operation.
-
-        Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
-        is ``expunge()``.
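# [Editorial sketch, not part of the patch] The add()/delete() unit-of-work
# cycle described in the docstrings above.
u = User(name="bob")
session.add(u)     # pending: an INSERT is queued for the next flush
session.flush()    # emits the INSERT inside the current transaction
session.delete(u)  # marked deleted: a DELETE is queued for the next flush
session.commit()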
- - """ - if _warn and self._warn_on_events: - self._flush_warning("Session.add()") - - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - - self._save_or_update_state(state) - - def add_all(self, instances): - """Add the given collection of instances to this ``Session``.""" - - if self._warn_on_events: - self._flush_warning("Session.add_all()") - - for instance in instances: - self.add(instance, _warn=False) - - def _save_or_update_state(self, state): - self._save_or_update_impl(state) - - mapper = _state_mapper(state) - for o, m, st_, dct_ in mapper.cascade_iterator( - 'save-update', - state, - halt_on=self._contains_state): - self._save_or_update_impl(st_) - - def delete(self, instance): - """Mark an instance as deleted. - - The database delete operation occurs upon ``flush()``. - - """ - if self._warn_on_events: - self._flush_warning("Session.delete()") - - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - - if state.key is None: - raise sa_exc.InvalidRequestError( - "Instance '%s' is not persisted" % - state_str(state)) - - if state in self._deleted: - return - - # ensure object is attached to allow the - # cascade operation to load deferred attributes - # and collections - self._attach(state, include_before=True) - - # grab the cascades before adding the item to the deleted list - # so that autoflush does not delete the item - # the strong reference to the instance itself is significant here - cascade_states = list(state.manager.mapper.cascade_iterator( - 'delete', state)) - - self._deleted[state] = state.obj() - self.identity_map.add(state) - - for o, m, st_, dct_ in cascade_states: - self._delete_impl(st_) - - def merge(self, instance, load=True): - """Copy the state of a given instance into a corresponding instance - within this :class:`.Session`. - - :meth:`.Session.merge` examines the primary key attributes of the - source instance, and attempts to reconcile it with an instance of the - same primary key in the session. If not found locally, it attempts - to load the object from the database based on primary key, and if - none can be located, creates a new instance. The state of each - attribute on the source instance is then copied to the target - instance. The resulting target instance is then returned by the - method; the original source instance is left unmodified, and - un-associated with the :class:`.Session` if not already. - - This operation cascades to associated instances if the association is - mapped with ``cascade="merge"``. - - See :ref:`unitofwork_merging` for a detailed discussion of merging. - - :param instance: Instance to be merged. - :param load: Boolean, when False, :meth:`.merge` switches into - a "high performance" mode which causes it to forego emitting history - events as well as all database access. This flag is used for - cases such as transferring graphs of objects into a :class:`.Session` - from a second level cache, or to transfer just-loaded objects - into the :class:`.Session` owned by a worker thread or process - without re-querying the database. - - The ``load=False`` use case adds the caveat that the given - object has to be in a "clean" state, that is, has no pending changes - to be flushed - even if the incoming object is detached from any - :class:`.Session`. 
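# [Editorial sketch, not part of the patch] merge() as documented above:
# reconcile a detached object with the session's copy by primary key.
detached = User(id=1, name="renamed")
merged = session.merge(detached)  # loads (or creates) the persistent copy
assert merged in session
assert detached not in session    # the source object is left unmodified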
This is so that when - the merge operation populates local attributes and - cascades to related objects and - collections, the values can be "stamped" onto the - target object as is, without generating any history or attribute - events, and without the need to reconcile the incoming data with - any existing related objects or collections that might not - be loaded. The resulting objects from ``load=False`` are always - produced as "clean", so it is only appropriate that the given objects - should be "clean" as well, else this suggests a mis-use of the - method. - - """ - - if self._warn_on_events: - self._flush_warning("Session.merge()") - - _recursive = {} - - if load: - # flush current contents if we expect to load data - self._autoflush() - - object_mapper(instance) # verify mapped - autoflush = self.autoflush - try: - self.autoflush = False - return self._merge( - attributes.instance_state(instance), - attributes.instance_dict(instance), - load=load, _recursive=_recursive) - finally: - self.autoflush = autoflush - - def _merge(self, state, state_dict, load=True, _recursive=None): - mapper = _state_mapper(state) - if state in _recursive: - return _recursive[state] - - new_instance = False - key = state.key - - if key is None: - if not load: - raise sa_exc.InvalidRequestError( - "merge() with load=False option does not support " - "objects transient (i.e. unpersisted) objects. flush() " - "all changes on mapped instances before merging with " - "load=False.") - key = mapper._identity_key_from_state(state) - - if key in self.identity_map: - merged = self.identity_map[key] - - elif not load: - if state.modified: - raise sa_exc.InvalidRequestError( - "merge() with load=False option does not support " - "objects marked as 'dirty'. flush() all changes on " - "mapped instances before merging with load=False.") - merged = mapper.class_manager.new_instance() - merged_state = attributes.instance_state(merged) - merged_state.key = key - self._update_impl(merged_state) - new_instance = True - - elif not _none_set.intersection(key[1]) or \ - (mapper.allow_partial_pks and - not _none_set.issuperset(key[1])): - merged = self.query(mapper.class_).get(key[1]) - else: - merged = None - - if merged is None: - merged = mapper.class_manager.new_instance() - merged_state = attributes.instance_state(merged) - merged_dict = attributes.instance_dict(merged) - new_instance = True - self._save_or_update_state(merged_state) - else: - merged_state = attributes.instance_state(merged) - merged_dict = attributes.instance_dict(merged) - - _recursive[state] = merged - - # check that we didn't just pull the exact same - # state out. - if state is not merged_state: - # version check if applicable - if mapper.version_id_col is not None: - existing_version = mapper._get_state_attr_by_column( - state, - state_dict, - mapper.version_id_col, - passive=attributes.PASSIVE_NO_INITIALIZE) - - merged_version = mapper._get_state_attr_by_column( - merged_state, - merged_dict, - mapper.version_id_col, - passive=attributes.PASSIVE_NO_INITIALIZE) - - if existing_version is not attributes.PASSIVE_NO_RESULT and \ - merged_version is not attributes.PASSIVE_NO_RESULT and \ - existing_version != merged_version: - raise exc.StaleDataError( - "Version id '%s' on merged state %s " - "does not match existing version '%s'. " - "Leave the version attribute unset when " - "merging to update the most recent version." 
- % ( - existing_version, - state_str(merged_state), - merged_version - )) - - merged_state.load_path = state.load_path - merged_state.load_options = state.load_options - - for prop in mapper.iterate_properties: - prop.merge(self, state, state_dict, - merged_state, merged_dict, - load, _recursive) - - if not load: - # remove any history - merged_state._commit_all(merged_dict, self.identity_map) - - if new_instance: - merged_state.manager.dispatch.load(merged_state, None) - return merged - - def _validate_persistent(self, state): - if not self.identity_map.contains_state(state): - raise sa_exc.InvalidRequestError( - "Instance '%s' is not persistent within this Session" % - state_str(state)) - - def _save_impl(self, state): - if state.key is not None: - raise sa_exc.InvalidRequestError( - "Object '%s' already has an identity - " - "it can't be registered as pending" % state_str(state)) - - self._before_attach(state) - if state not in self._new: - self._new[state] = state.obj() - state.insert_order = len(self._new) - self._attach(state) - - def _update_impl(self, state, discard_existing=False): - if (self.identity_map.contains_state(state) and - state not in self._deleted): - return - - if state.key is None: - raise sa_exc.InvalidRequestError( - "Instance '%s' is not persisted" % - state_str(state)) - - if state.deleted: - raise sa_exc.InvalidRequestError( - "Instance '%s' has been deleted. Use the make_transient() " - "function to send this object back to the transient state." % - state_str(state) - ) - self._before_attach(state, check_identity_map=False) - self._deleted.pop(state, None) - if discard_existing: - self.identity_map.replace(state) - else: - self.identity_map.add(state) - self._attach(state) - - def _save_or_update_impl(self, state): - if state.key is None: - self._save_impl(state) - else: - self._update_impl(state) - - def _delete_impl(self, state): - if state in self._deleted: - return - - if state.key is None: - return - - self._attach(state, include_before=True) - self._deleted[state] = state.obj() - self.identity_map.add(state) - - def enable_relationship_loading(self, obj): - """Associate an object with this :class:`.Session` for related - object loading. - - .. warning:: - - :meth:`.enable_relationship_loading` exists to serve special - use cases and is not recommended for general use. - - Accesses of attributes mapped with :func:`.relationship` - will attempt to load a value from the database using this - :class:`.Session` as the source of connectivity. The values - will be loaded based on foreign key values present on this - object - it follows that this functionality - generally only works for many-to-one-relationships. - - The object will be attached to this session, but will - **not** participate in any persistence operations; its state - for almost all purposes will remain either "transient" or - "detached", except for the case of relationship loading. - - Also note that backrefs will often not work as expected. - Altering a relationship-bound attribute on the target object - may not fire off a backref event, if the effective value - is what was already loaded from a foreign-key-holding value. - - The :meth:`.Session.enable_relationship_loading` method is - similar to the ``load_on_pending`` flag on :func:`.relationship`. - Unlike that flag, :meth:`.Session.enable_relationship_loading` allows - an object to remain transient while still being able to load - related items. 
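A rough sketch of the pattern described above, continuing the earlier ``session`` and assuming a hypothetical ``Address`` class with a ``user_id`` foreign key and a many-to-one ``user`` relationship (not part of this patch)::

    addr = Address(user_id=5)                 # transient; FK value set by hand
    session.enable_relationship_loading(addr)
    owner = addr.user                         # lazy-loads via the foreign key value
    session.add(addr)                         # only now does it become pending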
- - To make a transient object associated with a :class:`.Session` - via :meth:`.Session.enable_relationship_loading` pending, add - it to the :class:`.Session` using :meth:`.Session.add` normally. - - :meth:`.Session.enable_relationship_loading` does not improve - behavior when the ORM is used normally - object references should be - constructed at the object level, not at the foreign key level, so - that they are present in an ordinary way before flush() - proceeds. This method is not intended for general use. - - .. versionadded:: 0.8 - - .. seealso:: - - ``load_on_pending`` at :func:`.relationship` - this flag - allows per-relationship loading of many-to-ones on items that - are pending. - - """ - state = attributes.instance_state(obj) - self._attach(state, include_before=True) - state._load_pending = True - - def _before_attach(self, state, check_identity_map=True): - if state.session_id != self.hash_key and \ - self.dispatch.before_attach: - self.dispatch.before_attach(self, state.obj()) - - if check_identity_map and state.key and \ - state.key in self.identity_map and \ - not self.identity_map.contains_state(state): - raise sa_exc.InvalidRequestError( - "Can't attach instance " - "%s; another instance with key %s is already " - "present in this session." % (state_str(state), state.key)) - - if state.session_id and \ - state.session_id is not self.hash_key and \ - state.session_id in _sessions: - raise sa_exc.InvalidRequestError( - "Object '%s' is already attached to session '%s' " - "(this is '%s')" % (state_str(state), - state.session_id, self.hash_key)) - - def _attach(self, state, include_before=False): - - if state.session_id != self.hash_key: - if include_before: - self._before_attach(state) - state.session_id = self.hash_key - if state.modified and state._strong_obj is None: - state._strong_obj = state.obj() - if self.dispatch.after_attach: - self.dispatch.after_attach(self, state.obj()) - - def __contains__(self, instance): - """Return True if the instance is associated with this session. - - The instance may be pending or persistent within the Session for a - result of True. - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - return self._contains_state(state) - - def __iter__(self): - """Iterate over all pending or persistent instances within this - Session. - - """ - return iter( - list(self._new.values()) + list(self.identity_map.values())) - - def _contains_state(self, state): - return state in self._new or self.identity_map.contains_state(state) - - def flush(self, objects=None): - """Flush all the object changes to the database. - - Writes out all pending object creations, deletions and modifications - to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are - automatically ordered by the Session's unit of work dependency - solver. - - Database operations will be issued in the current transactional - context and do not affect the state of the transaction, unless an - error occurs, in which case the entire transaction is rolled back. - You may flush() as often as you like within a transaction to move - changes from Python to the database's transaction buffer. - - For ``autocommit`` Sessions with no active manual transaction, flush() - will create a transaction on the fly that surrounds the entire set of - operations into the flush. - - :param objects: Optional; restricts the flush operation to operate - only on elements that are in the given collection. 
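A short sketch of how flush() relates to the enclosing transaction, under the same illustrative ``User`` mapping as above::

    session.add_all([User(name='a'), User(name='b')])
    session.flush()      # both INSERTs are sent; transaction remains open
    session.rollback()   # rolls the INSERTs back; flush() never commits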
- - This feature is for an extremely narrow set of use cases where - particular objects may need to be operated upon before the - full flush() occurs. It is not intended for general use. - - """ - - if self._flushing: - raise sa_exc.InvalidRequestError("Session is already flushing") - - if self._is_clean(): - return - try: - self._flushing = True - self._flush(objects) - finally: - self._flushing = False - - def _flush_warning(self, method): - util.warn( - "Usage of the '%s' operation is not currently supported " - "within the execution stage of the flush process. " - "Results may not be consistent. Consider using alternative " - "event listeners or connection-level operations instead." - % method) - - def _is_clean(self): - return not self.identity_map.check_modified() and \ - not self._deleted and \ - not self._new - - def _flush(self, objects=None): - - dirty = self._dirty_states - if not dirty and not self._deleted and not self._new: - self.identity_map._modified.clear() - return - - flush_context = UOWTransaction(self) - - if self.dispatch.before_flush: - self.dispatch.before_flush(self, flush_context, objects) - # re-establish "dirty states" in case the listeners - # added - dirty = self._dirty_states - - deleted = set(self._deleted) - new = set(self._new) - - dirty = set(dirty).difference(deleted) - - # create the set of all objects we want to operate upon - if objects: - # specific list passed in - objset = set() - for o in objects: - try: - state = attributes.instance_state(o) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(o) - objset.add(state) - else: - objset = None - - # store objects whose fate has been decided - processed = set() - - # put all saves/updates into the flush context. detect top-level - # orphans and throw them into deleted. - if objset: - proc = new.union(dirty).intersection(objset).difference(deleted) - else: - proc = new.union(dirty).difference(deleted) - - for state in proc: - is_orphan = ( - _state_mapper(state)._is_orphan(state) and state.has_identity) - flush_context.register_object(state, isdelete=is_orphan) - processed.add(state) - - # put all remaining deletes into the flush context. - if objset: - proc = deleted.intersection(objset).difference(processed) - else: - proc = deleted.difference(processed) - for state in proc: - flush_context.register_object(state, isdelete=True) - - if not flush_context.has_work: - return - - flush_context.transaction = transaction = self.begin( - subtransactions=True) - try: - self._warn_on_events = True - try: - flush_context.execute() - finally: - self._warn_on_events = False - - self.dispatch.after_flush(self, flush_context) - - flush_context.finalize_flush_changes() - - if not objects and self.identity_map._modified: - len_ = len(self.identity_map._modified) - - statelib.InstanceState._commit_all_states( - [(state, state.dict) for state in - self.identity_map._modified], - instance_dict=self.identity_map) - util.warn("Attribute history events accumulated on %d " - "previously clean instances " - "within inner-flush event handlers have been " - "reset, and will not result in database updates. " - "Consider using set_committed_value() within " - "inner-flush event handlers to avoid this warning." 
- % len_) - - # useful assertions: - # if not objects: - # assert not self.identity_map._modified - # else: - # assert self.identity_map._modified == \ - # self.identity_map._modified.difference(objects) - - self.dispatch.after_flush_postexec(self, flush_context) - - transaction.commit() - - except: - with util.safe_reraise(): - transaction.rollback(_capture_exception=True) - - def bulk_save_objects( - self, objects, return_defaults=False, update_changed_only=True): - """Perform a bulk save of the given list of objects. - - The bulk save feature allows mapped objects to be used as the - source of simple INSERT and UPDATE operations which can be more easily - grouped together into higher performing "executemany" - operations; the extraction of data from the objects is also performed - using a lower-latency process that ignores whether or not attributes - have actually been modified in the case of UPDATEs, and also ignores - SQL expressions. - - The objects as given are not added to the session and no additional - state is established on them, unless the ``return_defaults`` flag - is also set, in which case primary key attributes and server-side - default values will be populated. - - .. versionadded:: 1.0.0 - - .. warning:: - - The bulk save feature allows for a lower-latency INSERT/UPDATE - of rows at the expense of most other unit-of-work features. - Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw - INSERT/UPDATES of records. - - **Please read the list of caveats at** :ref:`bulk_operations` - **before using this method, and fully test and confirm the - functionality of all code developed using these systems.** - - :param objects: a list of mapped object instances. The mapped - objects are persisted as is, and are **not** associated with the - :class:`.Session` afterwards. - - For each object, whether the object is sent as an INSERT or an - UPDATE is dependent on the same rules used by the :class:`.Session` - in traditional operation; if the object has the - :attr:`.InstanceState.key` - attribute set, then the object is assumed to be "detached" and - will result in an UPDATE. Otherwise, an INSERT is used. - - In the case of an UPDATE, statements are grouped based on which - attributes have changed, and are thus to be the subject of each - SET clause. If ``update_changed_only`` is False, then all - attributes present within each object are applied to the UPDATE - statement, which may help in allowing the statements to be grouped - together into a larger executemany(), and will also reduce the - overhead of checking history on attributes. - - :param return_defaults: when True, rows that are missing values which - generate defaults, namely integer primary key defaults and sequences, - will be inserted **one at a time**, so that the primary key value - is available. In particular this will allow joined-inheritance - and other multi-table mappings to insert correctly without the need - to provide primary key values ahead of time; however, - :paramref:`.Session.bulk_save_objects.return_defaults` **greatly - reduces the performance gains** of the method overall. - - :param update_changed_only: when True, UPDATE statements are rendered - based on those attributes in each state that have logged changes. - When False, all attributes present are rendered into the SET clause - with the exception of primary key attributes. - - .. 
seealso:: - - :ref:`bulk_operations` - - :meth:`.Session.bulk_insert_mappings` - - :meth:`.Session.bulk_update_mappings` - - """ - for (mapper, isupdate), states in itertools.groupby( - (attributes.instance_state(obj) for obj in objects), - lambda state: (state.mapper, state.key is not None) - ): - self._bulk_save_mappings( - mapper, states, isupdate, True, - return_defaults, update_changed_only) - - def bulk_insert_mappings(self, mapper, mappings, return_defaults=False): - """Perform a bulk insert of the given list of mapping dictionaries. - - The bulk insert feature allows plain Python dictionaries to be used as - the source of simple INSERT operations which can be more easily - grouped together into higher performing "executemany" - operations. Using dictionaries, there is no "history" or session - state management features in use, reducing latency when inserting - large numbers of simple rows. - - The values within the dictionaries as given are typically passed - without modification into Core :meth:`.Insert` constructs, after - organizing the values within them across the tables to which - the given mapper is mapped. - - .. versionadded:: 1.0.0 - - .. warning:: - - The bulk insert feature allows for a lower-latency INSERT - of rows at the expense of most other unit-of-work features. - Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw - INSERT of records. - - **Please read the list of caveats at** :ref:`bulk_operations` - **before using this method, and fully test and confirm the - functionality of all code developed using these systems.** - - :param mapper: a mapped class, or the actual :class:`.Mapper` object, - representing the single kind of object represented within the mapping - list. - - :param mappings: a list of dictionaries, each one containing the state - of the mapped row to be inserted, in terms of the attribute names - on the mapped class. If the mapping refers to multiple tables, - such as a joined-inheritance mapping, each dictionary must contain - all keys to be populated into all tables. - - :param return_defaults: when True, rows that are missing values which - generate defaults, namely integer primary key defaults and sequences, - will be inserted **one at a time**, so that the primary key value - is available. In particular this will allow joined-inheritance - and other multi-table mappings to insert correctly without the need - to provide primary - key values ahead of time; however, - :paramref:`.Session.bulk_insert_mappings.return_defaults` - **greatly reduces the performance gains** of the method overall. - If the rows - to be inserted only refer to a single table, then there is no - reason this flag should be set as the returned default information - is not used. - - - .. seealso:: - - :ref:`bulk_operations` - - :meth:`.Session.bulk_save_objects` - - :meth:`.Session.bulk_update_mappings` - - """ - self._bulk_save_mappings( - mapper, mappings, False, False, return_defaults, False) - - def bulk_update_mappings(self, mapper, mappings): - """Perform a bulk update of the given list of mapping dictionaries. - - The bulk update feature allows plain Python dictionaries to be used as - the source of simple UPDATE operations which can be more easily - grouped together into higher performing "executemany" - operations. Using dictionaries, there is no "history" or session - state management features in use, reducing latency when updating - large numbers of simple rows. - - .. 
versionadded:: 1.0.0 - - .. warning:: - - The bulk update feature allows for a lower-latency UPDATE - of rows at the expense of most other unit-of-work features. - Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw - UPDATES of records. - - **Please read the list of caveats at** :ref:`bulk_operations` - **before using this method, and fully test and confirm the - functionality of all code developed using these systems.** - - :param mapper: a mapped class, or the actual :class:`.Mapper` object, - representing the single kind of object represented within the mapping - list. - - :param mappings: a list of dictionaries, each one containing the state - of the mapped row to be updated, in terms of the attribute names - on the mapped class. If the mapping refers to multiple tables, - such as a joined-inheritance mapping, each dictionary may contain - keys corresponding to all tables. All those keys which are present - and are not part of the primary key are applied to the SET clause - of the UPDATE statement; the primary key values, which are required, - are applied to the WHERE clause. - - - .. seealso:: - - :ref:`bulk_operations` - - :meth:`.Session.bulk_insert_mappings` - - :meth:`.Session.bulk_save_objects` - - """ - self._bulk_save_mappings(mapper, mappings, True, False, False, False) - - def _bulk_save_mappings( - self, mapper, mappings, isupdate, isstates, - return_defaults, update_changed_only): - mapper = _class_to_mapper(mapper) - self._flushing = True - - transaction = self.begin( - subtransactions=True) - try: - if isupdate: - persistence._bulk_update( - mapper, mappings, transaction, - isstates, update_changed_only) - else: - persistence._bulk_insert( - mapper, mappings, transaction, isstates, return_defaults) - transaction.commit() - - except: - with util.safe_reraise(): - transaction.rollback(_capture_exception=True) - finally: - self._flushing = False - - def is_modified(self, instance, include_collections=True, - passive=True): - """Return ``True`` if the given instance has locally - modified attributes. - - This method retrieves the history for each instrumented - attribute on the instance and performs a comparison of the current - value to its previously committed value, if any. - - It is in effect a more expensive and accurate - version of checking for the given instance in the - :attr:`.Session.dirty` collection; a full test for - each attribute's net "dirty" status is performed. - - E.g.:: - - return session.is_modified(someobject) - - .. versionchanged:: 0.8 - When using SQLAlchemy 0.7 and earlier, the ``passive`` - flag should **always** be explicitly set to ``True``, - else SQL loads/autoflushes may proceed which can affect - the modified state itself: - ``session.is_modified(someobject, passive=True)``\ . - In 0.8 and above, the behavior is corrected and - this flag is ignored. - - A few caveats to this method apply: - - * Instances present in the :attr:`.Session.dirty` collection may - report ``False`` when tested with this method. This is because - the object may have received change events via attribute mutation, - thus placing it in :attr:`.Session.dirty`, but ultimately the state - is the same as that loaded from the database, resulting in no net - change here. 
- * Scalar attributes may not have recorded the previously set - value when a new value was applied, if the attribute was not loaded, - or was expired, at the time the new value was received - in these - cases, the attribute is assumed to have a change, even if there is - ultimately no net change against its database value. SQLAlchemy in - most cases does not need the "old" value when a set event occurs, so - it skips the expense of a SQL call if the old value isn't present, - based on the assumption that an UPDATE of the scalar value is - usually needed, and in those few cases where it isn't, is less - expensive on average than issuing a defensive SELECT. - - The "old" value is fetched unconditionally upon set only if the - attribute container has the ``active_history`` flag set to ``True``. - This flag is set typically for primary key attributes and scalar - object references that are not a simple many-to-one. To set this - flag for any arbitrary mapped column, use the ``active_history`` - argument with :func:`.column_property`. - - :param instance: mapped instance to be tested for pending changes. - :param include_collections: Indicates if multivalued collections - should be included in the operation. Setting this to ``False`` is a - way to detect only local-column based properties (i.e. scalar columns - or many-to-one foreign keys) that would result in an UPDATE for this - instance upon flush. - :param passive: - .. versionchanged:: 0.8 - Ignored for backwards compatibility. - When using SQLAlchemy 0.7 and earlier, this flag should always - be set to ``True``. - - """ - state = object_state(instance) - - if not state.modified: - return False - - dict_ = state.dict - - for attr in state.manager.attributes: - if \ - ( - not include_collections and - hasattr(attr.impl, 'get_collection') - ) or not hasattr(attr.impl, 'get_history'): - continue - - (added, unchanged, deleted) = \ - attr.impl.get_history(state, dict_, - passive=attributes.NO_CHANGE) - - if added or deleted: - return True - else: - return False - - @property - def is_active(self): - """True if this :class:`.Session` is in "transaction mode" and - is not in "partial rollback" state. - - The :class:`.Session` in its default mode of ``autocommit=False`` - is essentially always in "transaction mode", in that a - :class:`.SessionTransaction` is associated with it as soon as - it is instantiated. This :class:`.SessionTransaction` is immediately - replaced with a new one as soon as it is ended, due to a rollback, - commit, or close operation. - - "Transaction mode" does *not* indicate whether - or not actual database connection resources are in use; the - :class:`.SessionTransaction` object coordinates among zero or more - actual database transactions, and starts out with none, accumulating - individual DBAPI connections as different data sources are used - within its scope. The best way to track when a particular - :class:`.Session` has actually begun to use DBAPI resources is to - implement a listener using the :meth:`.SessionEvents.after_begin` - method, which will deliver both the :class:`.Session` as well as the - target :class:`.Connection` to a user-defined event listener. - - The "partial rollback" state refers to when an "inner" transaction, - typically used during a flush, encounters an error and emits a - rollback of the DBAPI connection. At this point, the - :class:`.Session` is in "partial rollback" and awaits for the user to - call :meth:`.Session.rollback`, in order to close out the - transaction stack. 
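The recovery pattern implied here, sketched with an illustrative duplicate primary key to force a failed flush (continuing the earlier ``User`` sketch)::

    from sqlalchemy import exc

    session.add(User(id=1, name='first'))
    session.add(User(id=1, name='dupe'))    # will collide on INSERT
    try:
        session.commit()
    except exc.SQLAlchemyError:
        session.rollback()       # ends the "partial rollback" state
    assert session.is_active     # a fresh transaction is available again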
It is in this "partial rollback" period that the - :attr:`.is_active` flag returns False. After the call to - :meth:`.Session.rollback`, the :class:`.SessionTransaction` is - replaced with a new one and :attr:`.is_active` returns ``True`` again. - - When a :class:`.Session` is used in ``autocommit=True`` mode, the - :class:`.SessionTransaction` is only instantiated within the scope - of a flush call, or when :meth:`.Session.begin` is called. So - :attr:`.is_active` will always be ``False`` outside of a flush or - :meth:`.Session.begin` block in this mode, and will be ``True`` - within the :meth:`.Session.begin` block as long as it doesn't enter - "partial rollback" state. - - From all the above, it follows that the only purpose to this flag is - for application frameworks that wish to detect is a "rollback" is - necessary within a generic error handling routine, for - :class:`.Session` objects that would otherwise be in - "partial rollback" mode. In a typical integration case, this is also - not necessary as it is standard practice to emit - :meth:`.Session.rollback` unconditionally within the outermost - exception catch. - - To track the transactional state of a :class:`.Session` fully, - use event listeners, primarily the :meth:`.SessionEvents.after_begin`, - :meth:`.SessionEvents.after_commit`, - :meth:`.SessionEvents.after_rollback` and related events. - - """ - return self.transaction and self.transaction.is_active - - identity_map = None - """A mapping of object identities to objects themselves. - - Iterating through ``Session.identity_map.values()`` provides - access to the full set of persistent objects (i.e., those - that have row identity) currently in the session. - - .. seealso:: - - :func:`.identity_key` - helper function to produce the keys used - in this dictionary. - - """ - - @property - def _dirty_states(self): - """The set of all persistent states considered dirty. - - This method returns all states that were modified including - those that were possibly deleted. - - """ - return self.identity_map._dirty_states() - - @property - def dirty(self): - """The set of all persistent instances considered dirty. - - E.g.:: - - some_mapped_object in session.dirty - - Instances are considered dirty when they were modified but not - deleted. - - Note that this 'dirty' calculation is 'optimistic'; most - attribute-setting or collection modification operations will - mark an instance as 'dirty' and place it in this set, even if - there is no net change to the attribute's value. At flush - time, the value of each attribute is compared to its - previously saved value, and if there's no net change, no SQL - operation will occur (this is a more expensive operation so - it's only done at flush time). - - To check if an instance has actionable net changes to its - attributes, use the :meth:`.Session.is_modified` method. - - """ - return util.IdentitySet( - [state.obj() - for state in self._dirty_states - if state not in self._deleted]) - - @property - def deleted(self): - "The set of all instances marked as 'deleted' within this ``Session``" - - return util.IdentitySet(list(self._deleted.values())) - - @property - def new(self): - "The set of all instances marked as 'new' within this ``Session``." - - return util.IdentitySet(list(self._new.values())) - - -class sessionmaker(_SessionClassMethods): - """A configurable :class:`.Session` factory. 
- - The :class:`.sessionmaker` factory generates new - :class:`.Session` objects when called, creating them given - the configurational arguments established here. - - e.g.:: - - # global scope - Session = sessionmaker(autoflush=False) - - # later, in a local scope, create and use a session: - sess = Session() - - Any keyword arguments sent to the constructor itself will override the - "configured" keywords:: - - Session = sessionmaker() - - # bind an individual session to a connection - sess = Session(bind=connection) - - The class also includes a method :meth:`.configure`, which can - be used to specify additional keyword arguments to the factory, which - will take effect for subsequent :class:`.Session` objects generated. - This is usually used to associate one or more :class:`.Engine` objects - with an existing :class:`.sessionmaker` factory before it is first - used:: - - # application starts - Session = sessionmaker() - - # ... later - engine = create_engine('sqlite:///foo.db') - Session.configure(bind=engine) - - sess = Session() - - .. seealso: - - :ref:`session_getting` - introductory text on creating - sessions using :class:`.sessionmaker`. - - """ - - def __init__(self, bind=None, class_=Session, autoflush=True, - autocommit=False, - expire_on_commit=True, - info=None, **kw): - """Construct a new :class:`.sessionmaker`. - - All arguments here except for ``class_`` correspond to arguments - accepted by :class:`.Session` directly. See the - :meth:`.Session.__init__` docstring for more details on parameters. - - :param bind: a :class:`.Engine` or other :class:`.Connectable` with - which newly created :class:`.Session` objects will be associated. - :param class_: class to use in order to create new :class:`.Session` - objects. Defaults to :class:`.Session`. - :param autoflush: The autoflush setting to use with newly created - :class:`.Session` objects. - :param autocommit: The autocommit setting to use with newly created - :class:`.Session` objects. - :param expire_on_commit=True: the expire_on_commit setting to use - with newly created :class:`.Session` objects. - :param info: optional dictionary of information that will be available - via :attr:`.Session.info`. Note this dictionary is *updated*, not - replaced, when the ``info`` parameter is specified to the specific - :class:`.Session` construction operation. - - .. versionadded:: 0.9.0 - - :param \**kw: all other keyword arguments are passed to the - constructor of newly created :class:`.Session` objects. - - """ - kw['bind'] = bind - kw['autoflush'] = autoflush - kw['autocommit'] = autocommit - kw['expire_on_commit'] = expire_on_commit - if info is not None: - kw['info'] = info - self.kw = kw - # make our own subclass of the given class, so that - # events can be associated with it specifically. - self.class_ = type(class_.__name__, (class_,), {}) - - def __call__(self, **local_kw): - """Produce a new :class:`.Session` object using the configuration - established in this :class:`.sessionmaker`. - - In Python, the ``__call__`` method is invoked on an object when - it is "called" in the same way as a function:: - - Session = sessionmaker() - session = Session() # invokes sessionmaker.__call__() - - """ - for k, v in self.kw.items(): - if k == 'info' and 'info' in local_kw: - d = v.copy() - d.update(local_kw['info']) - local_kw['info'] = d - else: - local_kw.setdefault(k, v) - return self.class_(**local_kw) - - def configure(self, **new_kw): - """(Re)configure the arguments for this sessionmaker. 
- - e.g.:: - - Session = sessionmaker() - - Session.configure(bind=create_engine('sqlite://')) - """ - self.kw.update(new_kw) - - def __repr__(self): - return "%s(class_=%r,%s)" % ( - self.__class__.__name__, - self.class_.__name__, - ", ".join("%s=%r" % (k, v) for k, v in self.kw.items()) - ) - - -def make_transient(instance): - """Make the given instance 'transient'. - - This will remove its association with any - session and additionally will remove its "identity key", - such that it's as though the object were newly constructed, - except retaining its values. It also resets the - "deleted" flag on the state if this object - had been explicitly deleted by its session. - - Attributes which were "expired" or deferred at the - instance level are reverted to undefined, and - will not trigger any loads. - - """ - state = attributes.instance_state(instance) - s = _state_session(state) - if s: - s._expunge_state(state) - - # remove expired state - state.expired_attributes.clear() - - # remove deferred callables - if state.callables: - del state.callables - - if state.key: - del state.key - if state.deleted: - del state.deleted - - -def make_transient_to_detached(instance): - """Make the given transient instance 'detached'. - - All attribute history on the given instance - will be reset as though the instance were freshly loaded - from a query. Missing attributes will be marked as expired. - The primary key attributes of the object, which are required, will be made - into the "key" of the instance. - - The object can then be added to a session, or merged - possibly with the load=False flag, at which point it will look - as if it were loaded that way, without emitting SQL. - - This is a special use case function that differs from a normal - call to :meth:`.Session.merge` in that a given persistent state - can be manufactured without any SQL calls. - - .. versionadded:: 0.9.5 - - .. seealso:: - - :func:`.make_transient` - - """ - state = attributes.instance_state(instance) - if state.session_id or state.key: - raise sa_exc.InvalidRequestError( - "Given object must be transient") - state.key = state.mapper._identity_key_from_state(state) - if state.deleted: - del state.deleted - state._commit_all(state.dict) - state._expire_attributes(state.dict, state.unloaded) - - -def object_session(instance): - """Return the :class:`.Session` to which the given instance belongs. - - This is essentially the same as the :attr:`.InstanceState.session` - accessor. See that attribute for details. - - """ - - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - else: - return _state_session(state) - - -_new_sessionid = util.counter() diff --git a/python/sqlalchemy/orm/state.py b/python/sqlalchemy/orm/state.py deleted file mode 100644 index 3cbeed0b..00000000 --- a/python/sqlalchemy/orm/state.py +++ /dev/null @@ -1,729 +0,0 @@ -# orm/state.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines instrumentation of instances. - -This module is usually not directly visible to user applications, but -defines a large part of the ORM's interactivity. - -""" - -import weakref -from .. import util -from . 
import exc as orm_exc, interfaces -from .path_registry import PathRegistry -from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \ - NO_VALUE, PASSIVE_NO_INITIALIZE, INIT_OK, PASSIVE_OFF -from . import base - - -class InstanceState(interfaces.InspectionAttr): - """tracks state information at the instance level. - - The :class:`.InstanceState` is a key object used by the - SQLAlchemy ORM in order to track the state of an object; - it is created the moment an object is instantiated, typically - as a result of :term:`instrumentation` which SQLAlchemy applies - to the ``__init__()`` method of the class. - - :class:`.InstanceState` is also a semi-public object, - available for runtime inspection as to the state of a - mapped instance, including information such as its current - status within a particular :class:`.Session` and details - about data on individual attributes. The public API - in order to acquire a :class:`.InstanceState` object - is to use the :func:`.inspect` system:: - - >>> from sqlalchemy import inspect - >>> insp = inspect(some_mapped_object) - - .. seealso:: - - :ref:`core_inspection_toplevel` - - """ - - session_id = None - key = None - runid = None - load_options = util.EMPTY_SET - load_path = () - insert_order = None - _strong_obj = None - modified = False - expired = False - deleted = False - _load_pending = False - is_instance = True - - callables = () - """A namespace where a per-state loader callable can be associated. - - In SQLAlchemy 1.0, this is only used for lazy loaders / deferred - loaders that were set up via query option. - - Previously, callables was used also to indicate expired attributes - by storing a link to the InstanceState itself in this dictionary. - This role is now handled by the expired_attributes set. - - """ - - def __init__(self, obj, manager): - self.class_ = obj.__class__ - self.manager = manager - self.obj = weakref.ref(obj, self._cleanup) - self.committed_state = {} - self.expired_attributes = set() - - expired_attributes = None - """The set of keys which are 'expired' to be loaded by - the manager's deferred scalar loader, assuming no pending - changes. - - see also the ``unmodified`` collection which is intersected - against this set when a refresh operation occurs.""" - - - @util.memoized_property - def attrs(self): - """Return a namespace representing each attribute on - the mapped object, including its current value - and history. - - The returned object is an instance of :class:`.AttributeState`. - This object allows inspection of the current data - within an attribute as well as attribute history - since the last flush. - - """ - return util.ImmutableProperties( - dict( - (key, AttributeState(self, key)) - for key in self.manager - ) - ) - - @property - def transient(self): - """Return true if the object is :term:`transient`. - - .. seealso:: - - :ref:`session_object_states` - - """ - return self.key is None and \ - not self._attached - - @property - def pending(self): - """Return true if the object is :term:`pending`. - - - .. seealso:: - - :ref:`session_object_states` - - """ - return self.key is None and \ - self._attached - - @property - def persistent(self): - """Return true if the object is :term:`persistent`. - - .. seealso:: - - :ref:`session_object_states` - - """ - return self.key is not None and \ - self._attached - - @property - def detached(self): - """Return true if the object is :term:`detached`. - - .. 
seealso::
-
-            :ref:`session_object_states`
-
-        """
-        return self.key is not None and \
-            not self._attached
-
-    @property
-    @util.dependencies("sqlalchemy.orm.session")
-    def _attached(self, sessionlib):
-        return self.session_id is not None and \
-            self.session_id in sessionlib._sessions
-
-    @property
-    @util.dependencies("sqlalchemy.orm.session")
-    def session(self, sessionlib):
-        """Return the owning :class:`.Session` for this instance,
-        or ``None`` if none available.
-
-        Note that the result here can in some cases be *different*
-        from that of ``obj in session``; an object that's been deleted
-        will report as not ``in session``, however if the transaction is
-        still in progress, this attribute will still refer to that session.
-        Only when the transaction is completed does the object become
-        fully detached under normal circumstances.
-
-        """
-        return sessionlib._state_session(self)
-
-    @property
-    def object(self):
-        """Return the mapped object represented by this
-        :class:`.InstanceState`."""
-        return self.obj()
-
-    @property
-    def identity(self):
-        """Return the mapped identity of the mapped object.
-        This is the primary key identity as persisted by the ORM
-        which can always be passed directly to
-        :meth:`.Query.get`.
-
-        Returns ``None`` if the object has no primary key identity.
-
-        .. note::
-            An object which is transient or pending
-            does **not** have a mapped identity until it is flushed,
-            even if its attributes include primary key values.
-
-        """
-        if self.key is None:
-            return None
-        else:
-            return self.key[1]
-
-    @property
-    def identity_key(self):
-        """Return the identity key for the mapped object.
-
-        This is the key used to locate the object within
-        the :attr:`.Session.identity_map` mapping. It contains
-        the identity as returned by :attr:`.identity` within it.
-
-
-        """
-        # TODO: just change .key to .identity_key across
-        # the board ? probably
-        return self.key
-
-    @util.memoized_property
-    def parents(self):
-        return {}
-
-    @util.memoized_property
-    def _pending_mutations(self):
-        return {}
-
-    @util.memoized_property
-    def mapper(self):
-        """Return the :class:`.Mapper` used for this mapped object."""
-        return self.manager.mapper
-
-    @property
-    def has_identity(self):
-        """Return ``True`` if this object has an identity key.
-
-        This should always have the same value as the
-        expression ``state.persistent or state.detached``.
-
-        """
-        return bool(self.key)
-
-    def _detach(self):
-        self.session_id = self._strong_obj = None
-
-    def _dispose(self):
-        self._detach()
-        del self.obj
-
-    def _cleanup(self, ref):
-        """Weakref callback cleanup.
-
-        This callable cleans out the state when it is being garbage
-        collected.
-
-        this _cleanup **assumes** that there are no strong refs to us!
-        Will not work otherwise!
-
-        """
-        instance_dict = self._instance_dict()
-        if instance_dict is not None:
-            instance_dict._fast_discard(self)
-            del self._instance_dict
-
-            # we can't possibly be in instance_dict._modified
-            # b.c. this is weakref cleanup only, that set
-            # is strong referencing!
-            # assert self not in instance_dict._modified
-
-        self.session_id = self._strong_obj = None
-        del self.obj
-
-    def obj(self):
-        return None
-
-    @property
-    def dict(self):
-        """Return the instance dict used by the object.
-
-        Under normal circumstances, this is always synonymous
-        with the ``__dict__`` attribute of the mapped object,
-        unless an alternative instrumentation system has been
-        configured.
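These accessors are what :func:`.inspect` exposes at runtime; an illustrative probe of an object's lifecycle state, continuing the earlier ``User``/``session`` sketch::

    from sqlalchemy import inspect

    ed = User(name='ed')
    insp = inspect(ed)
    insp.transient       # True: no identity key, not attached
    session.add(ed)
    insp.pending         # True: attached, not yet flushed
    session.commit()
    insp.persistent      # True: attached with an identity key
    insp.identity        # primary-key tuple, e.g. (1,)
    insp.session         # the owning Session, or None if detached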
- - In the case that the actual object has been garbage - collected, this accessor returns a blank dictionary. - - """ - o = self.obj() - if o is not None: - return base.instance_dict(o) - else: - return {} - - def _initialize_instance(*mixed, **kwargs): - self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa - manager = self.manager - - manager.dispatch.init(self, args, kwargs) - - try: - return manager.original_init(*mixed[1:], **kwargs) - except: - with util.safe_reraise(): - manager.dispatch.init_failure(self, args, kwargs) - - def get_history(self, key, passive): - return self.manager[key].impl.get_history(self, self.dict, passive) - - def get_impl(self, key): - return self.manager[key].impl - - def _get_pending_mutation(self, key): - if key not in self._pending_mutations: - self._pending_mutations[key] = PendingCollection() - return self._pending_mutations[key] - - def __getstate__(self): - state_dict = {'instance': self.obj()} - state_dict.update( - (k, self.__dict__[k]) for k in ( - 'committed_state', '_pending_mutations', 'modified', - 'expired', 'callables', 'key', 'parents', 'load_options', - 'class_', 'expired_attributes' - ) if k in self.__dict__ - ) - if self.load_path: - state_dict['load_path'] = self.load_path.serialize() - - state_dict['manager'] = self.manager._serialize(self, state_dict) - - return state_dict - - def __setstate__(self, state_dict): - inst = state_dict['instance'] - if inst is not None: - self.obj = weakref.ref(inst, self._cleanup) - self.class_ = inst.__class__ - else: - # None being possible here generally new as of 0.7.4 - # due to storage of state in "parents". "class_" - # also new. - self.obj = None - self.class_ = state_dict['class_'] - - self.committed_state = state_dict.get('committed_state', {}) - self._pending_mutations = state_dict.get('_pending_mutations', {}) - self.parents = state_dict.get('parents', {}) - self.modified = state_dict.get('modified', False) - self.expired = state_dict.get('expired', False) - if 'callables' in state_dict: - self.callables = state_dict['callables'] - - try: - self.expired_attributes = state_dict['expired_attributes'] - except KeyError: - self.expired_attributes = set() - # 0.9 and earlier compat - for k in list(self.callables): - if self.callables[k] is self: - self.expired_attributes.add(k) - del self.callables[k] - - self.__dict__.update([ - (k, state_dict[k]) for k in ( - 'key', 'load_options', - ) if k in state_dict - ]) - - if 'load_path' in state_dict: - self.load_path = PathRegistry.\ - deserialize(state_dict['load_path']) - - state_dict['manager'](self, inst, state_dict) - - def _reset(self, dict_, key): - """Remove the given attribute and any - callables associated with it.""" - - old = dict_.pop(key, None) - if old is not None and self.manager[key].impl.collection: - self.manager[key].impl._invalidate_collection(old) - self.expired_attributes.discard(key) - if self.callables: - self.callables.pop(key, None) - - @classmethod - def _instance_level_callable_processor(cls, manager, fn, key): - impl = manager[key].impl - if impl.collection: - def _set_callable(state, dict_, row): - if 'callables' not in state.__dict__: - state.callables = {} - old = dict_.pop(key, None) - if old is not None: - impl._invalidate_collection(old) - state.callables[key] = fn - else: - def _set_callable(state, dict_, row): - if 'callables' not in state.__dict__: - state.callables = {} - state.callables[key] = fn - return _set_callable - - def _expire(self, dict_, modified_set): - self.expired = True - - if self.modified: - 
modified_set.discard(self) - self.committed_state.clear() - self.modified = False - - self._strong_obj = None - - if '_pending_mutations' in self.__dict__: - del self.__dict__['_pending_mutations'] - - if 'parents' in self.__dict__: - del self.__dict__['parents'] - - self.expired_attributes.update( - [impl.key for impl in self.manager._scalar_loader_impls - if impl.expire_missing or impl.key in dict_] - ) - - if self.callables: - for k in self.expired_attributes.intersection(self.callables): - del self.callables[k] - - for k in self.manager._collection_impl_keys.intersection(dict_): - collection = dict_.pop(k) - collection._sa_adapter.invalidated = True - - for key in self.manager._all_key_set.intersection(dict_): - del dict_[key] - - self.manager.dispatch.expire(self, None) - - def _expire_attributes(self, dict_, attribute_names): - pending = self.__dict__.get('_pending_mutations', None) - - callables = self.callables - - for key in attribute_names: - impl = self.manager[key].impl - if impl.accepts_scalar_loader: - self.expired_attributes.add(key) - if callables and key in callables: - del callables[key] - old = dict_.pop(key, None) - if impl.collection and old is not None: - impl._invalidate_collection(old) - - self.committed_state.pop(key, None) - if pending: - pending.pop(key, None) - - self.manager.dispatch.expire(self, attribute_names) - - def _load_expired(self, state, passive): - """__call__ allows the InstanceState to act as a deferred - callable for loading expired attributes, which is also - serializable (picklable). - - """ - - if not passive & SQL_OK: - return PASSIVE_NO_RESULT - - toload = self.expired_attributes.\ - intersection(self.unmodified) - - self.manager.deferred_scalar_loader(self, toload) - - # if the loader failed, or this - # instance state didn't have an identity, - # the attributes still might be in the callables - # dict. ensure they are removed. - self.expired_attributes.clear() - - return ATTR_WAS_SET - - @property - def unmodified(self): - """Return the set of keys which have no uncommitted changes""" - - return set(self.manager).difference(self.committed_state) - - def unmodified_intersection(self, keys): - """Return self.unmodified.intersection(keys).""" - - return set(keys).intersection(self.manager).\ - difference(self.committed_state) - - @property - def unloaded(self): - """Return the set of keys which do not have a loaded value. - - This includes expired attributes and any other attribute that - was never populated or modified. 
- - """ - return set(self.manager).\ - difference(self.committed_state).\ - difference(self.dict) - - @property - def _unloaded_non_object(self): - return self.unloaded.intersection( - attr for attr in self.manager - if self.manager[attr].impl.accepts_scalar_loader - ) - - def _instance_dict(self): - return None - - def _modified_event( - self, dict_, attr, previous, collection=False, force=False): - if not attr.send_modified_events: - return - if attr.key not in self.committed_state or force: - if collection: - if previous is NEVER_SET: - if attr.key in dict_: - previous = dict_[attr.key] - - if previous not in (None, NO_VALUE, NEVER_SET): - previous = attr.copy(previous) - - self.committed_state[attr.key] = previous - - # assert self._strong_obj is None or self.modified - - if (self.session_id and self._strong_obj is None) \ - or not self.modified: - self.modified = True - instance_dict = self._instance_dict() - if instance_dict: - instance_dict._modified.add(self) - - # only create _strong_obj link if attached - # to a session - - inst = self.obj() - if self.session_id: - self._strong_obj = inst - - if inst is None: - raise orm_exc.ObjectDereferencedError( - "Can't emit change event for attribute '%s' - " - "parent object of type %s has been garbage " - "collected." - % ( - self.manager[attr.key], - base.state_class_str(self) - )) - - def _commit(self, dict_, keys): - """Commit attributes. - - This is used by a partial-attribute load operation to mark committed - those attributes which were refreshed from the database. - - Attributes marked as "expired" can potentially remain "expired" after - this step if a value was not populated in state.dict. - - """ - for key in keys: - self.committed_state.pop(key, None) - - self.expired = False - - self.expired_attributes.difference_update( - set(keys).intersection(dict_)) - - # the per-keys commit removes object-level callables, - # while that of commit_all does not. it's not clear - # if this behavior has a clear rationale, however tests do - # ensure this is what it does. - if self.callables: - for key in set(self.callables).\ - intersection(keys).\ - intersection(dict_): - del self.callables[key] - - def _commit_all(self, dict_, instance_dict=None): - """commit all attributes unconditionally. - - This is used after a flush() or a full load/refresh - to remove all pending state from the instance. - - - all attributes are marked as "committed" - - the "strong dirty reference" is removed - - the "modified" flag is set to False - - any "expired" markers for scalar attributes loaded are removed. - - lazy load callables for objects / collections *stay* - - Attributes marked as "expired" can potentially remain - "expired" after this step if a value was not populated in state.dict. - - """ - self._commit_all_states([(self, dict_)], instance_dict) - - @classmethod - def _commit_all_states(self, iter, instance_dict=None): - """Mass / highly inlined version of commit_all().""" - - for state, dict_ in iter: - state_dict = state.__dict__ - - state.committed_state.clear() - - if '_pending_mutations' in state_dict: - del state_dict['_pending_mutations'] - - state.expired_attributes.difference_update(dict_) - - if instance_dict and state.modified: - instance_dict._modified.discard(state) - - state.modified = state.expired = False - state._strong_obj = None - - -class AttributeState(object): - """Provide an inspection interface corresponding - to a particular attribute on a particular mapped object. 
- - The :class:`.AttributeState` object is accessed - via the :attr:`.InstanceState.attrs` collection - of a particular :class:`.InstanceState`:: - - from sqlalchemy import inspect - - insp = inspect(some_mapped_object) - attr_state = insp.attrs.some_attribute - - """ - - def __init__(self, state, key): - self.state = state - self.key = key - - @property - def loaded_value(self): - """The current value of this attribute as loaded from the database. - - If the value has not been loaded, or is otherwise not present - in the object's dictionary, returns NO_VALUE. - - """ - return self.state.dict.get(self.key, NO_VALUE) - - @property - def value(self): - """Return the value of this attribute. - - This operation is equivalent to accessing the object's - attribute directly or via ``getattr()``, and will fire - off any pending loader callables if needed. - - """ - return self.state.manager[self.key].__get__( - self.state.obj(), self.state.class_) - - @property - def history(self): - """Return the current pre-flush change history for - this attribute, via the :class:`.History` interface. - - This method will **not** emit loader callables if the value of the - attribute is unloaded. - - .. seealso:: - - :meth:`.AttributeState.load_history` - retrieve history - using loader callables if the value is not locally present. - - :func:`.attributes.get_history` - underlying function - - """ - return self.state.get_history(self.key, - PASSIVE_NO_INITIALIZE) - - def load_history(self): - """Return the current pre-flush change history for - this attribute, via the :class:`.History` interface. - - This method **will** emit loader callables if the value of the - attribute is unloaded. - - .. seealso:: - - :attr:`.AttributeState.history` - - :func:`.attributes.get_history` - underlying function - - .. versionadded:: 0.9.0 - - """ - return self.state.get_history(self.key, - PASSIVE_OFF ^ INIT_OK) - - -class PendingCollection(object): - """A writable placeholder for an unloaded collection. - - Stores items appended to and removed from a collection that has not yet - been loaded. When the collection is loaded, the changes stored in - PendingCollection are applied to it to produce the final result. - - """ - - def __init__(self): - self.deleted_items = util.IdentitySet() - self.added_items = util.OrderedIdentitySet() - - def append(self, value): - if value in self.deleted_items: - self.deleted_items.remove(value) - else: - self.added_items.add(value) - - def remove(self, value): - if value in self.added_items: - self.added_items.remove(value) - else: - self.deleted_items.add(value) diff --git a/python/sqlalchemy/orm/strategies.py b/python/sqlalchemy/orm/strategies.py deleted file mode 100644 index 67dac1cc..00000000 --- a/python/sqlalchemy/orm/strategies.py +++ /dev/null @@ -1,1608 +0,0 @@ -# orm/strategies.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""sqlalchemy.orm.interfaces.LoaderStrategy - implementations, and related MapperOptions.""" - -from .. import exc as sa_exc, inspect -from .. import util, log, event -from ..sql import util as sql_util, visitors -from .. import sql -from . import ( - attributes, interfaces, exc as orm_exc, loading, - unitofwork, util as orm_util -) -from .state import InstanceState -from .util import _none_set -from . 
import properties -from .interfaces import ( - LoaderStrategy, StrategizedProperty -) -from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE -from .session import _state_session -import itertools - - -def _register_attribute( - strategy, mapper, useobject, - compare_function=None, - typecallable=None, - uselist=False, - callable_=None, - proxy_property=None, - active_history=False, - impl_class=None, - **kw -): - - prop = strategy.parent_property - - attribute_ext = list(util.to_list(prop.extension, default=[])) - - listen_hooks = [] - - if useobject and prop.single_parent: - listen_hooks.append(single_parent_validator) - - if prop.key in prop.parent.validators: - fn, opts = prop.parent.validators[prop.key] - listen_hooks.append( - lambda desc, prop: orm_util._validator_events( - desc, - prop.key, fn, **opts) - ) - - if useobject: - listen_hooks.append(unitofwork.track_cascade_events) - - # need to assemble backref listeners - # after the singleparentvalidator, mapper validator - backref = kw.pop('backref', None) - if backref: - listen_hooks.append( - lambda desc, prop: attributes.backref_listeners( - desc, - backref, - uselist - ) - ) - - for m in mapper.self_and_descendants: - if prop is m._props.get(prop.key): - - desc = attributes.register_attribute_impl( - m.class_, - prop.key, - parent_token=prop, - uselist=uselist, - compare_function=compare_function, - useobject=useobject, - extension=attribute_ext, - trackparent=useobject and ( - prop.single_parent - or prop.direction is interfaces.ONETOMANY), - typecallable=typecallable, - callable_=callable_, - active_history=active_history, - impl_class=impl_class, - send_modified_events=not useobject or not prop.viewonly, - doc=prop.doc, - **kw - ) - - for hook in listen_hooks: - hook(desc, prop) - - -@properties.ColumnProperty.strategy_for(instrument=False, deferred=False) -class UninstrumentedColumnLoader(LoaderStrategy): - """Represent the a non-instrumented MapperProperty. - - The polymorphic_on argument of mapper() often results in this, - if the argument is against the with_polymorphic selectable. - - """ - __slots__ = 'columns', - - def __init__(self, parent): - super(UninstrumentedColumnLoader, self).__init__(parent) - self.columns = self.parent_property.columns - - def setup_query( - self, context, entity, path, loadopt, adapter, - column_collection=None, **kwargs): - for c in self.columns: - if adapter: - c = adapter.columns[c] - column_collection.append(c) - - def create_row_processor( - self, context, path, loadopt, - mapper, result, adapter, populators): - pass - - -@log.class_logger -@properties.ColumnProperty.strategy_for(instrument=True, deferred=False) -class ColumnLoader(LoaderStrategy): - """Provide loading behavior for a :class:`.ColumnProperty`.""" - - __slots__ = 'columns', 'is_composite' - - def __init__(self, parent): - super(ColumnLoader, self).__init__(parent) - self.columns = self.parent_property.columns - self.is_composite = hasattr(self.parent_property, 'composite_class') - - def setup_query( - self, context, entity, path, loadopt, - adapter, column_collection, memoized_populators, **kwargs): - - for c in self.columns: - if adapter: - c = adapter.columns[c] - column_collection.append(c) - - fetch = self.columns[0] - if adapter: - fetch = adapter.columns[fetch] - memoized_populators[self.parent_property] = fetch - - def init_class_attribute(self, mapper): - self.is_class_level = True - coltype = self.columns[0].type - # TODO: check all columns ? check for foreign key as well? 
- active_history = self.parent_property.active_history or \ - self.columns[0].primary_key or \ - mapper.version_id_col in set(self.columns) - - _register_attribute( - self, mapper, useobject=False, - compare_function=coltype.compare_values, - active_history=active_history - ) - - def create_row_processor( - self, context, path, - loadopt, mapper, result, adapter, populators): - # look through list of columns represented here - # to see which, if any, is present in the row. - for col in self.columns: - if adapter: - col = adapter.columns[col] - getter = result._getter(col) - if getter: - populators["quick"].append((self.key, getter)) - break - else: - populators["expire"].append((self.key, True)) - - -@log.class_logger -@properties.ColumnProperty.strategy_for(deferred=True, instrument=True) -class DeferredColumnLoader(LoaderStrategy): - """Provide loading behavior for a deferred :class:`.ColumnProperty`.""" - - __slots__ = 'columns', 'group' - - def __init__(self, parent): - super(DeferredColumnLoader, self).__init__(parent) - if hasattr(self.parent_property, 'composite_class'): - raise NotImplementedError("Deferred loading for composite " - "types not implemented yet") - self.columns = self.parent_property.columns - self.group = self.parent_property.group - - def create_row_processor( - self, context, path, loadopt, - mapper, result, adapter, populators): - - # this path currently does not check the result - # for the column; this is because in most cases we are - # working just with the setup_query() directive which does - # not support this, and the behavior here should be consistent. - if not self.is_class_level: - set_deferred_for_local_state = \ - self.parent_property._deferred_column_loader - populators["new"].append((self.key, set_deferred_for_local_state)) - else: - populators["expire"].append((self.key, False)) - - def init_class_attribute(self, mapper): - self.is_class_level = True - - _register_attribute( - self, mapper, useobject=False, - compare_function=self.columns[0].type.compare_values, - callable_=self._load_for_state, - expire_missing=False - ) - - def setup_query( - self, context, entity, path, loadopt, - adapter, column_collection, memoized_populators, - only_load_props=None, **kw): - - if ( - ( - loadopt and - 'undefer_pks' in loadopt.local_opts and - set(self.columns).intersection( - self.parent._should_undefer_in_wildcard) - ) - or - ( - loadopt and - self.group and - loadopt.local_opts.get('undefer_group', False) == self.group - ) - or - ( - only_load_props and self.key in only_load_props - ) - ): - self.parent_property._get_strategy_by_cls(ColumnLoader).\ - setup_query(context, entity, - path, loadopt, adapter, - column_collection, memoized_populators, **kw) - elif self.is_class_level: - memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED - else: - memoized_populators[self.parent_property] = _DEFER_FOR_STATE - - def _load_for_state(self, state, passive): - if not state.key: - return attributes.ATTR_EMPTY - - if not passive & attributes.SQL_OK: - return attributes.PASSIVE_NO_RESULT - - localparent = state.manager.mapper - - if self.group: - toload = [ - p.key for p in - localparent.iterate_properties - if isinstance(p, StrategizedProperty) and - isinstance(p.strategy, DeferredColumnLoader) and - p.group == self.group - ] - else: - toload = [self.key] - - # narrow the keys down to just those which have no history - group = [k for k in toload if k in state.unmodified] - - session = _state_session(state) - if session is None: - raise 
orm_exc.DetachedInstanceError( - "Parent instance %s is not bound to a Session; " - "deferred load operation of attribute '%s' cannot proceed" % - (orm_util.state_str(state), self.key) - ) - - query = session.query(localparent) - if loading.load_on_ident( - query, state.key, - only_load_props=group, refresh_state=state) is None: - raise orm_exc.ObjectDeletedError(state) - - return attributes.ATTR_WAS_SET - - -class LoadDeferredColumns(object): - """serializable loader object used by DeferredColumnLoader""" - - def __init__(self, key): - self.key = key - - def __call__(self, state, passive=attributes.PASSIVE_OFF): - key = self.key - - localparent = state.manager.mapper - prop = localparent._props[key] - strategy = prop._strategies[DeferredColumnLoader] - return strategy._load_for_state(state, passive) - - -class AbstractRelationshipLoader(LoaderStrategy): - """LoaderStratgies which deal with related objects.""" - - __slots__ = 'mapper', 'target', 'uselist' - - def __init__(self, parent): - super(AbstractRelationshipLoader, self).__init__(parent) - self.mapper = self.parent_property.mapper - self.target = self.parent_property.target - self.uselist = self.parent_property.uselist - - -@log.class_logger -@properties.RelationshipProperty.strategy_for(lazy="noload") -@properties.RelationshipProperty.strategy_for(lazy=None) -class NoLoader(AbstractRelationshipLoader): - """Provide loading behavior for a :class:`.RelationshipProperty` - with "lazy=None". - - """ - - __slots__ = () - - def init_class_attribute(self, mapper): - self.is_class_level = True - - _register_attribute( - self, mapper, - useobject=True, - uselist=self.parent_property.uselist, - typecallable=self.parent_property.collection_class, - ) - - def create_row_processor( - self, context, path, loadopt, mapper, - result, adapter, populators): - def invoke_no_load(state, dict_, row): - if self.uselist: - state.manager.get_impl(self.key).initialize(state, dict_) - else: - dict_[self.key] = None - populators["new"].append((self.key, invoke_no_load)) - - -@log.class_logger -@properties.RelationshipProperty.strategy_for(lazy=True) -@properties.RelationshipProperty.strategy_for(lazy="select") -class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots): - """Provide loading behavior for a :class:`.RelationshipProperty` - with "lazy=True", that is loads when first accessed. - - """ - - __slots__ = ( - '_lazywhere', '_rev_lazywhere', 'use_get', '_bind_to_col', - '_equated_columns', '_rev_bind_to_col', '_rev_equated_columns', - '_simple_lazy_clause') - - def __init__(self, parent): - super(LazyLoader, self).__init__(parent) - join_condition = self.parent_property._join_condition - self._lazywhere, \ - self._bind_to_col, \ - self._equated_columns = join_condition.create_lazy_clause() - - self._rev_lazywhere, \ - self._rev_bind_to_col, \ - self._rev_equated_columns = join_condition.create_lazy_clause( - reverse_direction=True) - - self.logger.info("%s lazy loading clause %s", self, self._lazywhere) - - # determine if our "lazywhere" clause is the same as the mapper's - # get() clause. 
then we can just use mapper.get() - self.use_get = not self.uselist and \ - self.mapper._get_clause[0].compare( - self._lazywhere, - use_proxies=True, - equivalents=self.mapper._equivalent_columns - ) - - if self.use_get: - for col in list(self._equated_columns): - if col in self.mapper._equivalent_columns: - for c in self.mapper._equivalent_columns[col]: - self._equated_columns[c] = self._equated_columns[col] - - self.logger.info("%s will use query.get() to " - "optimize instance loads", self) - - def init_class_attribute(self, mapper): - self.is_class_level = True - - active_history = ( - self.parent_property.active_history or - self.parent_property.direction is not interfaces.MANYTOONE or - not self.use_get - ) - - # MANYTOONE currently only needs the - # "old" value for delete-orphan - # cascades. the required _SingleParentValidator - # will enable active_history - # in that case. otherwise we don't need the - # "old" value during backref operations. - _register_attribute( - self, - mapper, - useobject=True, - callable_=self._load_for_state, - uselist=self.parent_property.uselist, - backref=self.parent_property.back_populates, - typecallable=self.parent_property.collection_class, - active_history=active_history - ) - - def _memoized_attr__simple_lazy_clause(self): - criterion, bind_to_col = ( - self._lazywhere, - self._bind_to_col - ) - - params = [] - - def visit_bindparam(bindparam): - bindparam.unique = False - if bindparam._identifying_key in bind_to_col: - params.append(( - bindparam.key, bind_to_col[bindparam._identifying_key], - None)) - else: - params.append((bindparam.key, None, bindparam.value)) - - criterion = visitors.cloned_traverse( - criterion, {}, {'bindparam': visit_bindparam} - ) - - return criterion, params - - def _generate_lazy_clause(self, state, passive): - criterion, param_keys = self._simple_lazy_clause - - if state is None: - return sql_util.adapt_criterion_to_null( - criterion, [key for key, ident, value in param_keys]) - - mapper = self.parent_property.parent - - o = state.obj() # strong ref - dict_ = attributes.instance_dict(o) - - if passive & attributes.INIT_OK: - passive ^= attributes.INIT_OK - - params = {} - for key, ident, value in param_keys: - if ident is not None: - if passive and passive & attributes.LOAD_AGAINST_COMMITTED: - value = mapper._get_committed_state_attr_by_column( - state, dict_, ident, passive) - else: - value = mapper._get_state_attr_by_column( - state, dict_, ident, passive) - - params[key] = value - - return criterion, params - - def _load_for_state(self, state, passive): - if not state.key and ( - ( - not self.parent_property.load_on_pending - and not state._load_pending - ) - or not state.session_id - ): - return attributes.ATTR_EMPTY - - pending = not state.key - ident_key = None - - if ( - (not passive & attributes.SQL_OK and not self.use_get) - or - (not passive & attributes.NON_PERSISTENT_OK and pending) - ): - return attributes.PASSIVE_NO_RESULT - - session = _state_session(state) - if not session: - raise orm_exc.DetachedInstanceError( - "Parent instance %s is not bound to a Session; " - "lazy load operation of attribute '%s' cannot proceed" % - (orm_util.state_str(state), self.key) - ) - - # if we have a simple primary key load, check the - # identity map without generating a Query at all - if self.use_get: - ident = self._get_ident_for_use_get( - session, - state, - passive - ) - if attributes.PASSIVE_NO_RESULT in ident: - return attributes.PASSIVE_NO_RESULT - elif attributes.NEVER_SET in ident: - return 
attributes.NEVER_SET - - if _none_set.issuperset(ident): - return None - - ident_key = self.mapper.identity_key_from_primary_key(ident) - instance = loading.get_from_identity(session, ident_key, passive) - if instance is not None: - return instance - elif not passive & attributes.SQL_OK or \ - not passive & attributes.RELATED_OBJECT_OK: - return attributes.PASSIVE_NO_RESULT - - return self._emit_lazyload(session, state, ident_key, passive) - - def _get_ident_for_use_get(self, session, state, passive): - instance_mapper = state.manager.mapper - - if passive & attributes.LOAD_AGAINST_COMMITTED: - get_attr = instance_mapper._get_committed_state_attr_by_column - else: - get_attr = instance_mapper._get_state_attr_by_column - - dict_ = state.dict - - return [ - get_attr( - state, - dict_, - self._equated_columns[pk], - passive=passive) - for pk in self.mapper.primary_key - ] - - @util.dependencies("sqlalchemy.orm.strategy_options") - def _emit_lazyload( - self, strategy_options, session, state, ident_key, passive): - - q = session.query(self.mapper)._adapt_all_clauses() - if self.parent_property.secondary is not None: - q = q.select_from(self.mapper, self.parent_property.secondary) - - q = q._with_invoke_all_eagers(False) - - pending = not state.key - - # don't autoflush on pending - if pending or passive & attributes.NO_AUTOFLUSH: - q = q.autoflush(False) - - if state.load_path: - q = q._with_current_path(state.load_path[self.parent_property]) - - if state.load_options: - q = q._conditional_options(*state.load_options) - - if self.use_get: - return loading.load_on_ident(q, ident_key) - - if self.parent_property.order_by: - q = q.order_by(*util.to_list(self.parent_property.order_by)) - - for rev in self.parent_property._reverse_property: - # reverse props that are MANYTOONE are loading *this* - # object from get(), so don't need to eager out to those. - if rev.direction is interfaces.MANYTOONE and \ - rev._use_get and \ - not isinstance(rev.strategy, LazyLoader): - q = q.options( - strategy_options.Load(rev.parent).lazyload(rev.key)) - - lazy_clause, params = self._generate_lazy_clause( - state, passive=passive) - - if pending: - if util.has_intersection( - orm_util._none_set, params.values()): - return None - elif util.has_intersection(orm_util._never_set, params.values()): - return None - - q = q.filter(lazy_clause).params(params) - - result = q.all() - if self.uselist: - return result - else: - l = len(result) - if l: - if l > 1: - util.warn( - "Multiple rows returned with " - "uselist=False for lazily-loaded attribute '%s' " - % self.parent_property) - - return result[0] - else: - return None - - def create_row_processor( - self, context, path, loadopt, - mapper, result, adapter, populators): - key = self.key - if not self.is_class_level: - # we are not the primary manager for this attribute - # on this class - set up a - # per-instance lazyloader, which will override the - # class-level behavior. - # this currently only happens when using a - # "lazyload" option on a "no load" - # attribute - "eager" attributes always have a - # class-level lazyloader installed. 
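-            # the per-instance callable produced here is stored in the
-            # object's state and emits the lazy SELECT the first time
-            # the attribute is accessed on that specific instance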
- set_lazy_callable = InstanceState._instance_level_callable_processor( - mapper.class_manager, - LoadLazyAttribute(key, self._strategy_keys[0]), key) - - populators["new"].append((self.key, set_lazy_callable)) - elif context.populate_existing or mapper.always_refresh: - def reset_for_lazy_callable(state, dict_, row): - # we are the primary manager for this attribute on - # this class - reset its - # per-instance attribute state, so that the class-level - # lazy loader is - # executed when next referenced on this instance. - # this is needed in - # populate_existing() types of scenarios to reset - # any existing state. - state._reset(dict_, key) - - populators["new"].append((self.key, reset_for_lazy_callable)) - - -class LoadLazyAttribute(object): - """serializable loader object used by LazyLoader""" - - def __init__(self, key, strategy_key=(('lazy', 'select'),)): - self.key = key - self.strategy_key = strategy_key - - def __call__(self, state, passive=attributes.PASSIVE_OFF): - key = self.key - instance_mapper = state.manager.mapper - prop = instance_mapper._props[key] - strategy = prop._strategies[self.strategy_key] - - return strategy._load_for_state(state, passive) - - -@properties.RelationshipProperty.strategy_for(lazy="immediate") -class ImmediateLoader(AbstractRelationshipLoader): - __slots__ = () - - def init_class_attribute(self, mapper): - self.parent_property.\ - _get_strategy_by_cls(LazyLoader).\ - init_class_attribute(mapper) - - def setup_query( - self, context, entity, - path, loadopt, adapter, column_collection=None, - parentmapper=None, **kwargs): - pass - - def create_row_processor( - self, context, path, loadopt, - mapper, result, adapter, populators): - def load_immediate(state, dict_, row): - state.get_impl(self.key).get(state, dict_) - - populators["delayed"].append((self.key, load_immediate)) - - -@log.class_logger -@properties.RelationshipProperty.strategy_for(lazy="subquery") -class SubqueryLoader(AbstractRelationshipLoader): - __slots__ = 'join_depth', - - def __init__(self, parent): - super(SubqueryLoader, self).__init__(parent) - self.join_depth = self.parent_property.join_depth - - def init_class_attribute(self, mapper): - self.parent_property.\ - _get_strategy_by_cls(LazyLoader).\ - init_class_attribute(mapper) - - def setup_query( - self, context, entity, - path, loadopt, adapter, - column_collection=None, - parentmapper=None, **kwargs): - - if not context.query._enable_eagerloads: - return - elif context.query._yield_per: - context.query._no_yield_per("subquery") - - path = path[self.parent_property] - - # build up a path indicating the path from the leftmost - # entity to the thing we're subquery loading. - with_poly_info = path.get( - context.attributes, - "path_with_polymorphic", None) - if with_poly_info is not None: - effective_entity = with_poly_info.entity - else: - effective_entity = self.mapper - - subq_path = context.attributes.get( - ('subquery_path', None), - orm_util.PathRegistry.root) - - subq_path = subq_path + path - - # if not via query option, check for - # a cycle - if not path.contains(context.attributes, "loader"): - if self.join_depth: - if path.length / 2 > self.join_depth: - return - elif subq_path.contains_mapper(self.mapper): - return - - leftmost_mapper, leftmost_attr, leftmost_relationship = \ - self._get_leftmost(subq_path) - - orig_query = context.attributes.get( - ("orig_query", SubqueryLoader), - context.query) - - # generate a new Query from the original, then - # produce a subquery from it. 
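-        # the subquery restates the original query to select only the
-        # "leftmost" linking columns; the query assembled below joins
-        # from that subquery out to the related entity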
- left_alias = self._generate_from_original_query( - orig_query, leftmost_mapper, - leftmost_attr, leftmost_relationship, - entity.entity_zero - ) - - # generate another Query that will join the - # left alias to the target relationships. - # basically doing a longhand - # "from_self()". (from_self() itself not quite industrial - # strength enough for all contingencies...but very close) - q = orig_query.session.query(effective_entity) - q._attributes = { - ("orig_query", SubqueryLoader): orig_query, - ('subquery_path', None): subq_path - } - - q = q._set_enable_single_crit(False) - to_join, local_attr, parent_alias = \ - self._prep_for_joins(left_alias, subq_path) - q = q.order_by(*local_attr) - q = q.add_columns(*local_attr) - q = self._apply_joins( - q, to_join, left_alias, - parent_alias, effective_entity) - - q = self._setup_options(q, subq_path, orig_query, effective_entity) - q = self._setup_outermost_orderby(q) - - # add new query to attributes to be picked up - # by create_row_processor - path.set(context.attributes, "subquery", q) - - def _get_leftmost(self, subq_path): - subq_path = subq_path.path - subq_mapper = orm_util._class_to_mapper(subq_path[0]) - - # determine attributes of the leftmost mapper - if self.parent.isa(subq_mapper) and \ - self.parent_property is subq_path[1]: - leftmost_mapper, leftmost_prop = \ - self.parent, self.parent_property - else: - leftmost_mapper, leftmost_prop = \ - subq_mapper, \ - subq_path[1] - - leftmost_cols = leftmost_prop.local_columns - - leftmost_attr = [ - getattr( - subq_path[0].entity, - leftmost_mapper._columntoproperty[c].key) - for c in leftmost_cols - ] - - return leftmost_mapper, leftmost_attr, leftmost_prop - - def _generate_from_original_query( - self, - orig_query, leftmost_mapper, - leftmost_attr, leftmost_relationship, orig_entity - ): - # reformat the original query - # to look only for significant columns - q = orig_query._clone().correlate(None) - - # set a real "from" if not present, as this is more - # accurate than just going off of the column expression - if not q._from_obj and orig_entity.mapper.isa(leftmost_mapper): - q._set_select_from([orig_entity], False) - target_cols = q._adapt_col_list(leftmost_attr) - - # select from the identity columns of the outer - q._set_entities(target_cols) - - distinct_target_key = leftmost_relationship.distinct_target_key - - if distinct_target_key is True: - q._distinct = True - elif distinct_target_key is None: - # if target_cols refer to a non-primary key or only - # part of a composite primary key, set the q as distinct - for t in set(c.table for c in target_cols): - if not set(target_cols).issuperset(t.primary_key): - q._distinct = True - break - - if q._order_by is False: - q._order_by = leftmost_mapper.order_by - - # don't need ORDER BY if no limit/offset - if q._limit is None and q._offset is None: - q._order_by = None - - # the original query now becomes a subquery - # which we'll join onto. - - embed_q = q.with_labels().subquery() - left_alias = orm_util.AliasedClass( - leftmost_mapper, embed_q, - use_mapper_path=True) - return left_alias - - def _prep_for_joins(self, left_alias, subq_path): - # figure out what's being joined. a.k.a. the fun part - to_join = [] - pairs = list(subq_path.pairs()) - - for i, (mapper, prop) in enumerate(pairs): - if i > 0: - # look at the previous mapper in the chain - - # if it is as or more specific than this prop's - # mapper, use that instead. 
- # note we have an assumption here that - # the non-first element is always going to be a mapper, - # not an AliasedClass - - prev_mapper = pairs[i - 1][1].mapper - to_append = prev_mapper if prev_mapper.isa(mapper) else mapper - else: - to_append = mapper - - to_join.append((to_append, prop.key)) - - # determine the immediate parent class we are joining from, - # which needs to be aliased. - if len(to_join) > 1: - info = inspect(to_join[-1][0]) - - if len(to_join) < 2: - # in the case of a one level eager load, this is the - # leftmost "left_alias". - parent_alias = left_alias - elif info.mapper.isa(self.parent): - # In the case of multiple levels, retrieve - # it from subq_path[-2]. This is the same as self.parent - # in the vast majority of cases, and [ticket:2014] - # illustrates a case where sub_path[-2] is a subclass - # of self.parent - parent_alias = orm_util.AliasedClass( - to_join[-1][0], - use_mapper_path=True) - else: - # if of_type() were used leading to this relationship, - # self.parent is more specific than subq_path[-2] - parent_alias = orm_util.AliasedClass( - self.parent, - use_mapper_path=True) - - local_cols = self.parent_property.local_columns - - local_attr = [ - getattr(parent_alias, self.parent._columntoproperty[c].key) - for c in local_cols - ] - return to_join, local_attr, parent_alias - - def _apply_joins( - self, q, to_join, left_alias, parent_alias, - effective_entity): - for i, (mapper, key) in enumerate(to_join): - - # we need to use query.join() as opposed to - # orm.join() here because of the - # rich behavior it brings when dealing with - # "with_polymorphic" mappers. "aliased" - # and "from_joinpoint" take care of most of - # the chaining and aliasing for us. - - first = i == 0 - middle = i < len(to_join) - 1 - second_to_last = i == len(to_join) - 2 - last = i == len(to_join) - 1 - - if first: - attr = getattr(left_alias, key) - if last and effective_entity is not self.mapper: - attr = attr.of_type(effective_entity) - else: - if last and effective_entity is not self.mapper: - attr = getattr(parent_alias, key).\ - of_type(effective_entity) - else: - attr = getattr(mapper.entity, key) - - if second_to_last: - q = q.join(parent_alias, attr, from_joinpoint=True) - else: - q = q.join(attr, aliased=middle, from_joinpoint=True) - return q - - def _setup_options(self, q, subq_path, orig_query, effective_entity): - # propagate loader options etc. to the new query. - # these will fire relative to subq_path. - q = q._with_current_path(subq_path) - q = q._conditional_options(*orig_query._with_options) - if orig_query._populate_existing: - q._populate_existing = orig_query._populate_existing - - return q - - def _setup_outermost_orderby(self, q): - if self.parent_property.order_by: - # if there's an ORDER BY, alias it the same - # way joinedloader does, but we have to pull out - # the "eagerjoin" from the query. - # this really only picks up the "secondary" table - # right now. - eagerjoin = q._from_obj[0] - eager_order_by = \ - eagerjoin._target_adapter.\ - copy_and_process( - util.to_list( - self.parent_property.order_by - ) - ) - q = q.order_by(*eager_order_by) - return q - - class _SubqCollections(object): - """Given a :class:`.Query` used to emit the "subquery load", - provide a load interface that executes the query at the - first moment a value is needed. 
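-
-        The grouped results are cached after the first access, so one
-        execution of the subquery populates the collections for every
-        parent row in the enclosing result.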
- - """ - _data = None - - def __init__(self, subq): - self.subq = subq - - def get(self, key, default): - if self._data is None: - self._load() - return self._data.get(key, default) - - def _load(self): - self._data = dict( - (k, [vv[0] for vv in v]) - for k, v in itertools.groupby( - self.subq, - lambda x: x[1:] - ) - ) - - def loader(self, state, dict_, row): - if self._data is None: - self._load() - - def create_row_processor( - self, context, path, loadopt, - mapper, result, adapter, populators): - if not self.parent.class_manager[self.key].impl.supports_population: - raise sa_exc.InvalidRequestError( - "'%s' does not support object " - "population - eager loading cannot be applied." % - self) - - path = path[self.parent_property] - - subq = path.get(context.attributes, 'subquery') - - if subq is None: - return - - assert subq.session is context.session, ( - "Subquery session doesn't refer to that of " - "our context. Are there broken context caching " - "schemes being used?" - ) - - local_cols = self.parent_property.local_columns - - # cache the loaded collections in the context - # so that inheriting mappers don't re-load when they - # call upon create_row_processor again - collections = path.get(context.attributes, "collections") - if collections is None: - collections = self._SubqCollections(subq) - path.set(context.attributes, 'collections', collections) - - if adapter: - local_cols = [adapter.columns[c] for c in local_cols] - - if self.uselist: - self._create_collection_loader( - context, collections, local_cols, populators) - else: - self._create_scalar_loader( - context, collections, local_cols, populators) - - def _create_collection_loader( - self, context, collections, local_cols, populators): - def load_collection_from_subq(state, dict_, row): - collection = collections.get( - tuple([row[col] for col in local_cols]), - () - ) - state.get_impl(self.key).\ - set_committed_value(state, dict_, collection) - - populators["new"].append((self.key, load_collection_from_subq)) - if context.invoke_all_eagers: - populators["eager"].append((self.key, collections.loader)) - - def _create_scalar_loader( - self, context, collections, local_cols, populators): - def load_scalar_from_subq(state, dict_, row): - collection = collections.get( - tuple([row[col] for col in local_cols]), - (None,) - ) - if len(collection) > 1: - util.warn( - "Multiple rows returned with " - "uselist=False for eagerly-loaded attribute '%s' " - % self) - - scalar = collection[0] - state.get_impl(self.key).\ - set_committed_value(state, dict_, scalar) - - populators["new"].append((self.key, load_scalar_from_subq)) - if context.invoke_all_eagers: - populators["eager"].append((self.key, collections.loader)) - - -@log.class_logger -@properties.RelationshipProperty.strategy_for(lazy="joined") -@properties.RelationshipProperty.strategy_for(lazy=False) -class JoinedLoader(AbstractRelationshipLoader): - """Provide loading behavior for a :class:`.RelationshipProperty` - using joined eager loading. 
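-
-    A minimal usage sketch; ``User`` and ``User.addresses`` are
-    hypothetical mapped names used only for illustration::
-
-        from sqlalchemy.orm import joinedload
-
-        session.query(User).options(joinedload(User.addresses)).all()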
- - """ - - __slots__ = 'join_depth', - - def __init__(self, parent): - super(JoinedLoader, self).__init__(parent) - self.join_depth = self.parent_property.join_depth - - def init_class_attribute(self, mapper): - self.parent_property.\ - _get_strategy_by_cls(LazyLoader).init_class_attribute(mapper) - - def setup_query( - self, context, entity, path, loadopt, adapter, - column_collection=None, parentmapper=None, - chained_from_outerjoin=False, - **kwargs): - """Add a left outer join to the statement that's being constructed.""" - - if not context.query._enable_eagerloads: - return - elif context.query._yield_per and self.uselist: - context.query._no_yield_per("joined collection") - - path = path[self.parent_property] - - with_polymorphic = None - - user_defined_adapter = self._init_user_defined_eager_proc( - loadopt, context) if loadopt else False - - if user_defined_adapter is not False: - clauses, adapter, add_to_collection = \ - self._setup_query_on_user_defined_adapter( - context, entity, path, adapter, - user_defined_adapter - ) - else: - # if not via query option, check for - # a cycle - if not path.contains(context.attributes, "loader"): - if self.join_depth: - if path.length / 2 > self.join_depth: - return - elif path.contains_mapper(self.mapper): - return - - clauses, adapter, add_to_collection, chained_from_outerjoin = \ - self._generate_row_adapter( - context, entity, path, loadopt, adapter, - column_collection, parentmapper, chained_from_outerjoin - ) - - with_poly_info = path.get( - context.attributes, - "path_with_polymorphic", - None - ) - if with_poly_info is not None: - with_polymorphic = with_poly_info.with_polymorphic_mappers - else: - with_polymorphic = None - - path = path[self.mapper] - - loading._setup_entity_query( - context, self.mapper, entity, - path, clauses, add_to_collection, - with_polymorphic=with_polymorphic, - parentmapper=self.mapper, - chained_from_outerjoin=chained_from_outerjoin) - - if with_poly_info is not None and \ - None in set(context.secondary_columns): - raise sa_exc.InvalidRequestError( - "Detected unaliased columns when generating joined " - "load. Make sure to use aliased=True or flat=True " - "when using joined loading with with_polymorphic()." - ) - - def _init_user_defined_eager_proc(self, loadopt, context): - - # check if the opt applies at all - if "eager_from_alias" not in loadopt.local_opts: - # nope - return False - - path = loadopt.path.parent - - # the option applies. check if the "user_defined_eager_row_processor" - # has been built up. - adapter = path.get( - context.attributes, - "user_defined_eager_row_processor", False) - if adapter is not False: - # just return it - return adapter - - # otherwise figure it out. 
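-        # build a ColumnAdapter for the alias handed to
-        # contains_eager(..., alias=...); when no alias was given, fall
-        # back to any polymorphic adapter known for the target mapper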
- alias = loadopt.local_opts["eager_from_alias"] - - root_mapper, prop = path[-2:] - - #from .mapper import Mapper - #from .interfaces import MapperProperty - #assert isinstance(root_mapper, Mapper) - #assert isinstance(prop, MapperProperty) - - if alias is not None: - if isinstance(alias, str): - alias = prop.target.alias(alias) - adapter = sql_util.ColumnAdapter( - alias, - equivalents=prop.mapper._equivalent_columns) - else: - if path.contains(context.attributes, "path_with_polymorphic"): - with_poly_info = path.get( - context.attributes, - "path_with_polymorphic") - adapter = orm_util.ORMAdapter( - with_poly_info.entity, - equivalents=prop.mapper._equivalent_columns) - else: - adapter = context.query._polymorphic_adapters.get( - prop.mapper, None) - path.set( - context.attributes, - "user_defined_eager_row_processor", - adapter) - - return adapter - - def _setup_query_on_user_defined_adapter( - self, context, entity, - path, adapter, user_defined_adapter): - - # apply some more wrapping to the "user defined adapter" - # if we are setting up the query for SQL render. - adapter = entity._get_entity_clauses(context.query, context) - - if adapter and user_defined_adapter: - user_defined_adapter = user_defined_adapter.wrap(adapter) - path.set( - context.attributes, "user_defined_eager_row_processor", - user_defined_adapter) - elif adapter: - user_defined_adapter = adapter - path.set( - context.attributes, "user_defined_eager_row_processor", - user_defined_adapter) - - add_to_collection = context.primary_columns - return user_defined_adapter, adapter, add_to_collection - - def _generate_row_adapter( - self, - context, entity, path, loadopt, adapter, - column_collection, parentmapper, chained_from_outerjoin): - with_poly_info = path.get( - context.attributes, - "path_with_polymorphic", - None - ) - if with_poly_info: - to_adapt = with_poly_info.entity - else: - to_adapt = orm_util.AliasedClass( - self.mapper, - flat=True, - use_mapper_path=True) - clauses = orm_util.ORMAdapter( - to_adapt, - equivalents=self.mapper._equivalent_columns, - adapt_required=True, allow_label_resolve=False, - anonymize_labels=True) - assert clauses.aliased_class is not None - - if self.parent_property.uselist: - context.multi_row_eager_loaders = True - - innerjoin = ( - loadopt.local_opts.get( - 'innerjoin', self.parent_property.innerjoin) - if loadopt is not None - else self.parent_property.innerjoin - ) - - if not innerjoin: - # if this is an outer join, all non-nested eager joins from - # this path must also be outer joins - chained_from_outerjoin = True - - context.create_eager_joins.append( - ( - self._create_eager_join, context, - entity, path, adapter, - parentmapper, clauses, innerjoin, chained_from_outerjoin - ) - ) - - add_to_collection = context.secondary_columns - path.set(context.attributes, "eager_row_processor", clauses) - - return clauses, adapter, add_to_collection, chained_from_outerjoin - - def _create_eager_join( - self, context, entity, - path, adapter, parentmapper, - clauses, innerjoin, chained_from_outerjoin): - - if parentmapper is None: - localparent = entity.mapper - else: - localparent = parentmapper - - # whether or not the Query will wrap the selectable in a subquery, - # and then attach eager load joins to that (i.e., in the case of - # LIMIT/OFFSET etc.) 
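-        # when the statement is to be wrapped in a subquery, the eager
-        # joins must attach to the enclosing SELECT rather than to the
-        # row-limited inner query, so collections aren't truncated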
- should_nest_selectable = context.multi_row_eager_loaders and \ - context.query._should_nest_selectable - - entity_key = None - - if entity not in context.eager_joins and \ - not should_nest_selectable and \ - context.from_clause: - index, clause = sql_util.find_join_source( - context.from_clause, entity.selectable) - if clause is not None: - # join to an existing FROM clause on the query. - # key it to its list index in the eager_joins dict. - # Query._compile_context will adapt as needed and - # append to the FROM clause of the select(). - entity_key, default_towrap = index, clause - - if entity_key is None: - entity_key, default_towrap = entity, entity.selectable - - towrap = context.eager_joins.setdefault(entity_key, default_towrap) - - if adapter: - if getattr(adapter, 'aliased_class', None): - onclause = getattr( - adapter.aliased_class, self.key, - self.parent_property) - else: - onclause = getattr( - orm_util.AliasedClass( - self.parent, - adapter.selectable, - use_mapper_path=True - ), - self.key, self.parent_property - ) - - else: - onclause = self.parent_property - - assert clauses.aliased_class is not None - - attach_on_outside = ( - not chained_from_outerjoin or - not innerjoin or innerjoin == 'unnested') - - if attach_on_outside: - # this is the "classic" eager join case. - eagerjoin = orm_util._ORMJoin( - towrap, - clauses.aliased_class, - onclause, - isouter=not innerjoin or ( - chained_from_outerjoin and isinstance(towrap, sql.Join) - ), _left_memo=self.parent, _right_memo=self.mapper - ) - else: - # all other cases are innerjoin=='nested' approach - eagerjoin = self._splice_nested_inner_join( - path, towrap, clauses, onclause) - - context.eager_joins[entity_key] = eagerjoin - - # send a hint to the Query as to where it may "splice" this join - eagerjoin.stop_on = entity.selectable - - if self.parent_property.secondary is None and \ - not parentmapper: - # for parentclause that is the non-eager end of the join, - # ensure all the parent cols in the primaryjoin are actually - # in the - # columns clause (i.e. are not deferred), so that aliasing applied - # by the Query propagates those columns outward. - # This has the effect - # of "undefering" those columns. 
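-            # _find_columns collects the Column objects referenced
-            # anywhere within the primaryjoin expression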
- for col in sql_util._find_columns( - self.parent_property.primaryjoin): - if localparent.mapped_table.c.contains_column(col): - if adapter: - col = adapter.columns[col] - context.primary_columns.append(col) - - if self.parent_property.order_by: - context.eager_order_by += eagerjoin._target_adapter.\ - copy_and_process( - util.to_list( - self.parent_property.order_by - ) - ) - - def _splice_nested_inner_join( - self, path, join_obj, clauses, onclause, splicing=False): - - if splicing is False: - # first call is always handed a join object - # from the outside - assert isinstance(join_obj, orm_util._ORMJoin) - elif isinstance(join_obj, sql.selectable.FromGrouping): - return self._splice_nested_inner_join( - path, join_obj.element, clauses, onclause, splicing - ) - elif not isinstance(join_obj, orm_util._ORMJoin): - if path[-2] is splicing: - return orm_util._ORMJoin( - join_obj, clauses.aliased_class, - onclause, isouter=False, - _left_memo=splicing, - _right_memo=path[-1].mapper - ) - else: - # only here if splicing == True - return None - - target_join = self._splice_nested_inner_join( - path, join_obj.right, clauses, - onclause, join_obj._right_memo) - if target_join is None: - right_splice = False - target_join = self._splice_nested_inner_join( - path, join_obj.left, clauses, - onclause, join_obj._left_memo) - if target_join is None: - # should only return None when recursively called, - # e.g. splicing==True - assert splicing is not False, \ - "assertion failed attempting to produce joined eager loads" - return None - else: - right_splice = True - - if right_splice: - # for a right splice, attempt to flatten out - # a JOIN b JOIN c JOIN .. to avoid needless - # parenthesis nesting - if not join_obj.isouter and not target_join.isouter: - eagerjoin = join_obj._splice_into_center(target_join) - else: - eagerjoin = orm_util._ORMJoin( - join_obj.left, target_join, - join_obj.onclause, isouter=join_obj.isouter, - _left_memo=join_obj._left_memo) - else: - eagerjoin = orm_util._ORMJoin( - target_join, join_obj.right, - join_obj.onclause, isouter=join_obj.isouter, - _right_memo=join_obj._right_memo) - - eagerjoin._target_adapter = target_join._target_adapter - return eagerjoin - - def _create_eager_adapter(self, context, result, adapter, path, loadopt): - user_defined_adapter = self._init_user_defined_eager_proc( - loadopt, context) if loadopt else False - - if user_defined_adapter is not False: - decorator = user_defined_adapter - # user defined eagerloads are part of the "primary" - # portion of the load. - # the adapters applied to the Query should be honored. - if context.adapter and decorator: - decorator = decorator.wrap(context.adapter) - elif context.adapter: - decorator = context.adapter - else: - decorator = path.get(context.attributes, "eager_row_processor") - if decorator is None: - return False - - if self.mapper._result_has_identity_key(result, decorator): - return decorator - else: - # no identity key - don't return a row - # processor, will cause a degrade to lazy - return False - - def create_row_processor( - self, context, path, loadopt, mapper, - result, adapter, populators): - if not self.parent.class_manager[self.key].impl.supports_population: - raise sa_exc.InvalidRequestError( - "'%s' does not support object " - "population - eager loading cannot be applied." 
% - self - ) - - our_path = path[self.parent_property] - - eager_adapter = self._create_eager_adapter( - context, - result, - adapter, our_path, loadopt) - - if eager_adapter is not False: - key = self.key - - _instance = loading._instance_processor( - self.mapper, - context, - result, - our_path[self.mapper], - eager_adapter) - - if not self.uselist: - self._create_scalar_loader(context, key, _instance, populators) - else: - self._create_collection_loader( - context, key, _instance, populators) - else: - self.parent_property._get_strategy_by_cls(LazyLoader).\ - create_row_processor( - context, path, loadopt, - mapper, result, adapter, populators) - - def _create_collection_loader(self, context, key, _instance, populators): - def load_collection_from_joined_new_row(state, dict_, row): - collection = attributes.init_state_collection( - state, dict_, key) - result_list = util.UniqueAppender(collection, - 'append_without_event') - context.attributes[(state, key)] = result_list - inst = _instance(row) - if inst is not None: - result_list.append(inst) - - def load_collection_from_joined_existing_row(state, dict_, row): - if (state, key) in context.attributes: - result_list = context.attributes[(state, key)] - else: - # appender_key can be absent from context.attributes - # with isnew=False when self-referential eager loading - # is used; the same instance may be present in two - # distinct sets of result columns - collection = attributes.init_state_collection( - state, dict_, key) - result_list = util.UniqueAppender( - collection, - 'append_without_event') - context.attributes[(state, key)] = result_list - inst = _instance(row) - if inst is not None: - result_list.append(inst) - - def load_collection_from_joined_exec(state, dict_, row): - _instance(row) - - populators["new"].append((self.key, load_collection_from_joined_new_row)) - populators["existing"].append( - (self.key, load_collection_from_joined_existing_row)) - if context.invoke_all_eagers: - populators["eager"].append( - (self.key, load_collection_from_joined_exec)) - - def _create_scalar_loader(self, context, key, _instance, populators): - def load_scalar_from_joined_new_row(state, dict_, row): - # set a scalar object instance directly on the parent - # object, bypassing InstrumentedAttribute event handlers. - dict_[key] = _instance(row) - - def load_scalar_from_joined_existing_row(state, dict_, row): - # call _instance on the row, even though the object has - # been created, so that we further descend into properties - existing = _instance(row) - if existing is not None \ - and key in dict_ \ - and existing is not dict_[key]: - util.warn( - "Multiple rows returned with " - "uselist=False for eagerly-loaded attribute '%s' " - % self) - - def load_scalar_from_joined_exec(state, dict_, row): - _instance(row) - - populators["new"].append((self.key, load_scalar_from_joined_new_row)) - populators["existing"].append( - (self.key, load_scalar_from_joined_existing_row)) - if context.invoke_all_eagers: - populators["eager"].append((self.key, load_scalar_from_joined_exec)) - - -def single_parent_validator(desc, prop): - def _do_check(state, value, oldvalue, initiator): - if value is not None and initiator.key == prop.key: - hasparent = initiator.hasparent(attributes.instance_state(value)) - if hasparent and oldvalue is not value: - raise sa_exc.InvalidRequestError( - "Instance %s is already associated with an instance " - "of %s via its %s attribute, and is only allowed a " - "single parent." 
% - (orm_util.instance_str(value), state.class_, prop) - ) - return value - - def append(state, value, initiator): - return _do_check(state, value, None, initiator) - - def set_(state, value, oldvalue, initiator): - return _do_check(state, value, oldvalue, initiator) - - event.listen( - desc, 'append', append, raw=True, retval=True, - active_history=True) - event.listen( - desc, 'set', set_, raw=True, retval=True, - active_history=True) diff --git a/python/sqlalchemy/orm/strategy_options.py b/python/sqlalchemy/orm/strategy_options.py deleted file mode 100644 index 3467328e..00000000 --- a/python/sqlalchemy/orm/strategy_options.py +++ /dev/null @@ -1,1037 +0,0 @@ -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -""" - -from .interfaces import MapperOption, PropComparator -from .. import util -from ..sql.base import _generative, Generative -from .. import exc as sa_exc, inspect -from .base import _is_aliased_class, _class_to_mapper -from . import util as orm_util -from .path_registry import PathRegistry, TokenRegistry, \ - _WILDCARD_TOKEN, _DEFAULT_TOKEN - - -class Load(Generative, MapperOption): - """Represents loader options which modify the state of a - :class:`.Query` in order to affect how various mapped attributes are - loaded. - - .. versionadded:: 0.9.0 The :meth:`.Load` system is a new foundation for - the existing system of loader options, including options such as - :func:`.orm.joinedload`, :func:`.orm.defer`, and others. In - particular, it introduces a new method-chained system that replaces the - need for dot-separated paths as well as "_all()" options such as - :func:`.orm.joinedload_all`. - - A :class:`.Load` object can be used directly or indirectly. To use one - directly, instantiate given the parent class. This style of usage is - useful when dealing with a :class:`.Query` that has multiple entities, - or when producing a loader option that can be applied generically to - any style of query:: - - myopt = Load(MyClass).joinedload("widgets") - - The above ``myopt`` can now be used with :meth:`.Query.options`:: - - session.query(MyClass).options(myopt) - - The :class:`.Load` construct is invoked indirectly whenever one makes use - of the various loader options that are present in ``sqlalchemy.orm``, - including options such as :func:`.orm.joinedload`, :func:`.orm.defer`, - :func:`.orm.subqueryload`, and all the rest. These constructs produce an - "anonymous" form of the :class:`.Load` object which tracks attributes and - options, but is not linked to a parent class until it is associated with a - parent :class:`.Query`:: - - # produce "unbound" Load object - myopt = joinedload("widgets") - - # when applied using options(), the option is "bound" to the - # class observed in the given query, e.g. MyClass - session.query(MyClass).options(myopt) - - Whether the direct or indirect style is used, the :class:`.Load` object - returned now represents a specific "path" along the entities of a - :class:`.Query`. This path can be traversed using a standard - method-chaining approach. 
Supposing a class hierarchy such as ``User``, - ``User.addresses -> Address``, ``User.orders -> Order`` and - ``Order.items -> Item``, we can specify a variety of loader options along - each element in the "path":: - - session.query(User).options( - joinedload("addresses"), - subqueryload("orders").joinedload("items") - ) - - Where above, the ``addresses`` collection will be joined-loaded, the - ``orders`` collection will be subquery-loaded, and within that subquery - load the ``items`` collection will be joined-loaded. - - - """ - - def __init__(self, entity): - insp = inspect(entity) - self.path = insp._path_registry - self.context = {} - self.local_opts = {} - - def _generate(self): - cloned = super(Load, self)._generate() - cloned.local_opts = {} - return cloned - - strategy = None - propagate_to_loaders = False - - def process_query(self, query): - self._process(query, True) - - def process_query_conditionally(self, query): - self._process(query, False) - - def _process(self, query, raiseerr): - current_path = query._current_path - if current_path: - for (token, start_path), loader in self.context.items(): - chopped_start_path = self._chop_path(start_path, current_path) - if chopped_start_path is not None: - query._attributes[(token, chopped_start_path)] = loader - else: - query._attributes.update(self.context) - - def _generate_path(self, path, attr, wildcard_key, raiseerr=True): - if raiseerr and not path.has_entity: - if isinstance(path, TokenRegistry): - raise sa_exc.ArgumentError( - "Wildcard token cannot be followed by another entity") - else: - raise sa_exc.ArgumentError( - "Attribute '%s' of entity '%s' does not " - "refer to a mapped entity" % - (path.prop.key, path.parent.entity) - ) - - if isinstance(attr, util.string_types): - default_token = attr.endswith(_DEFAULT_TOKEN) - if attr.endswith(_WILDCARD_TOKEN) or default_token: - if default_token: - self.propagate_to_loaders = False - if wildcard_key: - attr = "%s:%s" % (wildcard_key, attr) - return path.token(attr) - - try: - # use getattr on the class to work around - # synonyms, hybrids, etc. - attr = getattr(path.entity.class_, attr) - except AttributeError: - if raiseerr: - raise sa_exc.ArgumentError( - "Can't find property named '%s' on the " - "mapped entity %s in this Query. 
" % ( - attr, path.entity) - ) - else: - return None - else: - attr = attr.property - - path = path[attr] - else: - prop = attr.property - - if not prop.parent.common_parent(path.mapper): - if raiseerr: - raise sa_exc.ArgumentError( - "Attribute '%s' does not " - "link from element '%s'" % (attr, path.entity)) - else: - return None - - if getattr(attr, '_of_type', None): - ac = attr._of_type - ext_info = inspect(ac) - - path_element = ext_info.mapper - existing = path.entity_path[prop].get( - self.context, "path_with_polymorphic") - if not ext_info.is_aliased_class: - ac = orm_util.with_polymorphic( - ext_info.mapper.base_mapper, - ext_info.mapper, aliased=True, - _use_mapper_path=True, - _existing_alias=existing) - path.entity_path[prop].set( - self.context, "path_with_polymorphic", inspect(ac)) - path = path[prop][path_element] - else: - path = path[prop] - - if path.has_entity: - path = path.entity_path - return path - - def __str__(self): - return "Load(strategy=%r)" % (self.strategy, ) - - def _coerce_strat(self, strategy): - if strategy is not None: - strategy = tuple(sorted(strategy.items())) - return strategy - - @_generative - def set_relationship_strategy( - self, attr, strategy, propagate_to_loaders=True): - strategy = self._coerce_strat(strategy) - - self.propagate_to_loaders = propagate_to_loaders - # if the path is a wildcard, this will set propagate_to_loaders=False - self.path = self._generate_path(self.path, attr, "relationship") - self.strategy = strategy - if strategy is not None: - self._set_path_strategy() - - @_generative - def set_column_strategy(self, attrs, strategy, opts=None): - strategy = self._coerce_strat(strategy) - - for attr in attrs: - path = self._generate_path(self.path, attr, "column") - cloned = self._generate() - cloned.strategy = strategy - cloned.path = path - cloned.propagate_to_loaders = True - if opts: - cloned.local_opts.update(opts) - cloned._set_path_strategy() - - def _set_path_strategy(self): - if self.path.has_entity: - self.path.parent.set(self.context, "loader", self) - else: - self.path.set(self.context, "loader", self) - - def __getstate__(self): - d = self.__dict__.copy() - d["path"] = self.path.serialize() - return d - - def __setstate__(self, state): - self.__dict__.update(state) - self.path = PathRegistry.deserialize(self.path) - - def _chop_path(self, to_chop, path): - i = -1 - - for i, (c_token, p_token) in enumerate(zip(to_chop, path.path)): - if isinstance(c_token, util.string_types): - # TODO: this is approximated from the _UnboundLoad - # version and probably has issues, not fully covered. - - if i == 0 and c_token.endswith(':' + _DEFAULT_TOKEN): - return to_chop - elif c_token != 'relationship:%s' % (_WILDCARD_TOKEN,) and \ - c_token != p_token.key: - return None - - if c_token is p_token: - continue - else: - return None - return to_chop[i + 1:] - - -class _UnboundLoad(Load): - """Represent a loader option that isn't tied to a root entity. - - The loader option will produce an entity-linked :class:`.Load` - object when it is passed :meth:`.Query.options`. - - This provides compatibility with the traditional system - of freestanding options, e.g. ``joinedload('x.y.z')``. 
- - """ - - def __init__(self): - self.path = () - self._to_bind = set() - self.local_opts = {} - - _is_chain_link = False - - def _set_path_strategy(self): - self._to_bind.add(self) - - def _generate_path(self, path, attr, wildcard_key): - if wildcard_key and isinstance(attr, util.string_types) and \ - attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN): - if attr == _DEFAULT_TOKEN: - self.propagate_to_loaders = False - attr = "%s:%s" % (wildcard_key, attr) - - return path + (attr, ) - - def __getstate__(self): - d = self.__dict__.copy() - d['path'] = ret = [] - for token in util.to_list(self.path): - if isinstance(token, PropComparator): - ret.append((token._parentmapper.class_, token.key)) - else: - ret.append(token) - return d - - def __setstate__(self, state): - ret = [] - for key in state['path']: - if isinstance(key, tuple): - cls, propkey = key - ret.append(getattr(cls, propkey)) - else: - ret.append(key) - state['path'] = tuple(ret) - self.__dict__ = state - - def _process(self, query, raiseerr): - for val in self._to_bind: - val._bind_loader(query, query._attributes, raiseerr) - - @classmethod - def _from_keys(self, meth, keys, chained, kw): - opt = _UnboundLoad() - - def _split_key(key): - if isinstance(key, util.string_types): - # coerce fooload('*') into "default loader strategy" - if key == _WILDCARD_TOKEN: - return (_DEFAULT_TOKEN, ) - # coerce fooload(".*") into "wildcard on default entity" - elif key.startswith("." + _WILDCARD_TOKEN): - key = key[1:] - return key.split(".") - else: - return (key,) - all_tokens = [token for key in keys for token in _split_key(key)] - - for token in all_tokens[0:-1]: - if chained: - opt = meth(opt, token, **kw) - else: - opt = opt.defaultload(token) - opt._is_chain_link = True - - opt = meth(opt, all_tokens[-1], **kw) - opt._is_chain_link = False - - return opt - - def _chop_path(self, to_chop, path): - i = -1 - for i, (c_token, (p_mapper, p_prop)) in enumerate( - zip(to_chop, path.pairs())): - if isinstance(c_token, util.string_types): - if i == 0 and c_token.endswith(':' + _DEFAULT_TOKEN): - return to_chop - elif c_token != 'relationship:%s' % ( - _WILDCARD_TOKEN,) and c_token != p_prop.key: - return None - elif isinstance(c_token, PropComparator): - if c_token.property is not p_prop: - return None - else: - i += 1 - - return to_chop[i:] - - def _bind_loader(self, query, context, raiseerr): - start_path = self.path - # _current_path implies we're in a - # secondary load with an existing path - - current_path = query._current_path - if current_path: - start_path = self._chop_path(start_path, current_path) - - if not start_path: - return None - - token = start_path[0] - - if isinstance(token, util.string_types): - entity = self._find_entity_basestring(query, token, raiseerr) - elif isinstance(token, PropComparator): - prop = token.property - entity = self._find_entity_prop_comparator( - query, - prop.key, - token._parententity, - raiseerr) - - else: - raise sa_exc.ArgumentError( - "mapper option expects " - "string key or list of attributes") - - if not entity: - return - - path_element = entity.entity_zero - - # transfer our entity-less state into a Load() object - # with a real entity path. 
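-        # from here on the option behaves as an ordinary bound Load():
-        # its strategy, local options and path tokens are transferred
-        # to the new object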
- loader = Load(path_element) - loader.context = context - loader.strategy = self.strategy - - path = loader.path - for token in start_path: - loader.path = path = loader._generate_path( - loader.path, token, None, raiseerr) - if path is None: - return - - loader.local_opts.update(self.local_opts) - - if loader.path.has_entity: - effective_path = loader.path.parent - else: - effective_path = loader.path - - # prioritize "first class" options over those - # that were "links in the chain", e.g. "x" and "y" in - # someload("x.y.z") versus someload("x") / someload("x.y") - - if effective_path.is_token: - for path in effective_path.generate_for_superclasses(): - if self._is_chain_link: - path.setdefault(context, "loader", loader) - else: - path.set(context, "loader", loader) - else: - if self._is_chain_link: - effective_path.setdefault(context, "loader", loader) - else: - effective_path.set(context, "loader", loader) - - def _find_entity_prop_comparator(self, query, token, mapper, raiseerr): - if _is_aliased_class(mapper): - searchfor = mapper - else: - searchfor = _class_to_mapper(mapper) - for ent in query._mapper_entities: - if ent.corresponds_to(searchfor): - return ent - else: - if raiseerr: - if not list(query._mapper_entities): - raise sa_exc.ArgumentError( - "Query has only expression-based entities - " - "can't find property named '%s'." - % (token, ) - ) - else: - raise sa_exc.ArgumentError( - "Can't find property '%s' on any entity " - "specified in this Query. Note the full path " - "from root (%s) to target entity must be specified." - % (token, ",".join(str(x) for - x in query._mapper_entities)) - ) - else: - return None - - def _find_entity_basestring(self, query, token, raiseerr): - if token.endswith(':' + _WILDCARD_TOKEN): - if len(list(query._mapper_entities)) != 1: - if raiseerr: - raise sa_exc.ArgumentError( - "Wildcard loader can only be used with exactly " - "one entity. Use Load(ent) to specify " - "specific entities.") - elif token.endswith(_DEFAULT_TOKEN): - raiseerr = False - - for ent in query._mapper_entities: - # return only the first _MapperEntity when searching - # based on string prop name. Ideally object - # attributes are used to specify more exactly. - return ent - else: - if raiseerr: - raise sa_exc.ArgumentError( - "Query has only expression-based entities - " - "can't find property named '%s'." - % (token, ) - ) - else: - return None - - -class loader_option(object): - def __init__(self): - pass - - def __call__(self, fn): - self.name = name = fn.__name__ - self.fn = fn - if hasattr(Load, name): - raise TypeError("Load class already has a %s method." % (name)) - setattr(Load, name, fn) - - return self - - def _add_unbound_fn(self, fn): - self._unbound_fn = fn - fn_doc = self.fn.__doc__ - self.fn.__doc__ = """Produce a new :class:`.Load` object with the -:func:`.orm.%(name)s` option applied. - -See :func:`.orm.%(name)s` for usage examples. - -""" % {"name": self.name} - - fn.__doc__ = fn_doc - return self - - def _add_unbound_all_fn(self, fn): - self._unbound_all_fn = fn - fn.__doc__ = """Produce a standalone "all" option for :func:`.orm.%(name)s`. - -.. deprecated:: 0.9.0 - - The "_all()" style is replaced by method chaining, e.g.:: - - session.query(MyClass).options( - %(name)s("someattribute").%(name)s("anotherattribute") - ) - -""" % {"name": self.name} - return self - - -@loader_option() -def contains_eager(loadopt, attr, alias=None): - """Indicate that the given attribute should be eagerly loaded from - columns stated manually in the query. 
- - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. - - The option is used in conjunction with an explicit join that loads - the desired rows, i.e.:: - - sess.query(Order).\\ - join(Order.user).\\ - options(contains_eager(Order.user)) - - The above query would join from the ``Order`` entity to its related - ``User`` entity, and the returned ``Order`` objects would have the - ``Order.user`` attribute pre-populated. - - :func:`contains_eager` also accepts an `alias` argument, which is the - string name of an alias, an :func:`~sqlalchemy.sql.expression.alias` - construct, or an :func:`~sqlalchemy.orm.aliased` construct. Use this when - the eagerly-loaded rows are to come from an aliased table:: - - user_alias = aliased(User) - sess.query(Order).\\ - join((user_alias, Order.user)).\\ - options(contains_eager(Order.user, alias=user_alias)) - - .. seealso:: - - :ref:`contains_eager` - - """ - if alias is not None: - if not isinstance(alias, str): - info = inspect(alias) - alias = info.selectable - - cloned = loadopt.set_relationship_strategy( - attr, - {"lazy": "joined"}, - propagate_to_loaders=False - ) - cloned.local_opts['eager_from_alias'] = alias - return cloned - - -@contains_eager._add_unbound_fn -def contains_eager(*keys, **kw): - return _UnboundLoad()._from_keys( - _UnboundLoad.contains_eager, keys, True, kw) - - -@loader_option() -def load_only(loadopt, *attrs): - """Indicate that for a particular entity, only the given list - of column-based attribute names should be loaded; all others will be - deferred. - - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. - - Example - given a class ``User``, load only the ``name`` and ``fullname`` - attributes:: - - session.query(User).options(load_only("name", "fullname")) - - Example - given a relationship ``User.addresses -> Address``, specify - subquery loading for the ``User.addresses`` collection, but on each - ``Address`` object load only the ``email_address`` attribute:: - - session.query(User).options( - subqueryload("addreses").load_only("email_address") - ) - - For a :class:`.Query` that has multiple entities, the lead entity can be - specifically referred to using the :class:`.Load` constructor:: - - session.query(User, Address).join(User.addresses).options( - Load(User).load_only("name", "fullname"), - Load(Address).load_only("email_addres") - ) - - - .. versionadded:: 0.9.0 - - """ - cloned = loadopt.set_column_strategy( - attrs, - {"deferred": False, "instrument": True} - ) - cloned.set_column_strategy("*", - {"deferred": True, "instrument": True}, - {"undefer_pks": True}) - return cloned - - -@load_only._add_unbound_fn -def load_only(*attrs): - return _UnboundLoad().load_only(*attrs) - - -@loader_option() -def joinedload(loadopt, attr, innerjoin=None): - """Indicate that the given attribute should be loaded using joined - eager loading. - - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. 
- - examples:: - - # joined-load the "orders" collection on "User" - query(User).options(joinedload(User.orders)) - - # joined-load Order.items and then Item.keywords - query(Order).options(joinedload(Order.items).joinedload(Item.keywords)) - - # lazily load Order.items, but when Items are loaded, - # joined-load the keywords collection - query(Order).options(lazyload(Order.items).joinedload(Item.keywords)) - - :param innerjoin: if ``True``, indicates that the joined eager load should - use an inner join instead of the default of left outer join:: - - query(Order).options(joinedload(Order.user, innerjoin=True)) - - In order to chain multiple eager joins together where some may be - OUTER and others INNER, right-nested joins are used to link them:: - - query(A).options( - joinedload(A.bs, innerjoin=False). - joinedload(B.cs, innerjoin=True) - ) - - The above query, linking A.bs via "outer" join and B.cs via "inner" join - would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When using - SQLite, this form of JOIN is translated to use full subqueries as this - syntax is otherwise not directly supported. - - The ``innerjoin`` flag can also be stated with the term ``"unnested"``. - This will prevent joins from being right-nested, and will instead - link an "innerjoin" eagerload to an "outerjoin" eagerload by bypassing - the "inner" join. Using this form as follows:: - - query(A).options( - joinedload(A.bs, innerjoin=False). - joinedload(B.cs, innerjoin="unnested") - ) - - Joins will be rendered as "a LEFT OUTER JOIN b LEFT OUTER JOIN c", so that - all of "a" is matched rather than being incorrectly limited by a "b" that - does not contain a "c". - - .. note:: The "unnested" flag does **not** affect the JOIN rendered - from a many-to-many association table, e.g. a table configured - as :paramref:`.relationship.secondary`, to the target table; for - correctness of results, these joins are always INNER and are - therefore right-nested if linked to an OUTER join. - - .. versionadded:: 0.9.4 Added support for "nesting" of eager "inner" - joins. See :ref:`feature_2976`. - - .. versionchanged:: 1.0.0 ``innerjoin=True`` now implies - ``innerjoin="nested"``, whereas in 0.9 it implied - ``innerjoin="unnested"``. In order to achieve the pre-1.0 "unnested" - inner join behavior, use the value ``innerjoin="unnested"``. - See :ref:`migration_3008`. - - .. note:: - - The joins produced by :func:`.orm.joinedload` are **anonymously - aliased**. The criteria by which the join proceeds cannot be - modified, nor can the :class:`.Query` refer to these joins in any way, - including ordering. - - To produce a specific SQL JOIN which is explicitly available, use - :meth:`.Query.join`. To combine explicit JOINs with eager loading - of collections, use :func:`.orm.contains_eager`; see - :ref:`contains_eager`. - - .. seealso:: - - :ref:`loading_toplevel` - - :ref:`contains_eager` - - :func:`.orm.subqueryload` - - :func:`.orm.lazyload` - - :paramref:`.relationship.lazy` - - :paramref:`.relationship.innerjoin` - :func:`.relationship`-level - version of the :paramref:`.joinedload.innerjoin` option. 
- - """ - loader = loadopt.set_relationship_strategy(attr, {"lazy": "joined"}) - if innerjoin is not None: - loader.local_opts['innerjoin'] = innerjoin - return loader - - -@joinedload._add_unbound_fn -def joinedload(*keys, **kw): - return _UnboundLoad._from_keys( - _UnboundLoad.joinedload, keys, False, kw) - - -@joinedload._add_unbound_all_fn -def joinedload_all(*keys, **kw): - return _UnboundLoad._from_keys( - _UnboundLoad.joinedload, keys, True, kw) - - -@loader_option() -def subqueryload(loadopt, attr): - """Indicate that the given attribute should be loaded using - subquery eager loading. - - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. - - examples:: - - # subquery-load the "orders" collection on "User" - query(User).options(subqueryload(User.orders)) - - # subquery-load Order.items and then Item.keywords - query(Order).options(subqueryload(Order.items).subqueryload(Item.keywords)) - - # lazily load Order.items, but when Items are loaded, - # subquery-load the keywords collection - query(Order).options(lazyload(Order.items).subqueryload(Item.keywords)) - - - .. seealso:: - - :ref:`loading_toplevel` - - :func:`.orm.joinedload` - - :func:`.orm.lazyload` - - :paramref:`.relationship.lazy` - - """ - return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"}) - - -@subqueryload._add_unbound_fn -def subqueryload(*keys): - return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, False, {}) - - -@subqueryload._add_unbound_all_fn -def subqueryload_all(*keys): - return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, True, {}) - - -@loader_option() -def lazyload(loadopt, attr): - """Indicate that the given attribute should be loaded using "lazy" - loading. - - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. - - .. seealso:: - - :paramref:`.relationship.lazy` - - """ - return loadopt.set_relationship_strategy(attr, {"lazy": "select"}) - - -@lazyload._add_unbound_fn -def lazyload(*keys): - return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, False, {}) - - -@lazyload._add_unbound_all_fn -def lazyload_all(*keys): - return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, True, {}) - - -@loader_option() -def immediateload(loadopt, attr): - """Indicate that the given attribute should be loaded using - an immediate load with a per-attribute SELECT statement. - - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. - - .. seealso:: - - :ref:`loading_toplevel` - - :func:`.orm.joinedload` - - :func:`.orm.lazyload` - - :paramref:`.relationship.lazy` - - """ - loader = loadopt.set_relationship_strategy(attr, {"lazy": "immediate"}) - return loader - - -@immediateload._add_unbound_fn -def immediateload(*keys): - return _UnboundLoad._from_keys( - _UnboundLoad.immediateload, keys, False, {}) - - -@loader_option() -def noload(loadopt, attr): - """Indicate that the given relationship attribute should remain unloaded. - - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. - - :func:`.orm.noload` applies to :func:`.relationship` attributes; for - column-based attributes, see :func:`.orm.defer`. 
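-
-    A minimal usage sketch (``User.addresses`` here stands in for any
-    mapped relationship attribute)::
-
-        session.query(User).options(noload(User.addresses))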
- - """ - - return loadopt.set_relationship_strategy(attr, {"lazy": "noload"}) - - -@noload._add_unbound_fn -def noload(*keys): - return _UnboundLoad._from_keys(_UnboundLoad.noload, keys, False, {}) - - -@loader_option() -def defaultload(loadopt, attr): - """Indicate an attribute should load using its default loader style. - - This method is used to link to other loader options, such as - to set the :func:`.orm.defer` option on a class that is linked to - a relationship of the parent class being loaded, :func:`.orm.defaultload` - can be used to navigate this path without changing the loading style - of the relationship:: - - session.query(MyClass).options(defaultload("someattr").defer("some_column")) - - .. seealso:: - - :func:`.orm.defer` - - :func:`.orm.undefer` - - """ - return loadopt.set_relationship_strategy( - attr, - None - ) - - -@defaultload._add_unbound_fn -def defaultload(*keys): - return _UnboundLoad._from_keys(_UnboundLoad.defaultload, keys, False, {}) - - -@loader_option() -def defer(loadopt, key): - """Indicate that the given column-oriented attribute should be deferred, e.g. - not loaded until accessed. - - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. - - e.g.:: - - from sqlalchemy.orm import defer - - session.query(MyClass).options( - defer("attribute_one"), - defer("attribute_two")) - - session.query(MyClass).options( - defer(MyClass.attribute_one), - defer(MyClass.attribute_two)) - - To specify a deferred load of an attribute on a related class, - the path can be specified one token at a time, specifying the loading - style for each link along the chain. To leave the loading style - for a link unchanged, use :func:`.orm.defaultload`:: - - session.query(MyClass).options(defaultload("someattr").defer("some_column")) - - A :class:`.Load` object that is present on a certain path can have - :meth:`.Load.defer` called multiple times, each will operate on the same - parent entity:: - - - session.query(MyClass).options( - defaultload("someattr"). - defer("some_column"). - defer("some_other_column"). - defer("another_column") - ) - - :param key: Attribute to be deferred. - - :param \*addl_attrs: Deprecated; this option supports the old 0.8 style - of specifying a path as a series of attributes, which is now superseded - by the method-chained style. - - .. seealso:: - - :ref:`deferred` - - :func:`.orm.undefer` - - """ - return loadopt.set_column_strategy( - (key, ), - {"deferred": True, "instrument": True} - ) - - -@defer._add_unbound_fn -def defer(key, *addl_attrs): - return _UnboundLoad._from_keys( - _UnboundLoad.defer, (key, ) + addl_attrs, False, {}) - - -@loader_option() -def undefer(loadopt, key): - """Indicate that the given column-oriented attribute should be undeferred, - e.g. specified within the SELECT statement of the entity as a whole. - - The column being undeferred is typically set up on the mapping as a - :func:`.deferred` attribute. - - This function is part of the :class:`.Load` interface and supports - both method-chained and standalone operation. - - Examples:: - - # undefer two columns - session.query(MyClass).options(undefer("col1"), undefer("col2")) - - # undefer all columns specific to a single class using Load + * - session.query(MyClass, MyOtherClass).options( - Load(MyClass).undefer("*")) - - :param key: Attribute to be undeferred. 
- - :param \*addl_attrs: Deprecated; this option supports the old 0.8 style - of specifying a path as a series of attributes, which is now superseded - by the method-chained style. - - .. seealso:: - - :ref:`deferred` - - :func:`.orm.defer` - - :func:`.orm.undefer_group` - - """ - return loadopt.set_column_strategy( - (key, ), - {"deferred": False, "instrument": True} - ) - - -@undefer._add_unbound_fn -def undefer(key, *addl_attrs): - return _UnboundLoad._from_keys( - _UnboundLoad.undefer, (key, ) + addl_attrs, False, {}) - - -@loader_option() -def undefer_group(loadopt, name): - """Indicate that columns within the given deferred group name should be - undeferred. - - The columns being undeferred are set up on the mapping as - :func:`.deferred` attributes and include a "group" name. - - E.g:: - - session.query(MyClass).options(undefer_group("large_attrs")) - - To undefer a group of attributes on a related entity, the path can be - spelled out using relationship loader options, such as - :func:`.orm.defaultload`:: - - session.query(MyClass).options( - defaultload("someattr").undefer_group("large_attrs")) - - .. versionchanged:: 0.9.0 :func:`.orm.undefer_group` is now specific to a - particiular entity load path. - - .. seealso:: - - :ref:`deferred` - - :func:`.orm.defer` - - :func:`.orm.undefer` - - """ - return loadopt.set_column_strategy( - "*", - None, - {"undefer_group": name} - ) - - -@undefer_group._add_unbound_fn -def undefer_group(name): - return _UnboundLoad().undefer_group(name) diff --git a/python/sqlalchemy/orm/sync.py b/python/sqlalchemy/orm/sync.py deleted file mode 100644 index e8e273a8..00000000 --- a/python/sqlalchemy/orm/sync.py +++ /dev/null @@ -1,140 +0,0 @@ -# orm/sync.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""private module containing functions used for copying data -between instances based on join conditions. - -""" - -from . import exc, util as orm_util, attributes - - -def populate(source, source_mapper, dest, dest_mapper, - synchronize_pairs, uowcommit, flag_cascaded_pks): - source_dict = source.dict - dest_dict = dest.dict - - for l, r in synchronize_pairs: - try: - # inline of source_mapper._get_state_attr_by_column - prop = source_mapper._columntoproperty[l] - value = source.manager[prop.key].impl.get(source, source_dict, - attributes.PASSIVE_OFF) - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, dest_mapper, r) - - try: - # inline of dest_mapper._set_state_attr_by_column - prop = dest_mapper._columntoproperty[r] - dest.manager[prop.key].impl.set(dest, dest_dict, value, None) - except exc.UnmappedColumnError: - _raise_col_to_prop(True, source_mapper, l, dest_mapper, r) - - # technically the "r.primary_key" check isn't - # needed here, but we check for this condition to limit - # how often this logic is invoked for memory/performance - # reasons, since we only need this info for a primary key - # destination. 
- if flag_cascaded_pks and l.primary_key and \ - r.primary_key and \ - r.references(l): - uowcommit.attributes[("pk_cascaded", dest, r)] = True - - -def bulk_populate_inherit_keys( - source_dict, source_mapper, synchronize_pairs): - # a simplified version of populate() used by bulk insert mode - for l, r in synchronize_pairs: - try: - prop = source_mapper._columntoproperty[l] - value = source_dict[prop.key] - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, source_mapper, r) - - try: - prop = source_mapper._columntoproperty[r] - source_dict[prop.key] = value - except exc.UnmappedColumnError: - _raise_col_to_prop(True, source_mapper, l, source_mapper, r) - - -def clear(dest, dest_mapper, synchronize_pairs): - for l, r in synchronize_pairs: - if r.primary_key and \ - dest_mapper._get_state_attr_by_column( - dest, dest.dict, r) not in orm_util._none_set: - - raise AssertionError( - "Dependency rule tried to blank-out primary key " - "column '%s' on instance '%s'" % - (r, orm_util.state_str(dest)) - ) - try: - dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None) - except exc.UnmappedColumnError: - _raise_col_to_prop(True, None, l, dest_mapper, r) - - -def update(source, source_mapper, dest, old_prefix, synchronize_pairs): - for l, r in synchronize_pairs: - try: - oldvalue = source_mapper._get_committed_attr_by_column( - source.obj(), l) - value = source_mapper._get_state_attr_by_column( - source, source.dict, l, passive=attributes.PASSIVE_OFF) - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, None, r) - dest[r.key] = value - dest[old_prefix + r.key] = oldvalue - - -def populate_dict(source, source_mapper, dict_, synchronize_pairs): - for l, r in synchronize_pairs: - try: - value = source_mapper._get_state_attr_by_column( - source, source.dict, l, passive=attributes.PASSIVE_OFF) - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, None, r) - - dict_[r.key] = value - - -def source_modified(uowcommit, source, source_mapper, synchronize_pairs): - """return true if the source object has changes from an old to a - new value on the given synchronize pairs - - """ - for l, r in synchronize_pairs: - try: - prop = source_mapper._columntoproperty[l] - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, None, r) - history = uowcommit.get_attribute_history( - source, prop.key, attributes.PASSIVE_NO_INITIALIZE) - if bool(history.deleted): - return True - else: - return False - - -def _raise_col_to_prop(isdest, source_mapper, source_column, - dest_mapper, dest_column): - if isdest: - raise exc.UnmappedColumnError( - "Can't execute sync rule for " - "destination column '%s'; mapper '%s' does not map " - "this column. Try using an explicit `foreign_keys` " - "collection which does not include this column (or use " - "a viewonly=True relation)." % (dest_column, dest_mapper)) - else: - raise exc.UnmappedColumnError( - "Can't execute sync rule for " - "source column '%s'; mapper '%s' does not map this " - "column. Try using an explicit `foreign_keys` " - "collection which does not include destination column " - "'%s' (or use a viewonly=True relation)." 
% - (source_column, source_mapper, dest_column)) diff --git a/python/sqlalchemy/orm/unitofwork.py b/python/sqlalchemy/orm/unitofwork.py deleted file mode 100644 index 1ef0d24c..00000000 --- a/python/sqlalchemy/orm/unitofwork.py +++ /dev/null @@ -1,656 +0,0 @@ -# orm/unitofwork.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""The internals for the unit of work system. - -The session's flush() process passes objects to a contextual object -here, which assembles flush tasks based on mappers and their properties, -organizes them in order of dependency, and executes. - -""" - -from .. import util, event -from ..util import topological -from . import attributes, persistence, util as orm_util -import itertools - - -def track_cascade_events(descriptor, prop): - """Establish event listeners on object attributes which handle - cascade-on-set/append. - - """ - key = prop.key - - def append(state, item, initiator): - # process "save_update" cascade rules for when - # an instance is appended to the list of another instance - - if item is None: - return - - sess = state.session - if sess: - if sess._warn_on_events: - sess._flush_warning("collection append") - - prop = state.manager.mapper._props[key] - item_state = attributes.instance_state(item) - if prop._cascade.save_update and \ - (prop.cascade_backrefs or key == initiator.key) and \ - not sess._contains_state(item_state): - sess._save_or_update_state(item_state) - return item - - def remove(state, item, initiator): - if item is None: - return - - sess = state.session - if sess: - - prop = state.manager.mapper._props[key] - - if sess._warn_on_events: - sess._flush_warning( - "collection remove" - if prop.uselist - else "related attribute delete") - - # expunge pending orphans - item_state = attributes.instance_state(item) - if prop._cascade.delete_orphan and \ - item_state in sess._new and \ - prop.mapper._is_orphan(item_state): - sess.expunge(item) - - def set_(state, newvalue, oldvalue, initiator): - # process "save_update" cascade rules for when an instance - # is attached to another instance - if oldvalue is newvalue: - return newvalue - - sess = state.session - if sess: - - if sess._warn_on_events: - sess._flush_warning("related attribute set") - - prop = state.manager.mapper._props[key] - if newvalue is not None: - newvalue_state = attributes.instance_state(newvalue) - if prop._cascade.save_update and \ - (prop.cascade_backrefs or key == initiator.key) and \ - not sess._contains_state(newvalue_state): - sess._save_or_update_state(newvalue_state) - - if oldvalue is not None and \ - oldvalue is not attributes.NEVER_SET and \ - oldvalue is not attributes.PASSIVE_NO_RESULT and \ - prop._cascade.delete_orphan: - # possible to reach here with attributes.NEVER_SET ? - oldvalue_state = attributes.instance_state(oldvalue) - - if oldvalue_state in sess._new and \ - prop.mapper._is_orphan(oldvalue_state): - sess.expunge(oldvalue) - return newvalue - - event.listen(descriptor, 'append', append, raw=True, retval=True) - event.listen(descriptor, 'remove', remove, raw=True, retval=True) - event.listen(descriptor, 'set', set_, raw=True, retval=True) - - -class UOWTransaction(object): - def __init__(self, session): - self.session = session - - # dictionary used by external actors to - # store arbitrary state information. 
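-        # (this dictionary also backs the memoization done by memo()
-        # and get_attribute_history() below)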
- self.attributes = {} - - # dictionary of mappers to sets of - # DependencyProcessors, which are also - # set to be part of the sorted flush actions, - # which have that mapper as a parent. - self.deps = util.defaultdict(set) - - # dictionary of mappers to sets of InstanceState - # items pending for flush which have that mapper - # as a parent. - self.mappers = util.defaultdict(set) - - # a dictionary of Preprocess objects, which gather - # additional states impacted by the flush - # and determine if a flush action is needed - self.presort_actions = {} - - # dictionary of PostSortRec objects, each - # one issues work during the flush within - # a certain ordering. - self.postsort_actions = {} - - # a set of 2-tuples, each containing two - # PostSortRec objects where the second - # is dependent on the first being executed - # first - self.dependencies = set() - - # dictionary of InstanceState-> (isdelete, listonly) - # tuples, indicating if this state is to be deleted - # or insert/updated, or just refreshed - self.states = {} - - # tracks InstanceStates which will be receiving - # a "post update" call. Keys are mappers, - # values are a set of states and a set of the - # columns which should be included in the update. - self.post_update_states = util.defaultdict(lambda: (set(), set())) - - @property - def has_work(self): - return bool(self.states) - - def is_deleted(self, state): - """return true if the given state is marked as deleted - within this uowtransaction.""" - - return state in self.states and self.states[state][0] - - def memo(self, key, callable_): - if key in self.attributes: - return self.attributes[key] - else: - self.attributes[key] = ret = callable_() - return ret - - def remove_state_actions(self, state): - """remove pending actions for a state from the uowtransaction.""" - - isdelete = self.states[state][0] - - self.states[state] = (isdelete, True) - - def get_attribute_history(self, state, key, - passive=attributes.PASSIVE_NO_INITIALIZE): - """facade to attributes.get_state_history(), including - caching of results.""" - - hashkey = ("history", state, key) - - # cache the objects, not the states; the strong reference here - # prevents newly loaded objects from being dereferenced during the - # flush process - - if hashkey in self.attributes: - history, state_history, cached_passive = self.attributes[hashkey] - # if the cached lookup was "passive" and now - # we want non-passive, do a non-passive lookup and re-cache - - if not cached_passive & attributes.SQL_OK \ - and passive & attributes.SQL_OK: - impl = state.manager[key].impl - history = impl.get_history(state, state.dict, - attributes.PASSIVE_OFF | - attributes.LOAD_AGAINST_COMMITTED) - if history and impl.uses_objects: - state_history = history.as_state() - else: - state_history = history - self.attributes[hashkey] = (history, state_history, passive) - else: - impl = state.manager[key].impl - # TODO: store the history as (state, object) tuples - # so we don't have to keep converting here - history = impl.get_history(state, state.dict, passive | - attributes.LOAD_AGAINST_COMMITTED) - if history and impl.uses_objects: - state_history = history.as_state() - else: - state_history = history - self.attributes[hashkey] = (history, state_history, - passive) - - return state_history - - def has_dep(self, processor): - return (processor, True) in self.presort_actions - - def register_preprocessor(self, processor, fromparent): - key = (processor, fromparent) - if key not in self.presort_actions: - self.presort_actions[key] = 
Preprocess(processor, fromparent) - - def register_object(self, state, isdelete=False, - listonly=False, cancel_delete=False, - operation=None, prop=None): - if not self.session._contains_state(state): - if not state.deleted and operation is not None: - util.warn("Object of type %s not in session, %s operation " - "along '%s' will not proceed" % - (orm_util.state_class_str(state), operation, prop)) - return False - - if state not in self.states: - mapper = state.manager.mapper - - if mapper not in self.mappers: - self._per_mapper_flush_actions(mapper) - - self.mappers[mapper].add(state) - self.states[state] = (isdelete, listonly) - else: - if not listonly and (isdelete or cancel_delete): - self.states[state] = (isdelete, False) - return True - - def issue_post_update(self, state, post_update_cols): - mapper = state.manager.mapper.base_mapper - states, cols = self.post_update_states[mapper] - states.add(state) - cols.update(post_update_cols) - - def _per_mapper_flush_actions(self, mapper): - saves = SaveUpdateAll(self, mapper.base_mapper) - deletes = DeleteAll(self, mapper.base_mapper) - self.dependencies.add((saves, deletes)) - - for dep in mapper._dependency_processors: - dep.per_property_preprocessors(self) - - for prop in mapper.relationships: - if prop.viewonly: - continue - dep = prop._dependency_processor - dep.per_property_preprocessors(self) - - @util.memoized_property - def _mapper_for_dep(self): - """return a dynamic mapping of (Mapper, DependencyProcessor) to - True or False, indicating if the DependencyProcessor operates - on objects of that Mapper. - - The result is stored in the dictionary persistently once - calculated. - - """ - return util.PopulateDict( - lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop - ) - - def filter_states_for_dep(self, dep, states): - """Filter the given list of InstanceStates to those relevant to the - given DependencyProcessor. - - """ - mapper_for_dep = self._mapper_for_dep - return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]] - - def states_for_mapper_hierarchy(self, mapper, isdelete, listonly): - checktup = (isdelete, listonly) - for mapper in mapper.base_mapper.self_and_descendants: - for state in self.mappers[mapper]: - if self.states[state] == checktup: - yield state - - def _generate_actions(self): - """Generate the full, unsorted collection of PostSortRecs as - well as dependency pairs for this UOWTransaction. - - """ - # execute presort_actions, until all states - # have been processed. a presort_action might - # add new states to the uow. - while True: - ret = False - for action in list(self.presort_actions.values()): - if action.execute(self): - ret = True - if not ret: - break - - # see if the graph of mapper dependencies has cycles. - self.cycles = cycles = topological.find_cycles( - self.dependencies, - list(self.postsort_actions.values())) - - if cycles: - # if yes, break the per-mapper actions into - # per-state actions - convert = dict( - (rec, set(rec.per_state_flush_actions(self))) - for rec in cycles - ) - - # rewrite the existing dependencies to point to - # the per-state actions for those per-mapper actions - # that were broken up. 
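-            # an edge touching a cycled rec is dropped and re-added
-            # once per per-state action generated from that rec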
- for edge in list(self.dependencies): - if None in edge or \ - edge[0].disabled or edge[1].disabled or \ - cycles.issuperset(edge): - self.dependencies.remove(edge) - elif edge[0] in cycles: - self.dependencies.remove(edge) - for dep in convert[edge[0]]: - self.dependencies.add((dep, edge[1])) - elif edge[1] in cycles: - self.dependencies.remove(edge) - for dep in convert[edge[1]]: - self.dependencies.add((edge[0], dep)) - - return set([a for a in self.postsort_actions.values() - if not a.disabled - ] - ).difference(cycles) - - def execute(self): - postsort_actions = self._generate_actions() - - # sort = topological.sort(self.dependencies, postsort_actions) - # print "--------------" - # print "\ndependencies:", self.dependencies - # print "\ncycles:", self.cycles - # print "\nsort:", list(sort) - # print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions) - - # execute - if self.cycles: - for set_ in topological.sort_as_subsets( - self.dependencies, - postsort_actions): - while set_: - n = set_.pop() - n.execute_aggregate(self, set_) - else: - for rec in topological.sort( - self.dependencies, - postsort_actions): - rec.execute(self) - - def finalize_flush_changes(self): - """mark processed objects as clean / deleted after a successful - flush(). - - this method is called within the flush() method after the - execute() method has succeeded and the transaction has been committed. - - """ - if not self.states: - return - - states = set(self.states) - isdel = set( - s for (s, (isdelete, listonly)) in self.states.items() - if isdelete - ) - other = states.difference(isdel) - if isdel: - self.session._remove_newly_deleted(isdel) - if other: - self.session._register_newly_persistent(other) - - -class IterateMappersMixin(object): - def _mappers(self, uow): - if self.fromparent: - return iter( - m for m in - self.dependency_processor.parent.self_and_descendants - if uow._mapper_for_dep[(m, self.dependency_processor)] - ) - else: - return self.dependency_processor.mapper.self_and_descendants - - -class Preprocess(IterateMappersMixin): - def __init__(self, dependency_processor, fromparent): - self.dependency_processor = dependency_processor - self.fromparent = fromparent - self.processed = set() - self.setup_flush_actions = False - - def execute(self, uow): - delete_states = set() - save_states = set() - - for mapper in self._mappers(uow): - for state in uow.mappers[mapper].difference(self.processed): - (isdelete, listonly) = uow.states[state] - if not listonly: - if isdelete: - delete_states.add(state) - else: - save_states.add(state) - - if delete_states: - self.dependency_processor.presort_deletes(uow, delete_states) - self.processed.update(delete_states) - if save_states: - self.dependency_processor.presort_saves(uow, save_states) - self.processed.update(save_states) - - if (delete_states or save_states): - if not self.setup_flush_actions and ( - self.dependency_processor. - prop_has_changes(uow, delete_states, True) or - self.dependency_processor. 
- prop_has_changes(uow, save_states, False) - ): - self.dependency_processor.per_property_flush_actions(uow) - self.setup_flush_actions = True - return True - else: - return False - - -class PostSortRec(object): - disabled = False - - def __new__(cls, uow, *args): - key = (cls, ) + args - if key in uow.postsort_actions: - return uow.postsort_actions[key] - else: - uow.postsort_actions[key] = \ - ret = \ - object.__new__(cls) - return ret - - def execute_aggregate(self, uow, recs): - self.execute(uow) - - def __repr__(self): - return "%s(%s)" % ( - self.__class__.__name__, - ",".join(str(x) for x in self.__dict__.values()) - ) - - -class ProcessAll(IterateMappersMixin, PostSortRec): - def __init__(self, uow, dependency_processor, delete, fromparent): - self.dependency_processor = dependency_processor - self.delete = delete - self.fromparent = fromparent - uow.deps[dependency_processor.parent.base_mapper].\ - add(dependency_processor) - - def execute(self, uow): - states = self._elements(uow) - if self.delete: - self.dependency_processor.process_deletes(uow, states) - else: - self.dependency_processor.process_saves(uow, states) - - def per_state_flush_actions(self, uow): - # this is handled by SaveUpdateAll and DeleteAll, - # since a ProcessAll should unconditionally be pulled - # into per-state if either the parent/child mappers - # are part of a cycle - return iter([]) - - def __repr__(self): - return "%s(%s, delete=%s)" % ( - self.__class__.__name__, - self.dependency_processor, - self.delete - ) - - def _elements(self, uow): - for mapper in self._mappers(uow): - for state in uow.mappers[mapper]: - (isdelete, listonly) = uow.states[state] - if isdelete == self.delete and not listonly: - yield state - - -class IssuePostUpdate(PostSortRec): - def __init__(self, uow, mapper, isdelete): - self.mapper = mapper - self.isdelete = isdelete - - def execute(self, uow): - states, cols = uow.post_update_states[self.mapper] - states = [s for s in states if uow.states[s][0] == self.isdelete] - - persistence.post_update(self.mapper, states, uow, cols) - - -class SaveUpdateAll(PostSortRec): - def __init__(self, uow, mapper): - self.mapper = mapper - assert mapper is mapper.base_mapper - - def execute(self, uow): - persistence.save_obj(self.mapper, - uow.states_for_mapper_hierarchy( - self.mapper, False, False), - uow - ) - - def per_state_flush_actions(self, uow): - states = list(uow.states_for_mapper_hierarchy( - self.mapper, False, False)) - base_mapper = self.mapper.base_mapper - delete_all = DeleteAll(uow, base_mapper) - for state in states: - # keep saves before deletes - - # this ensures 'row switch' operations work - action = SaveUpdateState(uow, state, base_mapper) - uow.dependencies.add((action, delete_all)) - yield action - - for dep in uow.deps[self.mapper]: - states_for_prop = uow.filter_states_for_dep(dep, states) - dep.per_state_flush_actions(uow, states_for_prop, False) - - -class DeleteAll(PostSortRec): - def __init__(self, uow, mapper): - self.mapper = mapper - assert mapper is mapper.base_mapper - - def execute(self, uow): - persistence.delete_obj(self.mapper, - uow.states_for_mapper_hierarchy( - self.mapper, True, False), - uow - ) - - def per_state_flush_actions(self, uow): - states = list(uow.states_for_mapper_hierarchy( - self.mapper, True, False)) - base_mapper = self.mapper.base_mapper - save_all = SaveUpdateAll(uow, base_mapper) - for state in states: - # keep saves before deletes - - # this ensures 'row switch' operations work - action = DeleteState(uow, state, base_mapper) - 
uow.dependencies.add((save_all, action)) - yield action - - for dep in uow.deps[self.mapper]: - states_for_prop = uow.filter_states_for_dep(dep, states) - dep.per_state_flush_actions(uow, states_for_prop, True) - - -class ProcessState(PostSortRec): - def __init__(self, uow, dependency_processor, delete, state): - self.dependency_processor = dependency_processor - self.delete = delete - self.state = state - - def execute_aggregate(self, uow, recs): - cls_ = self.__class__ - dependency_processor = self.dependency_processor - delete = self.delete - our_recs = [r for r in recs - if r.__class__ is cls_ and - r.dependency_processor is dependency_processor and - r.delete is delete] - recs.difference_update(our_recs) - states = [self.state] + [r.state for r in our_recs] - if delete: - dependency_processor.process_deletes(uow, states) - else: - dependency_processor.process_saves(uow, states) - - def __repr__(self): - return "%s(%s, %s, delete=%s)" % ( - self.__class__.__name__, - self.dependency_processor, - orm_util.state_str(self.state), - self.delete - ) - - -class SaveUpdateState(PostSortRec): - def __init__(self, uow, state, mapper): - self.state = state - self.mapper = mapper - - def execute_aggregate(self, uow, recs): - cls_ = self.__class__ - mapper = self.mapper - our_recs = [r for r in recs - if r.__class__ is cls_ and - r.mapper is mapper] - recs.difference_update(our_recs) - persistence.save_obj(mapper, - [self.state] + - [r.state for r in our_recs], - uow) - - def __repr__(self): - return "%s(%s)" % ( - self.__class__.__name__, - orm_util.state_str(self.state) - ) - - -class DeleteState(PostSortRec): - def __init__(self, uow, state, mapper): - self.state = state - self.mapper = mapper - - def execute_aggregate(self, uow, recs): - cls_ = self.__class__ - mapper = self.mapper - our_recs = [r for r in recs - if r.__class__ is cls_ and - r.mapper is mapper] - recs.difference_update(our_recs) - states = [self.state] + [r.state for r in our_recs] - persistence.delete_obj(mapper, - [s for s in states if uow.states[s][0]], - uow) - - def __repr__(self): - return "%s(%s)" % ( - self.__class__.__name__, - orm_util.state_str(self.state) - ) diff --git a/python/sqlalchemy/orm/util.py b/python/sqlalchemy/orm/util.py deleted file mode 100644 index 6d386967..00000000 --- a/python/sqlalchemy/orm/util.py +++ /dev/null @@ -1,1030 +0,0 @@ -# orm/util.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -from .. import sql, util, event, exc as sa_exc, inspection -from ..sql import expression, util as sql_util, operators -from .interfaces import PropComparator, MapperProperty -from . 
import attributes -import re - -from .base import instance_str, state_str, state_class_str, attribute_str, \ - state_attribute_str, object_mapper, object_state, _none_set, _never_set -from .base import class_mapper, _class_to_mapper -from .base import InspectionAttr -from .path_registry import PathRegistry - -all_cascades = frozenset(("delete", "delete-orphan", "all", "merge", - "expunge", "save-update", "refresh-expire", - "none")) - - -class CascadeOptions(frozenset): - """Keeps track of the options sent to relationship().cascade""" - - _add_w_all_cascades = all_cascades.difference([ - 'all', 'none', 'delete-orphan']) - _allowed_cascades = all_cascades - - __slots__ = ( - 'save_update', 'delete', 'refresh_expire', 'merge', - 'expunge', 'delete_orphan') - - def __new__(cls, value_list): - if isinstance(value_list, util.string_types) or value_list is None: - return cls.from_string(value_list) - values = set(value_list) - if values.difference(cls._allowed_cascades): - raise sa_exc.ArgumentError( - "Invalid cascade option(s): %s" % - ", ".join([repr(x) for x in - sorted(values.difference(cls._allowed_cascades))])) - - if "all" in values: - values.update(cls._add_w_all_cascades) - if "none" in values: - values.clear() - values.discard('all') - - self = frozenset.__new__(CascadeOptions, values) - self.save_update = 'save-update' in values - self.delete = 'delete' in values - self.refresh_expire = 'refresh-expire' in values - self.merge = 'merge' in values - self.expunge = 'expunge' in values - self.delete_orphan = "delete-orphan" in values - - if self.delete_orphan and not self.delete: - util.warn("The 'delete-orphan' cascade " - "option requires 'delete'.") - return self - - def __repr__(self): - return "CascadeOptions(%r)" % ( - ",".join([x for x in sorted(self)]) - ) - - @classmethod - def from_string(cls, arg): - values = [ - c for c - in re.split('\s*,\s*', arg or "") - if c - ] - return cls(values) - - -def _validator_events( - desc, key, validator, include_removes, include_backrefs): - """Runs a validation method on an attribute value to be set or - appended. - """ - - if not include_backrefs: - def detect_is_backref(state, initiator): - impl = state.manager[key].impl - return initiator.impl is not impl - - if include_removes: - def append(state, value, initiator): - if include_backrefs or not detect_is_backref(state, initiator): - return validator(state.obj(), key, value, False) - else: - return value - - def set_(state, value, oldvalue, initiator): - if include_backrefs or not detect_is_backref(state, initiator): - return validator(state.obj(), key, value, False) - else: - return value - - def remove(state, value, initiator): - if include_backrefs or not detect_is_backref(state, initiator): - validator(state.obj(), key, value, True) - - else: - def append(state, value, initiator): - if include_backrefs or not detect_is_backref(state, initiator): - return validator(state.obj(), key, value) - else: - return value - - def set_(state, value, oldvalue, initiator): - if include_backrefs or not detect_is_backref(state, initiator): - return validator(state.obj(), key, value) - else: - return value - - event.listen(desc, 'append', append, raw=True, retval=True) - event.listen(desc, 'set', set_, raw=True, retval=True) - if include_removes: - event.listen(desc, "remove", remove, raw=True, retval=True) - - -def polymorphic_union(table_map, typecolname, - aliasname='p_union', cast_nulls=True): - """Create a ``UNION`` statement used by a polymorphic mapper. 
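-
-    A rough sketch of a call (``employee_table`` and ``manager_table``
-    are assumed concrete-inheritance tables)::
-
-        pjoin = polymorphic_union({
-            'employee': employee_table,
-            'manager': manager_table
-        }, 'type', 'pjoin')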
- - See :ref:`concrete_inheritance` for an example of how - this is used. - - :param table_map: mapping of polymorphic identities to - :class:`.Table` objects. - :param typecolname: string name of a "discriminator" column, which will be - derived from the query, producing the polymorphic identity for - each row. If ``None``, no polymorphic discriminator is generated. - :param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()` - construct generated. - :param cast_nulls: if True, non-existent columns, which are represented - as labeled NULLs, will be passed into CAST. This is a legacy behavior - that is problematic on some backends such as Oracle - in which case it - can be set to False. - - """ - - colnames = util.OrderedSet() - colnamemaps = {} - types = {} - for key in table_map: - table = table_map[key] - - # mysql doesn't like selecting from a select; - # make it an alias of the select - if isinstance(table, sql.Select): - table = table.alias() - table_map[key] = table - - m = {} - for c in table.c: - colnames.add(c.key) - m[c.key] = c - types[c.key] = c.type - colnamemaps[table] = m - - def col(name, table): - try: - return colnamemaps[table][name] - except KeyError: - if cast_nulls: - return sql.cast(sql.null(), types[name]).label(name) - else: - return sql.type_coerce(sql.null(), types[name]).label(name) - - result = [] - for type, table in table_map.items(): - if typecolname is not None: - result.append( - sql.select([col(name, table) for name in colnames] + - [sql.literal_column( - sql_util._quote_ddl_expr(type)). - label(typecolname)], - from_obj=[table])) - else: - result.append(sql.select([col(name, table) for name in colnames], - from_obj=[table])) - return sql.union_all(*result).alias(aliasname) - - -def identity_key(*args, **kwargs): - """Generate "identity key" tuples, as are used as keys in the - :attr:`.Session.identity_map` dictionary. - - This function has several call styles: - - * ``identity_key(class, ident)`` - - This form receives a mapped class and a primary key scalar or - tuple as an argument. - - E.g.:: - - >>> identity_key(MyClass, (1, 2)) - (, (1, 2)) - - :param class: mapped class (must be a positional argument) - :param ident: primary key, may be a scalar or tuple argument. - - - * ``identity_key(instance=instance)`` - - This form will produce the identity key for a given instance. The - instance need not be persistent, only that its primary key attributes - are populated (else the key will contain ``None`` for those missing - values). - - E.g.:: - - >>> instance = MyClass(1, 2) - >>> identity_key(instance=instance) - (, (1, 2)) - - In this form, the given instance is ultimately run though - :meth:`.Mapper.identity_key_from_instance`, which will have the - effect of performing a database check for the corresponding row - if the object is expired. - - :param instance: object instance (must be given as a keyword arg) - - * ``identity_key(class, row=row)`` - - This form is similar to the class/tuple form, except is passed a - database result row as a :class:`.RowProxy` object. 
- - E.g.:: - - >>> row = engine.execute("select * from table where a=1 and b=2").\ -first() - >>> identity_key(MyClass, row=row) - (, (1, 2)) - - :param class: mapped class (must be a positional argument) - :param row: :class:`.RowProxy` row returned by a :class:`.ResultProxy` - (must be given as a keyword arg) - - """ - if args: - if len(args) == 1: - class_ = args[0] - try: - row = kwargs.pop("row") - except KeyError: - ident = kwargs.pop("ident") - elif len(args) == 2: - class_, ident = args - elif len(args) == 3: - class_, ident = args - else: - raise sa_exc.ArgumentError( - "expected up to three positional arguments, " - "got %s" % len(args)) - if kwargs: - raise sa_exc.ArgumentError("unknown keyword arguments: %s" - % ", ".join(kwargs)) - mapper = class_mapper(class_) - if "ident" in locals(): - return mapper.identity_key_from_primary_key(util.to_list(ident)) - return mapper.identity_key_from_row(row) - instance = kwargs.pop("instance") - if kwargs: - raise sa_exc.ArgumentError("unknown keyword arguments: %s" - % ", ".join(kwargs.keys)) - mapper = object_mapper(instance) - return mapper.identity_key_from_instance(instance) - - -class ORMAdapter(sql_util.ColumnAdapter): - """ColumnAdapter subclass which excludes adaptation of entities from - non-matching mappers. - - """ - - def __init__(self, entity, equivalents=None, adapt_required=False, - chain_to=None, allow_label_resolve=True, - anonymize_labels=False): - info = inspection.inspect(entity) - - self.mapper = info.mapper - selectable = info.selectable - is_aliased_class = info.is_aliased_class - if is_aliased_class: - self.aliased_class = entity - else: - self.aliased_class = None - - sql_util.ColumnAdapter.__init__( - self, selectable, equivalents, chain_to, - adapt_required=adapt_required, - allow_label_resolve=allow_label_resolve, - anonymize_labels=anonymize_labels, - include_fn=self._include_fn - ) - - def _include_fn(self, elem): - entity = elem._annotations.get('parentmapper', None) - return not entity or entity.isa(self.mapper) - - -class AliasedClass(object): - """Represents an "aliased" form of a mapped class for usage with Query. - - The ORM equivalent of a :func:`sqlalchemy.sql.expression.alias` - construct, this object mimics the mapped class using a - __getattr__ scheme and maintains a reference to a - real :class:`~sqlalchemy.sql.expression.Alias` object. - - Usage is via the :func:`.orm.aliased` function, or alternatively - via the :func:`.orm.with_polymorphic` function. - - Usage example:: - - # find all pairs of users with the same name - user_alias = aliased(User) - session.query(User, user_alias).\\ - join((user_alias, User.id > user_alias.id)).\\ - filter(User.name==user_alias.name) - - The resulting object is an instance of :class:`.AliasedClass`. - This object implements an attribute scheme which produces the - same attribute and method interface as the original mapped - class, allowing :class:`.AliasedClass` to be compatible - with any attribute technique which works on the original class, - including hybrid attributes (see :ref:`hybrids_toplevel`). - - The :class:`.AliasedClass` can be inspected for its underlying - :class:`.Mapper`, aliased selectable, and other information - using :func:`.inspect`:: - - from sqlalchemy import inspect - my_alias = aliased(MyClass) - insp = inspect(my_alias) - - The resulting inspection object is an instance of :class:`.AliasedInsp`. - - See :func:`.aliased` and :func:`.with_polymorphic` for construction - argument descriptions. 
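-
-    The inspection object exposes, among other things, the underlying
-    mapper and the aliased selectable (see :class:`.AliasedInsp`
-    below)::
-
-        insp.mapper      # the Mapper for MyClass
-        insp.selectable  # the Alias construct wrapping the mapped table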
- - """ - - def __init__(self, cls, alias=None, - name=None, - flat=False, - adapt_on_names=False, - # TODO: None for default here? - with_polymorphic_mappers=(), - with_polymorphic_discriminator=None, - base_alias=None, - use_mapper_path=False): - mapper = _class_to_mapper(cls) - if alias is None: - alias = mapper._with_polymorphic_selectable.alias( - name=name, flat=flat) - - self._aliased_insp = AliasedInsp( - self, - mapper, - alias, - name, - with_polymorphic_mappers - if with_polymorphic_mappers - else mapper.with_polymorphic_mappers, - with_polymorphic_discriminator - if with_polymorphic_discriminator is not None - else mapper.polymorphic_on, - base_alias, - use_mapper_path, - adapt_on_names - ) - - self.__name__ = 'AliasedClass_%s' % mapper.class_.__name__ - - def __getattr__(self, key): - try: - _aliased_insp = self.__dict__['_aliased_insp'] - except KeyError: - raise AttributeError() - else: - for base in _aliased_insp._target.__mro__: - try: - attr = object.__getattribute__(base, key) - except AttributeError: - continue - else: - break - else: - raise AttributeError(key) - - if isinstance(attr, PropComparator): - ret = attr.adapt_to_entity(_aliased_insp) - setattr(self, key, ret) - return ret - elif hasattr(attr, 'func_code'): - is_method = getattr(_aliased_insp._target, key, None) - if is_method and is_method.__self__ is not None: - return util.types.MethodType(attr.__func__, self, self) - else: - return None - elif hasattr(attr, '__get__'): - ret = attr.__get__(None, self) - if isinstance(ret, PropComparator): - return ret.adapt_to_entity(_aliased_insp) - else: - return ret - else: - return attr - - def __repr__(self): - return '' % ( - id(self), self._aliased_insp._target.__name__) - - -class AliasedInsp(InspectionAttr): - """Provide an inspection interface for an - :class:`.AliasedClass` object. - - The :class:`.AliasedInsp` object is returned - given an :class:`.AliasedClass` using the - :func:`.inspect` function:: - - from sqlalchemy import inspect - from sqlalchemy.orm import aliased - - my_alias = aliased(MyMappedClass) - insp = inspect(my_alias) - - Attributes on :class:`.AliasedInsp` - include: - - * ``entity`` - the :class:`.AliasedClass` represented. - * ``mapper`` - the :class:`.Mapper` mapping the underlying class. - * ``selectable`` - the :class:`.Alias` construct which ultimately - represents an aliased :class:`.Table` or :class:`.Select` - construct. - * ``name`` - the name of the alias. Also is used as the attribute - name when returned in a result tuple from :class:`.Query`. - * ``with_polymorphic_mappers`` - collection of :class:`.Mapper` objects - indicating all those mappers expressed in the select construct - for the :class:`.AliasedClass`. - * ``polymorphic_on`` - an alternate column or SQL expression which - will be used as the "discriminator" for a polymorphic load. - - .. 
seealso:: - - :ref:`inspection_toplevel` - - """ - - def __init__(self, entity, mapper, selectable, name, - with_polymorphic_mappers, polymorphic_on, - _base_alias, _use_mapper_path, adapt_on_names): - self.entity = entity - self.mapper = mapper - self.selectable = selectable - self.name = name - self.with_polymorphic_mappers = with_polymorphic_mappers - self.polymorphic_on = polymorphic_on - self._base_alias = _base_alias or self - self._use_mapper_path = _use_mapper_path - - self._adapter = sql_util.ColumnAdapter( - selectable, equivalents=mapper._equivalent_columns, - adapt_on_names=adapt_on_names, anonymize_labels=True) - - self._adapt_on_names = adapt_on_names - self._target = mapper.class_ - - for poly in self.with_polymorphic_mappers: - if poly is not mapper: - setattr(self.entity, poly.class_.__name__, - AliasedClass(poly.class_, selectable, base_alias=self, - adapt_on_names=adapt_on_names, - use_mapper_path=_use_mapper_path)) - - is_aliased_class = True - "always returns True" - - @property - def class_(self): - """Return the mapped class ultimately represented by this - :class:`.AliasedInsp`.""" - return self.mapper.class_ - - @util.memoized_property - def _path_registry(self): - if self._use_mapper_path: - return self.mapper._path_registry - else: - return PathRegistry.per_mapper(self) - - def __getstate__(self): - return { - 'entity': self.entity, - 'mapper': self.mapper, - 'alias': self.selectable, - 'name': self.name, - 'adapt_on_names': self._adapt_on_names, - 'with_polymorphic_mappers': - self.with_polymorphic_mappers, - 'with_polymorphic_discriminator': - self.polymorphic_on, - 'base_alias': self._base_alias, - 'use_mapper_path': self._use_mapper_path - } - - def __setstate__(self, state): - self.__init__( - state['entity'], - state['mapper'], - state['alias'], - state['name'], - state['with_polymorphic_mappers'], - state['with_polymorphic_discriminator'], - state['base_alias'], - state['use_mapper_path'], - state['adapt_on_names'] - ) - - def _adapt_element(self, elem): - return self._adapter.traverse(elem).\ - _annotate({ - 'parententity': self, - 'parentmapper': self.mapper} - ) - - def _entity_for_mapper(self, mapper): - self_poly = self.with_polymorphic_mappers - if mapper in self_poly: - return getattr(self.entity, mapper.class_.__name__)._aliased_insp - elif mapper.isa(self.mapper): - return self - else: - assert False, "mapper %s doesn't correspond to %s" % ( - mapper, self) - - def __repr__(self): - if self.with_polymorphic_mappers: - with_poly = "(%s)" % ", ".join( - mp.class_.__name__ for mp in self.with_polymorphic_mappers) - else: - with_poly = "" - return '' % ( - id(self), self.class_.__name__, with_poly) - - -inspection._inspects(AliasedClass)(lambda target: target._aliased_insp) -inspection._inspects(AliasedInsp)(lambda target: target) - - -def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False): - """Produce an alias of the given element, usually an :class:`.AliasedClass` - instance. - - E.g.:: - - my_alias = aliased(MyClass) - - session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id) - - The :func:`.aliased` function is used to create an ad-hoc mapping - of a mapped class to a new selectable. By default, a selectable - is generated from the normally mapped selectable (typically a - :class:`.Table`) using the :meth:`.FromClause.alias` method. - However, :func:`.aliased` can also be used to link the class to - a new :func:`.select` statement. 
Also, the :func:`.with_polymorphic` - function is a variant of :func:`.aliased` that is intended to specify - a so-called "polymorphic selectable", that corresponds to the union - of several joined-inheritance subclasses at once. - - For convenience, the :func:`.aliased` function also accepts plain - :class:`.FromClause` constructs, such as a :class:`.Table` or - :func:`.select` construct. In those cases, the :meth:`.FromClause.alias` - method is called on the object and the new :class:`.Alias` object - returned. The returned :class:`.Alias` is not ORM-mapped in this case. - - :param element: element to be aliased. Is normally a mapped class, - but for convenience can also be a :class:`.FromClause` element. - - :param alias: Optional selectable unit to map the element to. This should - normally be a :class:`.Alias` object corresponding to the :class:`.Table` - to which the class is mapped, or to a :func:`.select` construct that - is compatible with the mapping. By default, a simple anonymous - alias of the mapped table is generated. - - :param name: optional string name to use for the alias, if not specified - by the ``alias`` parameter. The name, among other things, forms the - attribute name that will be accessible via tuples returned by a - :class:`.Query` object. - - :param flat: Boolean, will be passed through to the - :meth:`.FromClause.alias` call so that aliases of :class:`.Join` objects - don't include an enclosing SELECT. This can lead to more efficient - queries in many circumstances. A JOIN against a nested JOIN will be - rewritten as a JOIN against an aliased SELECT subquery on backends that - don't support this syntax. - - .. versionadded:: 0.9.0 - - .. seealso:: :meth:`.Join.alias` - - :param adapt_on_names: if True, more liberal "matching" will be used when - mapping the mapped columns of the ORM entity to those of the - given selectable - a name-based match will be performed if the - given selectable doesn't otherwise have a column that corresponds - to one on the entity. The use case for this is when associating - an entity with some derived selectable such as one that uses - aggregate functions:: - - class UnitPrice(Base): - __tablename__ = 'unit_price' - ... - unit_id = Column(Integer) - price = Column(Numeric) - - aggregated_unit_price = Session.query( - func.sum(UnitPrice.price).label('price') - ).group_by(UnitPrice.unit_id).subquery() - - aggregated_unit_price = aliased(UnitPrice, - alias=aggregated_unit_price, adapt_on_names=True) - - Above, functions on ``aggregated_unit_price`` which refer to - ``.price`` will return the - ``fund.sum(UnitPrice.price).label('price')`` column, as it is - matched on the name "price". Ordinarily, the "price" function - wouldn't have any "column correspondence" to the actual - ``UnitPrice.price`` column as it is not a proxy of the original. - - .. versionadded:: 0.7.3 - - - """ - if isinstance(element, expression.FromClause): - if adapt_on_names: - raise sa_exc.ArgumentError( - "adapt_on_names only applies to ORM elements" - ) - return element.alias(name, flat=flat) - else: - return AliasedClass(element, alias=alias, flat=flat, - name=name, adapt_on_names=adapt_on_names) - - -def with_polymorphic(base, classes, selectable=False, - flat=False, - polymorphic_on=None, aliased=False, - innerjoin=False, _use_mapper_path=False, - _existing_alias=None): - """Produce an :class:`.AliasedClass` construct which specifies - columns for descendant mappers of the given base. - - .. 
versionadded:: 0.8 - :func:`.orm.with_polymorphic` is in addition to the existing - :class:`.Query` method :meth:`.Query.with_polymorphic`, - which has the same purpose but is not as flexible in its usage. - - Using this method will ensure that each descendant mapper's - tables are included in the FROM clause, and will allow filter() - criterion to be used against those tables. The resulting - instances will also have those columns already loaded so that - no "post fetch" of those columns will be required. - - See the examples at :ref:`with_polymorphic`. - - :param base: Base class to be aliased. - - :param classes: a single class or mapper, or list of - class/mappers, which inherit from the base class. - Alternatively, it may also be the string ``'*'``, in which case - all descending mapped classes will be added to the FROM clause. - - :param aliased: when True, the selectable will be wrapped in an - alias, that is ``(SELECT * FROM ) AS anon_1``. - This can be important when using the with_polymorphic() - to create the target of a JOIN on a backend that does not - support parenthesized joins, such as SQLite and older - versions of MySQL. - - :param flat: Boolean, will be passed through to the - :meth:`.FromClause.alias` call so that aliases of :class:`.Join` - objects don't include an enclosing SELECT. This can lead to more - efficient queries in many circumstances. A JOIN against a nested JOIN - will be rewritten as a JOIN against an aliased SELECT subquery on - backends that don't support this syntax. - - Setting ``flat`` to ``True`` implies the ``aliased`` flag is - also ``True``. - - .. versionadded:: 0.9.0 - - .. seealso:: :meth:`.Join.alias` - - :param selectable: a table or select() statement that will - be used in place of the generated FROM clause. This argument is - required if any of the desired classes use concrete table - inheritance, since SQLAlchemy currently cannot generate UNIONs - among tables automatically. If used, the ``selectable`` argument - must represent the full set of tables and columns mapped by every - mapped class. Otherwise, the unaccounted mapped columns will - result in their table being appended directly to the FROM clause - which will usually lead to incorrect results. - - :param polymorphic_on: a column to be used as the "discriminator" - column for the given selectable. If not given, the polymorphic_on - attribute of the base classes' mapper will be used, if any. This - is useful for mappings that don't have polymorphic loading - behavior by default. - - :param innerjoin: if True, an INNER JOIN will be used. This should - only be specified if querying for one specific subtype only - """ - primary_mapper = _class_to_mapper(base) - if _existing_alias: - assert _existing_alias.mapper is primary_mapper - classes = util.to_set(classes) - new_classes = set([ - mp.class_ for mp in - _existing_alias.with_polymorphic_mappers]) - if classes == new_classes: - return _existing_alias - else: - classes = classes.union(new_classes) - mappers, selectable = primary_mapper.\ - _with_polymorphic_args(classes, selectable, - innerjoin=innerjoin) - if aliased or flat: - selectable = selectable.alias(flat=flat) - return AliasedClass(base, - selectable, - with_polymorphic_mappers=mappers, - with_polymorphic_discriminator=polymorphic_on, - use_mapper_path=_use_mapper_path) - - -def _orm_annotate(element, exclude=None): - """Deep copy the given ClauseElement, annotating each element with the - "_orm_adapt" flag. 
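Returning to :func:`.with_polymorphic` above, a minimal usage sketch (``Employee`` is an assumed joined-inheritance base with ``Engineer`` and ``Manager`` subclasses; passing ``'*'`` would select all descendants instead)::

    from sqlalchemy.orm import with_polymorphic

    emp_poly = with_polymorphic(Employee, [Engineer, Manager])

    # subclass tables are in the FROM clause, so filter() can use them
    query = session.query(emp_poly).filter(
        emp_poly.Engineer.engineer_info == 'vista')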
- - Elements within the exclude collection will be cloned but not annotated. - - """ - return sql_util._deep_annotate(element, {'_orm_adapt': True}, exclude) - - -def _orm_deannotate(element): - """Remove annotations that link a column to a particular mapping. - - Note this doesn't affect "remote" and "foreign" annotations - passed by the :func:`.orm.foreign` and :func:`.orm.remote` - annotators. - - """ - - return sql_util._deep_deannotate(element, - values=("_orm_adapt", "parententity") - ) - - -def _orm_full_deannotate(element): - return sql_util._deep_deannotate(element) - - -class _ORMJoin(expression.Join): - """Extend Join to support ORM constructs as input.""" - - __visit_name__ = expression.Join.__visit_name__ - - def __init__( - self, - left, right, onclause=None, isouter=False, - _left_memo=None, _right_memo=None): - - left_info = inspection.inspect(left) - left_orm_info = getattr(left, '_joined_from_info', left_info) - - right_info = inspection.inspect(right) - adapt_to = right_info.selectable - - self._joined_from_info = right_info - - self._left_memo = _left_memo - self._right_memo = _right_memo - - if isinstance(onclause, util.string_types): - onclause = getattr(left_orm_info.entity, onclause) - - if isinstance(onclause, attributes.QueryableAttribute): - on_selectable = onclause.comparator._source_selectable() - prop = onclause.property - elif isinstance(onclause, MapperProperty): - prop = onclause - on_selectable = prop.parent.selectable - else: - prop = None - - if prop: - if sql_util.clause_is_present( - on_selectable, left_info.selectable): - adapt_from = on_selectable - else: - adapt_from = left_info.selectable - - pj, sj, source, dest, \ - secondary, target_adapter = prop._create_joins( - source_selectable=adapt_from, - dest_selectable=adapt_to, - source_polymorphic=True, - dest_polymorphic=True, - of_type=right_info.mapper) - - if sj is not None: - if isouter: - # note this is an inner join from secondary->right - right = sql.join(secondary, right, sj) - onclause = pj - else: - left = sql.join(left, secondary, pj, isouter) - onclause = sj - else: - onclause = pj - self._target_adapter = target_adapter - - expression.Join.__init__(self, left, right, onclause, isouter) - - if not prop and getattr(right_info, 'mapper', None) \ - and right_info.mapper.single: - # if single inheritance target and we are using a manual - # or implicit ON clause, augment it the same way we'd augment the - # WHERE. - single_crit = right_info.mapper._single_table_criterion - if single_crit is not None: - if right_info.is_aliased_class: - single_crit = right_info._adapter.traverse(single_crit) - self.onclause = self.onclause & single_crit - - def _splice_into_center(self, other): - """Splice a join into the center. 
- - Given join(a, b) and join(b, c), return join(a, b).join(c) - - """ - leftmost = other - while isinstance(leftmost, sql.Join): - leftmost = leftmost.left - - assert self.right is leftmost - - left = _ORMJoin( - self.left, other.left, - self.onclause, isouter=self.isouter, - _left_memo=self._left_memo, - _right_memo=other._left_memo - ) - - return _ORMJoin( - left, - other.right, - other.onclause, isouter=other.isouter, - _right_memo=other._right_memo - ) - - def join(self, right, onclause=None, isouter=False, join_to_left=None): - return _ORMJoin(self, right, onclause, isouter) - - def outerjoin(self, right, onclause=None, join_to_left=None): - return _ORMJoin(self, right, onclause, True) - - -def join(left, right, onclause=None, isouter=False, join_to_left=None): - """Produce an inner join between left and right clauses. - - :func:`.orm.join` is an extension to the core join interface - provided by :func:`.sql.expression.join()`, where the - left and right selectables may be not only core selectable - objects such as :class:`.Table`, but also mapped classes or - :class:`.AliasedClass` instances. The "on" clause can - be a SQL expression, or an attribute or string name - referencing a configured :func:`.relationship`. - - :func:`.orm.join` is not commonly needed in modern usage, - as its functionality is encapsulated within that of the - :meth:`.Query.join` method, which features a - significant amount of automation beyond :func:`.orm.join` - by itself. Explicit usage of :func:`.orm.join` - with :class:`.Query` involves usage of the - :meth:`.Query.select_from` method, as in:: - - from sqlalchemy.orm import join - session.query(User).\\ - select_from(join(User, Address, User.addresses)).\\ - filter(Address.email_address=='foo@bar.com') - - In modern SQLAlchemy the above join can be written more - succinctly as:: - - session.query(User).\\ - join(User.addresses).\\ - filter(Address.email_address=='foo@bar.com') - - See :meth:`.Query.join` for information on modern usage - of ORM level joins. - - .. versionchanged:: 0.8.1 - the ``join_to_left`` parameter - is no longer used, and is deprecated. - - """ - return _ORMJoin(left, right, onclause, isouter) - - -def outerjoin(left, right, onclause=None, join_to_left=None): - """Produce a left outer join between left and right clauses. - - This is the "outer join" version of the :func:`.orm.join` function, - featuring the same behavior except that an OUTER JOIN is generated. - See that function's documentation for other usage details. - - """ - return _ORMJoin(left, right, onclause, True) - - -def with_parent(instance, prop): - """Create filtering criterion that relates this query's primary entity - to the given related instance, using established :func:`.relationship()` - configuration. - - The SQL rendered is the same as that rendered when a lazy loader - would fire off from the given parent on that attribute, meaning - that the appropriate state is taken from the parent object in - Python without the need to render joins to the parent table - in the rendered statement. - - .. versionchanged:: 0.6.4 - This method accepts parent instances in all - persistence states, including transient, persistent, and detached. - Only the requisite primary key/foreign key attributes need to - be populated. Previous versions didn't work with transient - instances. - - :param instance: - An instance which has some :func:`.relationship`. 
- - :param property: - String property name, or class-bound attribute, which indicates - what relationship from the instance should be used to reconcile the - parent/child relationship. - - """ - if isinstance(prop, util.string_types): - mapper = object_mapper(instance) - prop = getattr(mapper.class_, prop).property - elif isinstance(prop, attributes.QueryableAttribute): - prop = prop.property - - return prop._with_parent(instance) - - -def has_identity(object): - """Return True if the given object has a database - identity. - - This typically corresponds to the object being - in either the persistent or detached state. - - .. seealso:: - - :func:`.was_deleted` - - """ - state = attributes.instance_state(object) - return state.has_identity - - -def was_deleted(object): - """Return True if the given object was deleted - within a session flush. - - .. versionadded:: 0.8.0 - - """ - - state = attributes.instance_state(object) - return state.deleted - - -def randomize_unitofwork(): - """Use random-ordering sets within the unit of work in order - to detect unit of work sorting issues. - - This is a utility function that can be used to help reproduce - inconsistent unit of work sorting issues. For example, - if two kinds of objects A and B are being inserted, and - B has a foreign key reference to A - the A must be inserted first. - However, if there is no relationship between A and B, the unit of work - won't know to perform this sorting, and an operation may or may not - fail, depending on how the ordering works out. Since Python sets - and dictionaries have non-deterministic ordering, such an issue may - occur on some runs and not on others, and in practice it tends to - have a great dependence on the state of the interpreter. This leads - to so-called "heisenbugs" where changing entirely irrelevant aspects - of the test program still cause the failure behavior to change. - - By calling ``randomize_unitofwork()`` when a script first runs, the - ordering of a key series of sets within the unit of work implementation - are randomized, so that the script can be minimized down to the - fundamental mapping and operation that's failing, while still reproducing - the issue on at least some runs. - - This utility is also available when running the test suite via the - ``--reversetop`` flag. - - .. versionadded:: 0.8.1 created a standalone version of the - ``--reversetop`` feature. - - """ - from sqlalchemy.orm import unitofwork, session, mapper, dependency - from sqlalchemy.util import topological - from sqlalchemy.testing.util import RandomSet - topological.set = unitofwork.set = session.set = mapper.set = \ - dependency.set = RandomSet diff --git a/python/sqlalchemy/pool.py b/python/sqlalchemy/pool.py deleted file mode 100644 index 4dd954fc..00000000 --- a/python/sqlalchemy/pool.py +++ /dev/null @@ -1,1367 +0,0 @@ -# sqlalchemy/pool.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -"""Connection pooling for DB-API connections. - -Provides a number of connection pool implementations for a variety of -usage scenarios and thread behavior requirements imposed by the -application, DB-API or database itself. - -Also provides a DB-API 2.0 connection proxying mechanism allowing -regular DB-API connect() methods to be transparently managed by a -SQLAlchemy connection pool. -""" - -import time -import traceback -import weakref - -from . 
import exc, log, event, interfaces, util -from .util import queue as sqla_queue -from .util import threading, memoized_property, \ - chop_traceback - -from collections import deque -proxies = {} - - -def manage(module, **params): - """Return a proxy for a DB-API module that automatically - pools connections. - - Given a DB-API 2.0 module and pool management parameters, returns - a proxy for the module that will automatically pool connections, - creating new connection pools for each distinct set of connection - arguments sent to the decorated module's connect() function. - - :param module: a DB-API 2.0 database module - - :param poolclass: the class used by the pool module to provide - pooling. Defaults to :class:`.QueuePool`. - - :param \*\*params: will be passed through to *poolclass* - - """ - try: - return proxies[module] - except KeyError: - return proxies.setdefault(module, _DBProxy(module, **params)) - - -def clear_managers(): - """Remove all current DB-API 2.0 managers. - - All pools and connections are disposed. - """ - - for manager in proxies.values(): - manager.close() - proxies.clear() - -reset_rollback = util.symbol('reset_rollback') -reset_commit = util.symbol('reset_commit') -reset_none = util.symbol('reset_none') - - -class _ConnDialect(object): - - """partial implementation of :class:`.Dialect` - which provides DBAPI connection methods. - - When a :class:`.Pool` is combined with an :class:`.Engine`, - the :class:`.Engine` replaces this with its own - :class:`.Dialect`. - - """ - - def do_rollback(self, dbapi_connection): - dbapi_connection.rollback() - - def do_commit(self, dbapi_connection): - dbapi_connection.commit() - - def do_close(self, dbapi_connection): - dbapi_connection.close() - - -class Pool(log.Identified): - - """Abstract base class for connection pools.""" - - _dialect = _ConnDialect() - - def __init__(self, - creator, recycle=-1, echo=None, - use_threadlocal=False, - logging_name=None, - reset_on_return=True, - listeners=None, - events=None, - _dispatch=None, - _dialect=None): - """ - Construct a Pool. - - :param creator: a callable function that returns a DB-API - connection object. The function will be called with - parameters. - - :param recycle: If set to non -1, number of seconds between - connection recycling, which means upon checkout, if this - timeout is surpassed the connection will be closed and - replaced with a newly opened connection. Defaults to -1. - - :param logging_name: String identifier which will be used within - the "name" field of logging records generated within the - "sqlalchemy.pool" logger. Defaults to a hexstring of the object's - id. - - :param echo: If True, connections being pulled and retrieved - from the pool will be logged to the standard output, as well - as pool sizing information. Echoing can also be achieved by - enabling logging for the "sqlalchemy.pool" - namespace. Defaults to False. - - :param use_threadlocal: If set to True, repeated calls to - :meth:`connect` within the same application thread will be - guaranteed to return the same connection object, if one has - already been retrieved from the pool and has not been - returned yet. Offers a slight performance advantage at the - cost of individual transactions by default. The - :meth:`.Pool.unique_connection` method is provided to return - a consistenty unique connection to bypass this behavior - when the flag is set. - - .. warning:: The :paramref:`.Pool.use_threadlocal` flag - **does not affect the behavior** of :meth:`.Engine.connect`. 
- :meth:`.Engine.connect` makes use of the - :meth:`.Pool.unique_connection` method which **does not use thread - local context**. To produce a :class:`.Connection` which refers - to the :meth:`.Pool.connect` method, use - :meth:`.Engine.contextual_connect`. - - Note that other SQLAlchemy connectivity systems such as - :meth:`.Engine.execute` as well as the orm - :class:`.Session` make use of - :meth:`.Engine.contextual_connect` internally, so these functions - are compatible with the :paramref:`.Pool.use_threadlocal` setting. - - .. seealso:: - - :ref:`threadlocal_strategy` - contains detail on the - "threadlocal" engine strategy, which provides a more comprehensive - approach to "threadlocal" connectivity for the specific - use case of using :class:`.Engine` and :class:`.Connection` objects - directly. - - :param reset_on_return: Determine steps to take on - connections as they are returned to the pool. - reset_on_return can have any of these values: - - * ``"rollback"`` - call rollback() on the connection, - to release locks and transaction resources. - This is the default value. The vast majority - of use cases should leave this value set. - * ``True`` - same as 'rollback', this is here for - backwards compatibility. - * ``"commit"`` - call commit() on the connection, - to release locks and transaction resources. - A commit here may be desirable for databases that - cache query plans if a commit is emitted, - such as Microsoft SQL Server. However, this - value is more dangerous than 'rollback' because - any data changes present on the transaction - are committed unconditionally. - * ``None`` - don't do anything on the connection. - This setting should only be made on a database - that has no transaction support at all, - namely MySQL MyISAM. By not doing anything, - performance can be improved. This - setting should **never be selected** for a - database that supports transactions, - as it will lead to deadlocks and stale - state. - * ``"none"`` - same as ``None`` - - .. versionadded:: 0.9.10 - - * ``False`` - same as None, this is here for - backwards compatibility. - - .. versionchanged:: 0.7.6 - :paramref:`.Pool.reset_on_return` accepts ``"rollback"`` - and ``"commit"`` arguments. - - :param events: a list of 2-tuples, each of the form - ``(callable, target)`` which will be passed to :func:`.event.listen` - upon construction. Provided here so that event listeners - can be assigned via :func:`.create_engine` before dialect-level - listeners are applied. - - :param listeners: Deprecated. A list of - :class:`~sqlalchemy.interfaces.PoolListener`-like objects or - dictionaries of callables that receive events when DB-API - connections are created, checked out and checked in to the - pool. This has been superseded by - :func:`~sqlalchemy.event.listen`. 
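A hedged construction sketch, using ``sqlite3`` to stand in for any DB-API module (``getconn`` and ``on_connect`` are illustrative names; the ``'connect'`` target is the documented :meth:`.PoolEvents.connect` hook)::

    import sqlite3
    from sqlalchemy.pool import QueuePool

    def getconn():
        # legacy no-arg "creator"; the pool wraps it automatically
        return sqlite3.connect(':memory:')

    def on_connect(dbapi_con, con_record):
        print('new DBAPI connection:', dbapi_con)

    pool = QueuePool(getconn, recycle=3600, reset_on_return='rollback',
                     events=[(on_connect, 'connect')])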
- - """ - if logging_name: - self.logging_name = self._orig_logging_name = logging_name - else: - self._orig_logging_name = None - - log.instance_logger(self, echoflag=echo) - self._threadconns = threading.local() - self._creator = creator - self._recycle = recycle - self._invalidate_time = 0 - self._use_threadlocal = use_threadlocal - if reset_on_return in ('rollback', True, reset_rollback): - self._reset_on_return = reset_rollback - elif reset_on_return in ('none', None, False, reset_none): - self._reset_on_return = reset_none - elif reset_on_return in ('commit', reset_commit): - self._reset_on_return = reset_commit - else: - raise exc.ArgumentError( - "Invalid value for 'reset_on_return': %r" - % reset_on_return) - - self.echo = echo - - if _dispatch: - self.dispatch._update(_dispatch, only_propagate=False) - if _dialect: - self._dialect = _dialect - if events: - for fn, target in events: - event.listen(self, target, fn) - if listeners: - util.warn_deprecated( - "The 'listeners' argument to Pool (and " - "create_engine()) is deprecated. Use event.listen().") - for l in listeners: - self.add_listener(l) - - @property - def _creator(self): - return self.__dict__['_creator'] - - @_creator.setter - def _creator(self, creator): - self.__dict__['_creator'] = creator - self._invoke_creator = self._should_wrap_creator(creator) - - def _should_wrap_creator(self, creator): - """Detect if creator accepts a single argument, or is sent - as a legacy style no-arg function. - - """ - - try: - argspec = util.get_callable_argspec(self._creator, no_self=True) - except TypeError: - return lambda crec: creator() - - defaulted = argspec[3] is not None and len(argspec[3]) or 0 - positionals = len(argspec[0]) - defaulted - - # look for the exact arg signature that DefaultStrategy - # sends us - if (argspec[0], argspec[3]) == (['connection_record'], (None,)): - return creator - # or just a single positional - elif positionals == 1: - return creator - # all other cases, just wrap and assume legacy "creator" callable - # thing - else: - return lambda crec: creator() - - def _close_connection(self, connection): - self.logger.debug("Closing connection %r", connection) - try: - self._dialect.do_close(connection) - except Exception: - self.logger.error("Exception closing connection %r", - connection, exc_info=True) - - @util.deprecated( - 2.7, "Pool.add_listener is deprecated. Use event.listen()") - def add_listener(self, listener): - """Add a :class:`.PoolListener`-like object to this pool. - - ``listener`` may be an object that implements some or all of - PoolListener, or a dictionary of callables containing implementations - of some or all of the named methods in PoolListener. - - """ - interfaces.PoolListener._adapt_listener(self, listener) - - def unique_connection(self): - """Produce a DBAPI connection that is not referenced by any - thread-local context. - - This method is equivalent to :meth:`.Pool.connect` when the - :paramref:`.Pool.use_threadlocal` flag is not set to True. - When :paramref:`.Pool.use_threadlocal` is True, the - :meth:`.Pool.unique_connection` method provides a means of bypassing - the threadlocal context. - - """ - return _ConnectionFairy._checkout(self) - - def _create_connection(self): - """Called by subclasses to create a new ConnectionRecord.""" - - return _ConnectionRecord(self) - - def _invalidate(self, connection, exception=None): - """Mark all connections established within the generation - of the given connection as invalidated. 
- - If this pool's last invalidate time is before when the given - connection was created, update the timestamp til now. Otherwise, - no action is performed. - - Connections with a start time prior to this pool's invalidation - time will be recycled upon next checkout. - """ - rec = getattr(connection, "_connection_record", None) - if not rec or self._invalidate_time < rec.starttime: - self._invalidate_time = time.time() - if getattr(connection, 'is_valid', False): - connection.invalidate(exception) - - def recreate(self): - """Return a new :class:`.Pool`, of the same class as this one - and configured with identical creation arguments. - - This method is used in conjunction with :meth:`dispose` - to close out an entire :class:`.Pool` and create a new one in - its place. - - """ - - raise NotImplementedError() - - def dispose(self): - """Dispose of this pool. - - This method leaves the possibility of checked-out connections - remaining open, as it only affects connections that are - idle in the pool. - - See also the :meth:`Pool.recreate` method. - - """ - - raise NotImplementedError() - - def connect(self): - """Return a DBAPI connection from the pool. - - The connection is instrumented such that when its - ``close()`` method is called, the connection will be returned to - the pool. - - """ - if not self._use_threadlocal: - return _ConnectionFairy._checkout(self) - - try: - rec = self._threadconns.current() - except AttributeError: - pass - else: - if rec is not None: - return rec._checkout_existing() - - return _ConnectionFairy._checkout(self, self._threadconns) - - def _return_conn(self, record): - """Given a _ConnectionRecord, return it to the :class:`.Pool`. - - This method is called when an instrumented DBAPI connection - has its ``close()`` method called. - - """ - if self._use_threadlocal: - try: - del self._threadconns.current - except AttributeError: - pass - self._do_return_conn(record) - - def _do_get(self): - """Implementation for :meth:`get`, supplied by subclasses.""" - - raise NotImplementedError() - - def _do_return_conn(self, conn): - """Implementation for :meth:`return_conn`, supplied by subclasses.""" - - raise NotImplementedError() - - def status(self): - raise NotImplementedError() - - -class _ConnectionRecord(object): - - """Internal object which maintains an individual DBAPI connection - referenced by a :class:`.Pool`. - - The :class:`._ConnectionRecord` object always exists for any particular - DBAPI connection whether or not that DBAPI connection has been - "checked out". This is in contrast to the :class:`._ConnectionFairy` - which is only a public facade to the DBAPI connection while it is checked - out. - - A :class:`._ConnectionRecord` may exist for a span longer than that - of a single DBAPI connection. For example, if the - :meth:`._ConnectionRecord.invalidate` - method is called, the DBAPI connection associated with this - :class:`._ConnectionRecord` - will be discarded, but the :class:`._ConnectionRecord` may be used again, - in which case a new DBAPI connection is produced when the :class:`.Pool` - next uses this record. - - The :class:`._ConnectionRecord` is delivered along with connection - pool events, including :meth:`.PoolEvents.connect` and - :meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still - remains an internal object whose API and internals may change. - - .. 
seealso:: - - :class:`._ConnectionFairy` - - """ - - def __init__(self, pool): - self.__pool = pool - self.connection = self.__connect() - self.finalize_callback = deque() - - pool.dispatch.first_connect.\ - for_modify(pool.dispatch).\ - exec_once(self.connection, self) - pool.dispatch.connect(self.connection, self) - - connection = None - """A reference to the actual DBAPI connection being tracked. - - May be ``None`` if this :class:`._ConnectionRecord` has been marked - as invalidated; a new DBAPI connection may replace it if the owning - pool calls upon this :class:`._ConnectionRecord` to reconnect. - - """ - - _soft_invalidate_time = 0 - - @util.memoized_property - def info(self): - """The ``.info`` dictionary associated with the DBAPI connection. - - This dictionary is shared among the :attr:`._ConnectionFairy.info` - and :attr:`.Connection.info` accessors. - - """ - return {} - - @classmethod - def checkout(cls, pool): - rec = pool._do_get() - try: - dbapi_connection = rec.get_connection() - except: - with util.safe_reraise(): - rec.checkin() - echo = pool._should_log_debug() - fairy = _ConnectionFairy(dbapi_connection, rec, echo) - rec.fairy_ref = weakref.ref( - fairy, - lambda ref: _finalize_fairy and - _finalize_fairy( - dbapi_connection, - rec, pool, ref, echo) - ) - _refs.add(rec) - if echo: - pool.logger.debug("Connection %r checked out from pool", - dbapi_connection) - return fairy - - def checkin(self): - self.fairy_ref = None - connection = self.connection - pool = self.__pool - while self.finalize_callback: - finalizer = self.finalize_callback.pop() - finalizer(connection) - if pool.dispatch.checkin: - pool.dispatch.checkin(connection, self) - pool._return_conn(self) - - def close(self): - if self.connection is not None: - self.__close() - - def invalidate(self, e=None, soft=False): - """Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`. - - This method is called for all connection invalidations, including - when the :meth:`._ConnectionFairy.invalidate` or - :meth:`.Connection.invalidate` methods are called, as well as when any - so-called "automatic invalidation" condition occurs. - - :param e: an exception object indicating a reason for the invalidation. - - :param soft: if True, the connection isn't closed; instead, this - connection will be recycled on next checkout. - - .. versionadded:: 1.0.3 - - .. 
seealso:: - - :ref:`pool_connection_invalidation` - - """ - # already invalidated - if self.connection is None: - return - if soft: - self.__pool.dispatch.soft_invalidate(self.connection, self, e) - else: - self.__pool.dispatch.invalidate(self.connection, self, e) - if e is not None: - self.__pool.logger.info( - "%sInvalidate connection %r (reason: %s:%s)", - "Soft " if soft else "", - self.connection, e.__class__.__name__, e) - else: - self.__pool.logger.info( - "%sInvalidate connection %r", - "Soft " if soft else "", - self.connection) - if soft: - self._soft_invalidate_time = time.time() - else: - self.__close() - self.connection = None - - def get_connection(self): - recycle = False - if self.connection is None: - self.info.clear() - self.connection = self.__connect() - if self.__pool.dispatch.connect: - self.__pool.dispatch.connect(self.connection, self) - elif self.__pool._recycle > -1 and \ - time.time() - self.starttime > self.__pool._recycle: - self.__pool.logger.info( - "Connection %r exceeded timeout; recycling", - self.connection) - recycle = True - elif self.__pool._invalidate_time > self.starttime: - self.__pool.logger.info( - "Connection %r invalidated due to pool invalidation; " + - "recycling", - self.connection - ) - recycle = True - elif self._soft_invalidate_time > self.starttime: - self.__pool.logger.info( - "Connection %r invalidated due to local soft invalidation; " + - "recycling", - self.connection - ) - recycle = True - - if recycle: - self.__close() - self.info.clear() - - # ensure that if self.__connect() fails, - # we are not referring to the previous stale connection here - self.connection = None - self.connection = self.__connect() - - if self.__pool.dispatch.connect: - self.__pool.dispatch.connect(self.connection, self) - return self.connection - - def __close(self): - self.finalize_callback.clear() - self.__pool._close_connection(self.connection) - - def __connect(self): - try: - self.starttime = time.time() - connection = self.__pool._invoke_creator(self) - self.__pool.logger.debug("Created new connection %r", connection) - return connection - except Exception as e: - self.__pool.logger.debug("Error on connect(): %s", e) - raise - - -def _finalize_fairy(connection, connection_record, - pool, ref, echo, fairy=None): - """Cleanup for a :class:`._ConnectionFairy` whether or not it's already - been garbage collected. - - """ - _refs.discard(connection_record) - - if ref is not None and \ - connection_record.fairy_ref is not ref: - return - - if connection is not None: - if connection_record and echo: - pool.logger.debug("Connection %r being returned to pool", - connection) - - try: - fairy = fairy or _ConnectionFairy( - connection, connection_record, echo) - assert fairy.connection is connection - fairy._reset(pool) - - # Immediately close detached instances - if not connection_record: - pool._close_connection(connection) - except BaseException as e: - pool.logger.error( - "Exception during reset or similar", exc_info=True) - if connection_record: - connection_record.invalidate(e=e) - if not isinstance(e, Exception): - raise - - if connection_record: - connection_record.checkin() - - -_refs = set() - - -class _ConnectionFairy(object): - - """Proxies a DBAPI connection and provides return-on-dereference - support. - - This is an internal object used by the :class:`.Pool` implementation - to provide context management to a DBAPI connection delivered by - that :class:`.Pool`. 
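Since the fairy is what :meth:`.Pool.connect` hands back, invalidation can be driven from it directly; a sketch against the ``pool`` built in the earlier example::

    conn = pool.connect()   # a _ConnectionFairy proxying a DBAPI connection
    cur = conn.cursor()     # proxied through to the real connection
    conn.invalidate()       # discard the underlying DBAPI connection
    conn = pool.connect()   # the next checkout opens a fresh connection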
- - The name "fairy" is inspired by the fact that the - :class:`._ConnectionFairy` object's lifespan is transitory, as it lasts - only for the length of a specific DBAPI connection being checked out from - the pool, and additionally that as a transparent proxy, it is mostly - invisible. - - .. seealso:: - - :class:`._ConnectionRecord` - - """ - - def __init__(self, dbapi_connection, connection_record, echo): - self.connection = dbapi_connection - self._connection_record = connection_record - self._echo = echo - - connection = None - """A reference to the actual DBAPI connection being tracked.""" - - _connection_record = None - """A reference to the :class:`._ConnectionRecord` object associated - with the DBAPI connection. - - This is currently an internal accessor which is subject to change. - - """ - - _reset_agent = None - """Refer to an object with a ``.commit()`` and ``.rollback()`` method; - if non-None, the "reset-on-return" feature will call upon this object - rather than directly against the dialect-level do_rollback() and - do_commit() methods. - - In practice, a :class:`.Connection` assigns a :class:`.Transaction` object - to this variable when one is in scope so that the :class:`.Transaction` - takes the job of committing or rolling back on return if - :meth:`.Connection.close` is called while the :class:`.Transaction` - still exists. - - This is essentially an "event handler" of sorts but is simplified as an - instance variable both for performance/simplicity as well as that there - can only be one "reset agent" at a time. - """ - - @classmethod - def _checkout(cls, pool, threadconns=None, fairy=None): - if not fairy: - fairy = _ConnectionRecord.checkout(pool) - - fairy._pool = pool - fairy._counter = 0 - - if threadconns is not None: - threadconns.current = weakref.ref(fairy) - - if fairy.connection is None: - raise exc.InvalidRequestError("This connection is closed") - fairy._counter += 1 - - if not pool.dispatch.checkout or fairy._counter != 1: - return fairy - - # Pool listeners can trigger a reconnection on checkout - attempts = 2 - while attempts > 0: - try: - pool.dispatch.checkout(fairy.connection, - fairy._connection_record, - fairy) - return fairy - except exc.DisconnectionError as e: - pool.logger.info( - "Disconnection detected on checkout: %s", e) - fairy._connection_record.invalidate(e) - try: - fairy.connection = \ - fairy._connection_record.get_connection() - except: - with util.safe_reraise(): - fairy._connection_record.checkin() - - attempts -= 1 - - pool.logger.info("Reconnection attempts exhausted on checkout") - fairy.invalidate() - raise exc.InvalidRequestError("This connection is closed") - - def _checkout_existing(self): - return _ConnectionFairy._checkout(self._pool, fairy=self) - - def _checkin(self): - _finalize_fairy(self.connection, self._connection_record, - self._pool, None, self._echo, fairy=self) - self.connection = None - self._connection_record = None - - _close = _checkin - - def _reset(self, pool): - if pool.dispatch.reset: - pool.dispatch.reset(self, self._connection_record) - if pool._reset_on_return is reset_rollback: - if self._echo: - pool.logger.debug("Connection %s rollback-on-return%s", - self.connection, - ", via agent" - if self._reset_agent else "") - if self._reset_agent: - self._reset_agent.rollback() - else: - pool._dialect.do_rollback(self) - elif pool._reset_on_return is reset_commit: - if self._echo: - pool.logger.debug("Connection %s commit-on-return%s", - self.connection, - ", via agent" - if self._reset_agent else "") - 
if self._reset_agent: - self._reset_agent.commit() - else: - pool._dialect.do_commit(self) - - @property - def _logger(self): - return self._pool.logger - - @property - def is_valid(self): - """Return True if this :class:`._ConnectionFairy` still refers - to an active DBAPI connection.""" - - return self.connection is not None - - @util.memoized_property - def info(self): - """Info dictionary associated with the underlying DBAPI connection - referred to by this :class:`.ConnectionFairy`, allowing user-defined - data to be associated with the connection. - - The data here will follow along with the DBAPI connection including - after it is returned to the connection pool and used again - in subsequent instances of :class:`._ConnectionFairy`. It is shared - with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info` - accessors. - - """ - return self._connection_record.info - - def invalidate(self, e=None, soft=False): - """Mark this connection as invalidated. - - This method can be called directly, and is also called as a result - of the :meth:`.Connection.invalidate` method. When invoked, - the DBAPI connection is immediately closed and discarded from - further use by the pool. The invalidation mechanism proceeds - via the :meth:`._ConnectionRecord.invalidate` internal method. - - :param e: an exception object indicating a reason for the invalidation. - - :param soft: if True, the connection isn't closed; instead, this - connection will be recycled on next checkout. - - .. versionadded:: 1.0.3 - - .. seealso:: - - :ref:`pool_connection_invalidation` - - """ - - if self.connection is None: - util.warn("Can't invalidate an already-closed connection.") - return - if self._connection_record: - self._connection_record.invalidate(e=e, soft=soft) - if not soft: - self.connection = None - self._checkin() - - def cursor(self, *args, **kwargs): - """Return a new DBAPI cursor for the underlying connection. - - This method is a proxy for the ``connection.cursor()`` DBAPI - method. - - """ - return self.connection.cursor(*args, **kwargs) - - def __getattr__(self, key): - return getattr(self.connection, key) - - def detach(self): - """Separate this connection from its Pool. - - This means that the connection will no longer be returned to the - pool when closed, and will instead be literally closed. The - containing ConnectionRecord is separated from the DB-API connection, - and will create a new connection when next used. - - Note that any overall connection limiting constraints imposed by a - Pool implementation may be violated after a detach, as the detached - connection is removed from the pool's knowledge and control. - """ - - if self._connection_record is not None: - _refs.remove(self._connection_record) - self._connection_record.fairy_ref = None - self._connection_record.connection = None - # TODO: should this be _return_conn? - self._pool._do_return_conn(self._connection_record) - self.info = self.info.copy() - self._connection_record = None - - def close(self): - self._counter -= 1 - if self._counter == 0: - self._checkin() - - -class SingletonThreadPool(Pool): - - """A Pool that maintains one connection per thread. - - Maintains one connection per each thread, never moving a connection to a - thread other than the one which it was created in. - - .. warning:: the :class:`.SingletonThreadPool` will call ``.close()`` - on arbitrary connections that exist beyond the size setting of - ``pool_size``, e.g. if more unique **thread identities** - than what ``pool_size`` states are used. 
This cleanup is - non-deterministic and not sensitive to whether or not the connections - linked to those thread identities are currently in use. - - :class:`.SingletonThreadPool` may be improved in a future release, - however in its current status it is generally used only for test - scenarios using a SQLite ``:memory:`` database and is not recommended - for production use. - - - Options are the same as those of :class:`.Pool`, as well as: - - :param pool_size: The number of threads in which to maintain connections - at once. Defaults to five. - - :class:`.SingletonThreadPool` is used by the SQLite dialect - automatically when a memory-based database is used. - See :ref:`sqlite_toplevel`. - - """ - - def __init__(self, creator, pool_size=5, **kw): - kw['use_threadlocal'] = True - Pool.__init__(self, creator, **kw) - self._conn = threading.local() - self._all_conns = set() - self.size = pool_size - - def recreate(self): - self.logger.info("Pool recreating") - return self.__class__(self._creator, - pool_size=self.size, - recycle=self._recycle, - echo=self.echo, - logging_name=self._orig_logging_name, - use_threadlocal=self._use_threadlocal, - reset_on_return=self._reset_on_return, - _dispatch=self.dispatch, - _dialect=self._dialect) - - def dispose(self): - """Dispose of this pool.""" - - for conn in self._all_conns: - try: - conn.close() - except Exception: - # pysqlite won't even let you close a conn from a thread - # that didn't create it - pass - - self._all_conns.clear() - - def _cleanup(self): - while len(self._all_conns) >= self.size: - c = self._all_conns.pop() - c.close() - - def status(self): - return "SingletonThreadPool id:%d size: %d" % \ - (id(self), len(self._all_conns)) - - def _do_return_conn(self, conn): - pass - - def _do_get(self): - try: - c = self._conn.current() - if c: - return c - except AttributeError: - pass - c = self._create_connection() - self._conn.current = weakref.ref(c) - if len(self._all_conns) >= self.size: - self._cleanup() - self._all_conns.add(c) - return c - - -class QueuePool(Pool): - - """A :class:`.Pool` that imposes a limit on the number of open connections. - - :class:`.QueuePool` is the default pooling implementation used for - all :class:`.Engine` objects, unless the SQLite dialect is in use. - - """ - - def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, - **kw): - """ - Construct a QueuePool. - - :param creator: a callable function that returns a DB-API - connection object, same as that of :paramref:`.Pool.creator`. - - :param pool_size: The size of the pool to be maintained, - defaults to 5. This is the largest number of connections that - will be kept persistently in the pool. Note that the pool - begins with no connections; once this number of connections - is requested, that number of connections will remain. - ``pool_size`` can be set to 0 to indicate no size limit; to - disable pooling, use a :class:`~sqlalchemy.pool.NullPool` - instead. - - :param max_overflow: The maximum overflow size of the - pool. When the number of checked-out connections reaches the - size set in pool_size, additional connections will be - returned up to this limit. When those additional connections - are returned to the pool, they are disconnected and - discarded. It follows then that the total number of - simultaneous connections the pool will allow is pool_size + - `max_overflow`, and the total number of "sleeping" - connections the pool will allow is pool_size. 
`max_overflow` - can be set to -1 to indicate no overflow limit; no limit - will be placed on the total number of concurrent - connections. Defaults to 10. - - :param timeout: The number of seconds to wait before giving up - on returning a connection. Defaults to 30. - - :param \**kw: Other keyword arguments including - :paramref:`.Pool.recycle`, :paramref:`.Pool.echo`, - :paramref:`.Pool.reset_on_return` and others are passed to the - :class:`.Pool` constructor. - - """ - Pool.__init__(self, creator, **kw) - self._pool = sqla_queue.Queue(pool_size) - self._overflow = 0 - pool_size - self._max_overflow = max_overflow - self._timeout = timeout - self._overflow_lock = threading.Lock() - - def _do_return_conn(self, conn): - try: - self._pool.put(conn, False) - except sqla_queue.Full: - try: - conn.close() - finally: - self._dec_overflow() - - def _do_get(self): - use_overflow = self._max_overflow > -1 - - try: - wait = use_overflow and self._overflow >= self._max_overflow - return self._pool.get(wait, self._timeout) - except sqla_queue.Empty: - if use_overflow and self._overflow >= self._max_overflow: - if not wait: - return self._do_get() - else: - raise exc.TimeoutError( - "QueuePool limit of size %d overflow %d reached, " - "connection timed out, timeout %d" % - (self.size(), self.overflow(), self._timeout)) - - if self._inc_overflow(): - try: - return self._create_connection() - except: - with util.safe_reraise(): - self._dec_overflow() - else: - return self._do_get() - - def _inc_overflow(self): - if self._max_overflow == -1: - self._overflow += 1 - return True - with self._overflow_lock: - if self._overflow < self._max_overflow: - self._overflow += 1 - return True - else: - return False - - def _dec_overflow(self): - if self._max_overflow == -1: - self._overflow -= 1 - return True - with self._overflow_lock: - self._overflow -= 1 - return True - - def recreate(self): - self.logger.info("Pool recreating") - return self.__class__(self._creator, pool_size=self._pool.maxsize, - max_overflow=self._max_overflow, - timeout=self._timeout, - recycle=self._recycle, echo=self.echo, - logging_name=self._orig_logging_name, - use_threadlocal=self._use_threadlocal, - reset_on_return=self._reset_on_return, - _dispatch=self.dispatch, - _dialect=self._dialect) - - def dispose(self): - while True: - try: - conn = self._pool.get(False) - conn.close() - except sqla_queue.Empty: - break - - self._overflow = 0 - self.size() - self.logger.info("Pool disposed. %s", self.status()) - - def status(self): - return "Pool size: %d Connections in pool: %d "\ - "Current Overflow: %d Current Checked out "\ - "connections: %d" % (self.size(), - self.checkedin(), - self.overflow(), - self.checkedout()) - - def size(self): - return self._pool.maxsize - - def checkedin(self): - return self._pool.qsize() - - def overflow(self): - return self._overflow - - def checkedout(self): - return self._pool.maxsize - self._pool.qsize() + self._overflow - - -class NullPool(Pool): - - """A Pool which does not pool connections. - - Instead it literally opens and closes the underlying DB-API connection - per each connection open/close. - - Reconnect-related functions such as ``recycle`` and connection - invalidation are not supported by this Pool implementation, since - no connections are held persistently. - - .. versionchanged:: 0.7 - :class:`.NullPool` is used by the SQlite dialect automatically - when a file-based database is used. See :ref:`sqlite_toplevel`. 
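In application code these pool classes are usually selected through :func:`.create_engine` rather than constructed directly; a sketch with placeholder URLs::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool

    # default QueuePool, sized explicitly: at most 5 + 10 open connections
    engine = create_engine('mysql://user:pw@host/db',
                           pool_size=5, max_overflow=10)

    # no pooling: every checkout opens a connection, close() really closes
    engine2 = create_engine('sqlite:////tmp/app.db', poolclass=NullPool)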
- - """ - - def status(self): - return "NullPool" - - def _do_return_conn(self, conn): - conn.close() - - def _do_get(self): - return self._create_connection() - - def recreate(self): - self.logger.info("Pool recreating") - - return self.__class__(self._creator, - recycle=self._recycle, - echo=self.echo, - logging_name=self._orig_logging_name, - use_threadlocal=self._use_threadlocal, - reset_on_return=self._reset_on_return, - _dispatch=self.dispatch, - _dialect=self._dialect) - - def dispose(self): - pass - - -class StaticPool(Pool): - - """A Pool of exactly one connection, used for all requests. - - Reconnect-related functions such as ``recycle`` and connection - invalidation (which is also used to support auto-reconnect) are not - currently supported by this Pool implementation but may be implemented - in a future release. - - """ - - @memoized_property - def _conn(self): - return self._creator() - - @memoized_property - def connection(self): - return _ConnectionRecord(self) - - def status(self): - return "StaticPool" - - def dispose(self): - if '_conn' in self.__dict__: - self._conn.close() - self._conn = None - - def recreate(self): - self.logger.info("Pool recreating") - return self.__class__(creator=self._creator, - recycle=self._recycle, - use_threadlocal=self._use_threadlocal, - reset_on_return=self._reset_on_return, - echo=self.echo, - logging_name=self._orig_logging_name, - _dispatch=self.dispatch, - _dialect=self._dialect) - - def _create_connection(self): - return self._conn - - def _do_return_conn(self, conn): - pass - - def _do_get(self): - return self.connection - - -class AssertionPool(Pool): - - """A :class:`.Pool` that allows at most one checked out connection at - any given time. - - This will raise an exception if more than one connection is checked out - at a time. Useful for debugging code that is using more connections - than desired. - - .. versionchanged:: 0.7 - :class:`.AssertionPool` also logs a traceback of where - the original connection was checked out, and reports - this in the assertion error raised. - - """ - - def __init__(self, *args, **kw): - self._conn = None - self._checked_out = False - self._store_traceback = kw.pop('store_traceback', True) - self._checkout_traceback = None - Pool.__init__(self, *args, **kw) - - def status(self): - return "AssertionPool" - - def _do_return_conn(self, conn): - if not self._checked_out: - raise AssertionError("connection is not checked out") - self._checked_out = False - assert conn is self._conn - - def dispose(self): - self._checked_out = False - if self._conn: - self._conn.close() - - def recreate(self): - self.logger.info("Pool recreating") - return self.__class__(self._creator, echo=self.echo, - logging_name=self._orig_logging_name, - _dispatch=self.dispatch, - _dialect=self._dialect) - - def _do_get(self): - if self._checked_out: - if self._checkout_traceback: - suffix = ' at:\n%s' % ''.join( - chop_traceback(self._checkout_traceback)) - else: - suffix = '' - raise AssertionError("connection is already checked out" + suffix) - - if not self._conn: - self._conn = self._create_connection() - - self._checked_out = True - if self._store_traceback: - self._checkout_traceback = traceback.format_stack() - return self._conn - - -class _DBProxy(object): - - """Layers connection pooling behavior on top of a standard DB-API module. - - Proxies a DB-API 2.0 connect() call to a connection pool keyed to the - specific connect parameters. Other functions and attributes are delegated - to the underlying DB-API module. 
- """ - - def __init__(self, module, poolclass=QueuePool, **kw): - """Initializes a new proxy. - - module - a DB-API 2.0 module - - poolclass - a Pool class, defaulting to QueuePool - - Other parameters are sent to the Pool object's constructor. - - """ - - self.module = module - self.kw = kw - self.poolclass = poolclass - self.pools = {} - self._create_pool_mutex = threading.Lock() - - def close(self): - for key in list(self.pools): - del self.pools[key] - - def __del__(self): - self.close() - - def __getattr__(self, key): - return getattr(self.module, key) - - def get_pool(self, *args, **kw): - key = self._serialize(*args, **kw) - try: - return self.pools[key] - except KeyError: - self._create_pool_mutex.acquire() - try: - if key not in self.pools: - kw.pop('sa_pool_key', None) - pool = self.poolclass( - lambda: self.module.connect(*args, **kw), **self.kw) - self.pools[key] = pool - return pool - else: - return self.pools[key] - finally: - self._create_pool_mutex.release() - - def connect(self, *args, **kw): - """Activate a connection to the database. - - Connect to the database using this DBProxy's module and the given - connect arguments. If the arguments match an existing pool, the - connection will be returned from the pool's current thread-local - connection instance, or if there is no thread-local connection - instance it will be checked out from the set of pooled connections. - - If the pool has no available connections and allows new connections - to be created, a new database connection will be made. - - """ - - return self.get_pool(*args, **kw).connect() - - def dispose(self, *args, **kw): - """Dispose the pool referenced by the given connect arguments.""" - - key = self._serialize(*args, **kw) - try: - del self.pools[key] - except KeyError: - pass - - def _serialize(self, *args, **kw): - if "sa_pool_key" in kw: - return kw['sa_pool_key'] - - return tuple( - list(args) + - [(k, kw[k]) for k in sorted(kw)] - ) diff --git a/python/sqlalchemy/processors.py b/python/sqlalchemy/processors.py deleted file mode 100644 index 6575fad1..00000000 --- a/python/sqlalchemy/processors.py +++ /dev/null @@ -1,155 +0,0 @@ -# sqlalchemy/processors.py -# Copyright (C) 2010-2015 the SQLAlchemy authors and contributors -# -# Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""defines generic type conversion functions, as used in bind and result -processors. - -They all share one common characteristic: None is passed through unchanged. - -""" - -import codecs -import re -import datetime -from . import util - - -def str_to_datetime_processor_factory(regexp, type_): - rmatch = regexp.match - # Even on python2.6 datetime.strptime is both slower than this code - # and it does not support microseconds. - has_named_groups = bool(regexp.groupindex) - - def process(value): - if value is None: - return None - else: - try: - m = rmatch(value) - except TypeError: - raise ValueError("Couldn't parse %s string '%r' " - "- value is not a string." 
% - (type_.__name__, value)) - if m is None: - raise ValueError("Couldn't parse %s string: " - "'%s'" % (type_.__name__, value)) - if has_named_groups: - groups = m.groupdict(0) - return type_(**dict(list(zip( - iter(groups.keys()), - list(map(int, iter(groups.values()))) - )))) - else: - return type_(*list(map(int, m.groups(0)))) - return process - - -def boolean_to_int(value): - if value is None: - return None - else: - return int(value) - - -def py_fallback(): - def to_unicode_processor_factory(encoding, errors=None): - decoder = codecs.getdecoder(encoding) - - def process(value): - if value is None: - return None - else: - # decoder returns a tuple: (value, len). Simply dropping the - # len part is safe: it is done that way in the normal - # 'xx'.decode(encoding) code path. - return decoder(value, errors)[0] - return process - - def to_conditional_unicode_processor_factory(encoding, errors=None): - decoder = codecs.getdecoder(encoding) - - def process(value): - if value is None: - return None - elif isinstance(value, util.text_type): - return value - else: - # decoder returns a tuple: (value, len). Simply dropping the - # len part is safe: it is done that way in the normal - # 'xx'.decode(encoding) code path. - return decoder(value, errors)[0] - return process - - def to_decimal_processor_factory(target_class, scale): - fstring = "%%.%df" % scale - - def process(value): - if value is None: - return None - else: - return target_class(fstring % value) - return process - - def to_float(value): - if value is None: - return None - else: - return float(value) - - def to_str(value): - if value is None: - return None - else: - return str(value) - - def int_to_boolean(value): - if value is None: - return None - else: - return value and True or False - - DATETIME_RE = re.compile( - "(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)(?:\.(\d+))?") - TIME_RE = re.compile("(\d+):(\d+):(\d+)(?:\.(\d+))?") - DATE_RE = re.compile("(\d+)-(\d+)-(\d+)") - - str_to_datetime = str_to_datetime_processor_factory(DATETIME_RE, - datetime.datetime) - str_to_time = str_to_datetime_processor_factory(TIME_RE, datetime.time) - str_to_date = str_to_datetime_processor_factory(DATE_RE, datetime.date) - return locals() - -try: - from sqlalchemy.cprocessors import UnicodeResultProcessor, \ - DecimalResultProcessor, \ - to_float, to_str, int_to_boolean, \ - str_to_datetime, str_to_time, \ - str_to_date - - def to_unicode_processor_factory(encoding, errors=None): - if errors is not None: - return UnicodeResultProcessor(encoding, errors).process - else: - return UnicodeResultProcessor(encoding).process - - def to_conditional_unicode_processor_factory(encoding, errors=None): - if errors is not None: - return UnicodeResultProcessor(encoding, errors).conditional_process - else: - return UnicodeResultProcessor(encoding).conditional_process - - def to_decimal_processor_factory(target_class, scale): - # Note that the scale argument is not taken into account for integer - # values in the C implementation while it is in the Python one. - # For example, the Python implementation might return - # Decimal('5.00000') whereas the C implementation will - # return Decimal('5'). These are equivalent of course. 
- return DecimalResultProcessor(target_class, "%%.%df" % scale).process - -except ImportError: - globals().update(py_fallback()) diff --git a/python/sqlalchemy/schema.py b/python/sqlalchemy/schema.py deleted file mode 100644 index 327498fc..00000000 --- a/python/sqlalchemy/schema.py +++ /dev/null @@ -1,65 +0,0 @@ -# schema.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Compatibility namespace for sqlalchemy.sql.schema and related. - -""" - -from .sql.base import ( - SchemaVisitor - ) - - -from .sql.schema import ( - CheckConstraint, - Column, - ColumnDefault, - Constraint, - DefaultClause, - DefaultGenerator, - FetchedValue, - ForeignKey, - ForeignKeyConstraint, - Index, - MetaData, - PassiveDefault, - PrimaryKeyConstraint, - SchemaItem, - Sequence, - Table, - ThreadLocalMetaData, - UniqueConstraint, - _get_table_key, - ColumnCollectionConstraint, - ColumnCollectionMixin - ) - - -from .sql.naming import conv - - -from .sql.ddl import ( - DDL, - CreateTable, - DropTable, - CreateSequence, - DropSequence, - CreateIndex, - DropIndex, - CreateSchema, - DropSchema, - _DropView, - CreateColumn, - AddConstraint, - DropConstraint, - DDLBase, - DDLElement, - _CreateDropBase, - _DDLCompiles, - sort_tables, - sort_tables_and_constraints -) diff --git a/python/sqlalchemy/sql/__init__.py b/python/sqlalchemy/sql/__init__.py deleted file mode 100644 index e8b70061..00000000 --- a/python/sqlalchemy/sql/__init__.py +++ /dev/null @@ -1,92 +0,0 @@ -# sql/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .expression import ( - Alias, - ClauseElement, - ColumnCollection, - ColumnElement, - CompoundSelect, - Delete, - FromClause, - Insert, - Join, - Select, - Selectable, - TableClause, - Update, - alias, - and_, - asc, - between, - bindparam, - case, - cast, - collate, - column, - delete, - desc, - distinct, - except_, - except_all, - exists, - extract, - false, - False_, - func, - funcfilter, - insert, - intersect, - intersect_all, - join, - label, - literal, - literal_column, - modifier, - not_, - null, - or_, - outerjoin, - outparam, - over, - select, - subquery, - table, - text, - true, - True_, - tuple_, - type_coerce, - union, - union_all, - update, -) - -from .visitors import ClauseVisitor - - -def __go(lcls): - global __all__ - from .. import util as _sa_util - - import inspect as _inspect - - __all__ = sorted(name for name, obj in lcls.items() - if not (name.startswith('_') or _inspect.ismodule(obj))) - - from .annotation import _prepare_annotations, Annotated - from .elements import AnnotatedColumnElement, ClauseList - from .selectable import AnnotatedFromClause - _prepare_annotations(ColumnElement, AnnotatedColumnElement) - _prepare_annotations(FromClause, AnnotatedFromClause) - _prepare_annotations(ClauseList, Annotated) - - _sa_util.dependencies.resolve_all("sqlalchemy.sql") - - from . 
import naming - -__go(locals()) diff --git a/python/sqlalchemy/sql/annotation.py b/python/sqlalchemy/sql/annotation.py deleted file mode 100644 index 8fec5039..00000000 --- a/python/sqlalchemy/sql/annotation.py +++ /dev/null @@ -1,196 +0,0 @@ -# sql/annotation.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""The :class:`.Annotated` class and related routines; creates hash-equivalent -copies of SQL constructs which contain context-specific markers and -associations. - -""" - -from .. import util -from . import operators - - -class Annotated(object): - """clones a ClauseElement and applies an 'annotations' dictionary. - - Unlike regular clones, this clone also mimics __hash__() and - __cmp__() of the original element so that it takes its place - in hashed collections. - - A reference to the original element is maintained, for the important - reason of keeping its hash value current. When GC'ed, the - hash value may be reused, causing conflicts. - - """ - - def __new__(cls, *args): - if not args: - # clone constructor - return object.__new__(cls) - else: - element, values = args - # pull appropriate subclass from registry of annotated - # classes - try: - cls = annotated_classes[element.__class__] - except KeyError: - cls = _new_annotation_type(element.__class__, cls) - return object.__new__(cls) - - def __init__(self, element, values): - self.__dict__ = element.__dict__.copy() - self.__element = element - self._annotations = values - self._hash = hash(element) - - def _annotate(self, values): - _values = self._annotations.copy() - _values.update(values) - return self._with_annotations(_values) - - def _with_annotations(self, values): - clone = self.__class__.__new__(self.__class__) - clone.__dict__ = self.__dict__.copy() - clone._annotations = values - return clone - - def _deannotate(self, values=None, clone=True): - if values is None: - return self.__element - else: - _values = self._annotations.copy() - for v in values: - _values.pop(v, None) - return self._with_annotations(_values) - - def _compiler_dispatch(self, visitor, **kw): - return self.__element.__class__._compiler_dispatch( - self, visitor, **kw) - - @property - def _constructor(self): - return self.__element._constructor - - def _clone(self): - clone = self.__element._clone() - if clone is self.__element: - # detect immutable, don't change anything - return self - else: - # update the clone with any changes that have occurred - # to this object's __dict__. - clone.__dict__.update(self.__dict__) - return self.__class__(clone, self._annotations) - - def __hash__(self): - return self._hash - - def __eq__(self, other): - if isinstance(self.__element, operators.ColumnOperators): - return self.__element.__class__.__eq__(self, other) - else: - return hash(other) == hash(self) - - -# hard-generate Annotated subclasses. this technique -# is used instead of on-the-fly types (i.e. type.__new__()) -# so that the resulting objects are pickleable. -annotated_classes = {} - - -def _deep_annotate(element, annotations, exclude=None): - """Deep copy the given ClauseElement, annotating each element - with the given annotations dictionary. - - Elements within the exclude collection will be cloned but not annotated. 
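As an aside, a simplified sketch of the hash-equivalence contract that Annotated relies on (Element and AnnotatedSketch are hypothetical stand-ins, not real SQLAlchemy types):

class Element(object):
    pass

class AnnotatedSketch(object):
    # Mirrors Annotated.__init__: copy the wrapped element's state,
    # attach the annotations dict, and freeze the element's hash.
    def __init__(self, element, values):
        self.__dict__ = element.__dict__.copy()
        self._element = element
        self._annotations = values
        self._hash = hash(element)

    def __hash__(self):
        return self._hash

elem = Element()
anno = AnnotatedSketch(elem, {"entity": "User"})
assert hash(anno) == hash(elem)  # the copy can replace elem in hashed collections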
- - """ - def clone(elem): - if exclude and \ - hasattr(elem, 'proxy_set') and \ - elem.proxy_set.intersection(exclude): - newelem = elem._clone() - elif annotations != elem._annotations: - newelem = elem._annotate(annotations) - else: - newelem = elem - newelem._copy_internals(clone=clone) - return newelem - - if element is not None: - element = clone(element) - return element - - -def _deep_deannotate(element, values=None): - """Deep copy the given element, removing annotations.""" - - cloned = util.column_dict() - - def clone(elem): - # if a values dict is given, - # the elem must be cloned each time it appears, - # as there may be different annotations in source - # elements that are remaining. if totally - # removing all annotations, can assume the same - # slate... - if values or elem not in cloned: - newelem = elem._deannotate(values=values, clone=True) - newelem._copy_internals(clone=clone) - if not values: - cloned[elem] = newelem - return newelem - else: - return cloned[elem] - - if element is not None: - element = clone(element) - return element - - -def _shallow_annotate(element, annotations): - """Annotate the given ClauseElement and copy its internals so that - internal objects refer to the new annotated object. - - Basically used to apply a "dont traverse" annotation to a - selectable, without digging throughout the whole - structure wasting time. - """ - element = element._annotate(annotations) - element._copy_internals() - return element - - -def _new_annotation_type(cls, base_cls): - if issubclass(cls, Annotated): - return cls - elif cls in annotated_classes: - return annotated_classes[cls] - - for super_ in cls.__mro__: - # check if an Annotated subclass more specific than - # the given base_cls is already registered, such - # as AnnotatedColumnElement. - if super_ in annotated_classes: - base_cls = annotated_classes[super_] - break - - annotated_classes[cls] = anno_cls = type( - "Annotated%s" % cls.__name__, - (base_cls, cls), {}) - globals()["Annotated%s" % cls.__name__] = anno_cls - return anno_cls - - -def _prepare_annotations(target_hierarchy, base_cls): - stack = [target_hierarchy] - while stack: - cls = stack.pop() - stack.extend(cls.__subclasses__()) - - _new_annotation_type(cls, base_cls) diff --git a/python/sqlalchemy/sql/base.py b/python/sqlalchemy/sql/base.py deleted file mode 100644 index eed07923..00000000 --- a/python/sqlalchemy/sql/base.py +++ /dev/null @@ -1,647 +0,0 @@ -# sql/base.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Foundational utilities common to many sql modules. - -""" - - -from .. 
import util, exc -import itertools -from .visitors import ClauseVisitor -import re -import collections - -PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT') -NO_ARG = util.symbol('NO_ARG') - - -class Immutable(object): - """mark a ClauseElement as 'immutable' when expressions are cloned.""" - - def unique_params(self, *optionaldict, **kwargs): - raise NotImplementedError("Immutable objects do not support copying") - - def params(self, *optionaldict, **kwargs): - raise NotImplementedError("Immutable objects do not support copying") - - def _clone(self): - return self - - -def _from_objects(*elements): - return itertools.chain(*[element._from_objects for element in elements]) - - -@util.decorator -def _generative(fn, *args, **kw): - """Mark a method as generative.""" - - self = args[0]._generate() - fn(self, *args[1:], **kw) - return self - - -class _DialectArgView(collections.MutableMapping): - """A dictionary view of dialect-level arguments in the form - _. - - """ - - def __init__(self, obj): - self.obj = obj - - def _key(self, key): - try: - dialect, value_key = key.split("_", 1) - except ValueError: - raise KeyError(key) - else: - return dialect, value_key - - def __getitem__(self, key): - dialect, value_key = self._key(key) - - try: - opt = self.obj.dialect_options[dialect] - except exc.NoSuchModuleError: - raise KeyError(key) - else: - return opt[value_key] - - def __setitem__(self, key, value): - try: - dialect, value_key = self._key(key) - except KeyError: - raise exc.ArgumentError( - "Keys must be of the form _") - else: - self.obj.dialect_options[dialect][value_key] = value - - def __delitem__(self, key): - dialect, value_key = self._key(key) - del self.obj.dialect_options[dialect][value_key] - - def __len__(self): - return sum(len(args._non_defaults) for args in - self.obj.dialect_options.values()) - - def __iter__(self): - return ( - util.safe_kwarg("%s_%s" % (dialect_name, value_name)) - for dialect_name in self.obj.dialect_options - for value_name in - self.obj.dialect_options[dialect_name]._non_defaults - ) - - -class _DialectArgDict(collections.MutableMapping): - """A dictionary view of dialect-level arguments for a specific - dialect. - - Maintains a separate collection of user-specified arguments - and dialect-specified default arguments. - - """ - - def __init__(self): - self._non_defaults = {} - self._defaults = {} - - def __len__(self): - return len(set(self._non_defaults).union(self._defaults)) - - def __iter__(self): - return iter(set(self._non_defaults).union(self._defaults)) - - def __getitem__(self, key): - if key in self._non_defaults: - return self._non_defaults[key] - else: - return self._defaults[key] - - def __setitem__(self, key, value): - self._non_defaults[key] = value - - def __delitem__(self, key): - del self._non_defaults[key] - - -class DialectKWArgs(object): - """Establish the ability for a class to have dialect-specific arguments - with defaults and constructor validation. - - The :class:`.DialectKWArgs` interacts with the - :attr:`.DefaultDialect.construct_arguments` present on a dialect. - - .. seealso:: - - :attr:`.DefaultDialect.construct_arguments` - - """ - - @classmethod - def argument_for(cls, dialect_name, argument_name, default): - """Add a new kind of dialect-specific keyword argument for this class. 
- - E.g.:: - - Index.argument_for("mydialect", "length", None) - - some_index = Index('a', 'b', mydialect_length=5) - - The :meth:`.DialectKWArgs.argument_for` method is a per-argument - way adding extra arguments to the - :attr:`.DefaultDialect.construct_arguments` dictionary. This - dictionary provides a list of argument names accepted by various - schema-level constructs on behalf of a dialect. - - New dialects should typically specify this dictionary all at once as a - data member of the dialect class. The use case for ad-hoc addition of - argument names is typically for end-user code that is also using - a custom compilation scheme which consumes the additional arguments. - - :param dialect_name: name of a dialect. The dialect must be - locatable, else a :class:`.NoSuchModuleError` is raised. The - dialect must also include an existing - :attr:`.DefaultDialect.construct_arguments` collection, indicating - that it participates in the keyword-argument validation and default - system, else :class:`.ArgumentError` is raised. If the dialect does - not include this collection, then any keyword argument can be - specified on behalf of this dialect already. All dialects packaged - within SQLAlchemy include this collection, however for third party - dialects, support may vary. - - :param argument_name: name of the parameter. - - :param default: default value of the parameter. - - .. versionadded:: 0.9.4 - - """ - - construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name] - if construct_arg_dictionary is None: - raise exc.ArgumentError( - "Dialect '%s' does have keyword-argument " - "validation and defaults enabled configured" % - dialect_name) - if cls not in construct_arg_dictionary: - construct_arg_dictionary[cls] = {} - construct_arg_dictionary[cls][argument_name] = default - - @util.memoized_property - def dialect_kwargs(self): - """A collection of keyword arguments specified as dialect-specific - options to this construct. - - The arguments are present here in their original ``_`` - format. Only arguments that were actually passed are included; - unlike the :attr:`.DialectKWArgs.dialect_options` collection, which - contains all options known by this dialect including defaults. - - The collection is also writable; keys are accepted of the - form ``_`` where the value will be assembled - into the list of options. - - .. versionadded:: 0.9.2 - - .. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs` - collection is now writable. - - .. 
seealso:: - - :attr:`.DialectKWArgs.dialect_options` - nested dictionary form - - """ - return _DialectArgView(self) - - @property - def kwargs(self): - """A synonym for :attr:`.DialectKWArgs.dialect_kwargs`.""" - return self.dialect_kwargs - - @util.dependencies("sqlalchemy.dialects") - def _kw_reg_for_dialect(dialects, dialect_name): - dialect_cls = dialects.registry.load(dialect_name) - if dialect_cls.construct_arguments is None: - return None - return dict(dialect_cls.construct_arguments) - _kw_registry = util.PopulateDict(_kw_reg_for_dialect) - - def _kw_reg_for_dialect_cls(self, dialect_name): - construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name] - d = _DialectArgDict() - - if construct_arg_dictionary is None: - d._defaults.update({"*": None}) - else: - for cls in reversed(self.__class__.__mro__): - if cls in construct_arg_dictionary: - d._defaults.update(construct_arg_dictionary[cls]) - return d - - @util.memoized_property - def dialect_options(self): - """A collection of keyword arguments specified as dialect-specific - options to this construct. - - This is a two-level nested registry, keyed to ```` - and ````. For example, the ``postgresql_where`` - argument would be locatable as:: - - arg = my_object.dialect_options['postgresql']['where'] - - .. versionadded:: 0.9.2 - - .. seealso:: - - :attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form - - """ - - return util.PopulateDict( - util.portable_instancemethod(self._kw_reg_for_dialect_cls) - ) - - def _validate_dialect_kwargs(self, kwargs): - # validate remaining kwargs that they all specify DB prefixes - - if not kwargs: - return - - for k in kwargs: - m = re.match('^(.+?)_(.+)$', k) - if not m: - raise TypeError( - "Additional arguments should be " - "named _, got '%s'" % k) - dialect_name, arg_name = m.group(1, 2) - - try: - construct_arg_dictionary = self.dialect_options[dialect_name] - except exc.NoSuchModuleError: - util.warn( - "Can't validate argument %r; can't " - "locate any SQLAlchemy dialect named %r" % - (k, dialect_name)) - self.dialect_options[dialect_name] = d = _DialectArgDict() - d._defaults.update({"*": None}) - d._non_defaults[arg_name] = kwargs[k] - else: - if "*" not in construct_arg_dictionary and \ - arg_name not in construct_arg_dictionary: - raise exc.ArgumentError( - "Argument %r is not accepted by " - "dialect %r on behalf of %r" % ( - k, - dialect_name, self.__class__ - )) - else: - construct_arg_dictionary[arg_name] = kwargs[k] - - -class Generative(object): - """Allow a ClauseElement to generate itself via the - @_generative decorator. - - """ - - def _generate(self): - s = self.__class__.__new__(self.__class__) - s.__dict__ = self.__dict__.copy() - return s - - -class Executable(Generative): - """Mark a ClauseElement as supporting execution. - - :class:`.Executable` is a superclass for all "statement" types - of objects, including :func:`select`, :func:`delete`, :func:`update`, - :func:`insert`, :func:`text`. - - """ - - supports_execution = True - _execution_options = util.immutabledict() - _bind = None - - @_generative - def execution_options(self, **kw): - """ Set non-SQL options for the statement which take effect during - execution. - - Execution options can be set on a per-statement or - per :class:`.Connection` basis. Additionally, the - :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide - access to execution options which they in turn configure upon - connections. - - The :meth:`execution_options` method is generative. 
A new - instance of this statement is returned that contains the options:: - - statement = select([table.c.x, table.c.y]) - statement = statement.execution_options(autocommit=True) - - Note that only a subset of possible execution options can be applied - to a statement - these include "autocommit" and "stream_results", - but not "isolation_level" or "compiled_cache". - See :meth:`.Connection.execution_options` for a full list of - possible options. - - .. seealso:: - - :meth:`.Connection.execution_options()` - - :meth:`.Query.execution_options()` - - """ - if 'isolation_level' in kw: - raise exc.ArgumentError( - "'isolation_level' execution option may only be specified " - "on Connection.execution_options(), or " - "per-engine using the isolation_level " - "argument to create_engine()." - ) - if 'compiled_cache' in kw: - raise exc.ArgumentError( - "'compiled_cache' execution option may only be specified " - "on Connection.execution_options(), not per statement." - ) - self._execution_options = self._execution_options.union(kw) - - def execute(self, *multiparams, **params): - """Compile and execute this :class:`.Executable`.""" - e = self.bind - if e is None: - label = getattr(self, 'description', self.__class__.__name__) - msg = ('This %s is not directly bound to a Connection or Engine.' - 'Use the .execute() method of a Connection or Engine ' - 'to execute this construct.' % label) - raise exc.UnboundExecutionError(msg) - return e._execute_clauseelement(self, multiparams, params) - - def scalar(self, *multiparams, **params): - """Compile and execute this :class:`.Executable`, returning the - result's scalar representation. - - """ - return self.execute(*multiparams, **params).scalar() - - @property - def bind(self): - """Returns the :class:`.Engine` or :class:`.Connection` to - which this :class:`.Executable` is bound, or None if none found. - - This is a traversal which checks locally, then - checks among the "from" clauses of associated objects - until a bound engine or connection is found. - - """ - if self._bind is not None: - return self._bind - - for f in _from_objects(self): - if f is self: - continue - engine = f.bind - if engine is not None: - return engine - else: - return None - - -class SchemaEventTarget(object): - """Base class for elements that are the targets of :class:`.DDLEvents` - events. - - This includes :class:`.SchemaItem` as well as :class:`.SchemaType`. - - """ - - def _set_parent(self, parent): - """Associate with this SchemaEvent's parent object.""" - - raise NotImplementedError() - - def _set_parent_with_dispatch(self, parent): - self.dispatch.before_parent_attach(self, parent) - self._set_parent(parent) - self.dispatch.after_parent_attach(self, parent) - - -class SchemaVisitor(ClauseVisitor): - """Define the visiting for ``SchemaItem`` objects.""" - - __traverse_options__ = {'schema_visitor': True} - - -class ColumnCollection(util.OrderedProperties): - """An ordered dictionary that stores a list of ColumnElement - instances. - - Overrides the ``__eq__()`` method to produce SQL clauses between - sets of correlated columns. 
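For example, a sketch against the public Table/alias API, which these collections back (the rendered output is approximate):

from sqlalchemy import Column, Integer, MetaData, Table

m = MetaData()
t1 = Table('t1', m, Column('a', Integer), Column('b', Integer))
t2 = t1.alias()

# Comparing two collections of correlated columns yields an AND of
# pairwise equality clauses between columns that share lineage:
print(t1.c == t2.c)
# t1.a = t1_1.a AND t1.b = t1_1.b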
- - """ - - __slots__ = '_all_col_set', '_all_columns' - - def __init__(self, *columns): - super(ColumnCollection, self).__init__() - object.__setattr__(self, '_all_col_set', util.column_set()) - object.__setattr__(self, '_all_columns', []) - for c in columns: - self.add(c) - - def __str__(self): - return repr([str(c) for c in self]) - - def replace(self, column): - """add the given column to this collection, removing unaliased - versions of this column as well as existing columns with the - same key. - - e.g.:: - - t = Table('sometable', metadata, Column('col1', Integer)) - t.columns.replace(Column('col1', Integer, key='columnone')) - - will remove the original 'col1' from the collection, and add - the new column under the name 'columnname'. - - Used by schema.Column to override columns during table reflection. - - """ - remove_col = None - if column.name in self and column.key != column.name: - other = self[column.name] - if other.name == other.key: - remove_col = other - self._all_col_set.remove(other) - del self._data[other.key] - - if column.key in self._data: - remove_col = self._data[column.key] - self._all_col_set.remove(remove_col) - - self._all_col_set.add(column) - self._data[column.key] = column - if remove_col is not None: - self._all_columns[:] = [column if c is remove_col - else c for c in self._all_columns] - else: - self._all_columns.append(column) - - def add(self, column): - """Add a column to this collection. - - The key attribute of the column will be used as the hash key - for this dictionary. - - """ - if not column.key: - raise exc.ArgumentError( - "Can't add unnamed column to column collection") - self[column.key] = column - - def __delitem__(self, key): - raise NotImplementedError() - - def __setattr__(self, key, object): - raise NotImplementedError() - - def __setitem__(self, key, value): - if key in self: - - # this warning is primarily to catch select() statements - # which have conflicting column names in their exported - # columns collection - - existing = self[key] - if not existing.shares_lineage(value): - util.warn('Column %r on table %r being replaced by ' - '%r, which has the same key. Consider ' - 'use_labels for select() statements.' 
% - (key, getattr(existing, 'table', None), value)) - - # pop out memoized proxy_set as this - # operation may very well be occurring - # in a _make_proxy operation - util.memoized_property.reset(value, "proxy_set") - - self._all_col_set.add(value) - self._all_columns.append(value) - self._data[key] = value - - def clear(self): - raise NotImplementedError() - - def remove(self, column): - del self._data[column.key] - self._all_col_set.remove(column) - self._all_columns[:] = [ - c for c in self._all_columns if c is not column] - - def update(self, iter): - cols = list(iter) - self._all_columns.extend( - c for label, c in cols if c not in self._all_col_set) - self._all_col_set.update(c for label, c in cols) - self._data.update((label, c) for label, c in cols) - - def extend(self, iter): - cols = list(iter) - self._all_columns.extend(c for c in cols if c not in - self._all_col_set) - self._all_col_set.update(cols) - self._data.update((c.key, c) for c in cols) - - __hash__ = None - - @util.dependencies("sqlalchemy.sql.elements") - def __eq__(self, elements, other): - l = [] - for c in getattr(other, "_all_columns", other): - for local in self._all_columns: - if c.shares_lineage(local): - l.append(c == local) - return elements.and_(*l) - - def __contains__(self, other): - if not isinstance(other, util.string_types): - raise exc.ArgumentError("__contains__ requires a string argument") - return util.OrderedProperties.__contains__(self, other) - - def __getstate__(self): - return {'_data': self._data, - '_all_columns': self._all_columns} - - def __setstate__(self, state): - object.__setattr__(self, '_data', state['_data']) - object.__setattr__(self, '_all_columns', state['_all_columns']) - object.__setattr__( - self, '_all_col_set', util.column_set(state['_all_columns'])) - - def contains_column(self, col): - # this has to be done via set() membership - return col in self._all_col_set - - def as_immutable(self): - return ImmutableColumnCollection( - self._data, self._all_col_set, self._all_columns) - - -class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection): - def __init__(self, data, colset, all_columns): - util.ImmutableProperties.__init__(self, data) - object.__setattr__(self, '_all_col_set', colset) - object.__setattr__(self, '_all_columns', all_columns) - - extend = remove = util.ImmutableProperties._immutable - - -class ColumnSet(util.ordered_column_set): - def contains_column(self, col): - return col in self - - def extend(self, cols): - for col in cols: - self.add(col) - - def __add__(self, other): - return list(self) + list(other) - - @util.dependencies("sqlalchemy.sql.elements") - def __eq__(self, elements, other): - l = [] - for c in other: - for local in self: - if c.shares_lineage(local): - l.append(c == local) - return elements.and_(*l) - - def __hash__(self): - return hash(tuple(x for x in self)) - - -def _bind_or_error(schemaitem, msg=None): - bind = schemaitem.bind - if not bind: - name = schemaitem.__class__.__name__ - label = getattr(schemaitem, 'fullname', - getattr(schemaitem, 'name', None)) - if label: - item = '%s object %r' % (name, label) - else: - item = '%s object' % name - if msg is None: - msg = "%s is not bound to an Engine or Connection. "\ - "Execution can not proceed without a database to execute "\ - "against." 
% item - raise exc.UnboundExecutionError(msg) - return bind diff --git a/python/sqlalchemy/sql/compiler.py b/python/sqlalchemy/sql/compiler.py deleted file mode 100644 index a036dcc4..00000000 --- a/python/sqlalchemy/sql/compiler.py +++ /dev/null @@ -1,2816 +0,0 @@ -# sql/compiler.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Base SQL and DDL compiler implementations. - -Classes provided include: - -:class:`.compiler.SQLCompiler` - renders SQL -strings - -:class:`.compiler.DDLCompiler` - renders DDL -(data definition language) strings - -:class:`.compiler.GenericTypeCompiler` - renders -type specification strings. - -To generate user-defined SQL strings, see -:doc:`/ext/compiler`. - -""" - -import contextlib -import re -from . import schema, sqltypes, operators, functions, visitors, \ - elements, selectable, crud -from .. import util, exc -import itertools - -RESERVED_WORDS = set([ - 'all', 'analyse', 'analyze', 'and', 'any', 'array', - 'as', 'asc', 'asymmetric', 'authorization', 'between', - 'binary', 'both', 'case', 'cast', 'check', 'collate', - 'column', 'constraint', 'create', 'cross', 'current_date', - 'current_role', 'current_time', 'current_timestamp', - 'current_user', 'default', 'deferrable', 'desc', - 'distinct', 'do', 'else', 'end', 'except', 'false', - 'for', 'foreign', 'freeze', 'from', 'full', 'grant', - 'group', 'having', 'ilike', 'in', 'initially', 'inner', - 'intersect', 'into', 'is', 'isnull', 'join', 'leading', - 'left', 'like', 'limit', 'localtime', 'localtimestamp', - 'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset', - 'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps', - 'placing', 'primary', 'references', 'right', 'select', - 'session_user', 'set', 'similar', 'some', 'symmetric', 'table', - 'then', 'to', 'trailing', 'true', 'union', 'unique', 'user', - 'using', 'verbose', 'when', 'where']) - -LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I) -ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$']) - -BIND_PARAMS = re.compile(r'(? 
', - operators.ge: ' >= ', - operators.eq: ' = ', - operators.concat_op: ' || ', - operators.match_op: ' MATCH ', - operators.notmatch_op: ' NOT MATCH ', - operators.in_op: ' IN ', - operators.notin_op: ' NOT IN ', - operators.comma_op: ', ', - operators.from_: ' FROM ', - operators.as_: ' AS ', - operators.is_: ' IS ', - operators.isnot: ' IS NOT ', - operators.collate: ' COLLATE ', - - # unary - operators.exists: 'EXISTS ', - operators.distinct_op: 'DISTINCT ', - operators.inv: 'NOT ', - - # modifiers - operators.desc_op: ' DESC', - operators.asc_op: ' ASC', - operators.nullsfirst_op: ' NULLS FIRST', - operators.nullslast_op: ' NULLS LAST', - -} - -FUNCTIONS = { - functions.coalesce: 'coalesce%(expr)s', - functions.current_date: 'CURRENT_DATE', - functions.current_time: 'CURRENT_TIME', - functions.current_timestamp: 'CURRENT_TIMESTAMP', - functions.current_user: 'CURRENT_USER', - functions.localtime: 'LOCALTIME', - functions.localtimestamp: 'LOCALTIMESTAMP', - functions.random: 'random%(expr)s', - functions.sysdate: 'sysdate', - functions.session_user: 'SESSION_USER', - functions.user: 'USER' -} - -EXTRACT_MAP = { - 'month': 'month', - 'day': 'day', - 'year': 'year', - 'second': 'second', - 'hour': 'hour', - 'doy': 'doy', - 'minute': 'minute', - 'quarter': 'quarter', - 'dow': 'dow', - 'week': 'week', - 'epoch': 'epoch', - 'milliseconds': 'milliseconds', - 'microseconds': 'microseconds', - 'timezone_hour': 'timezone_hour', - 'timezone_minute': 'timezone_minute' -} - -COMPOUND_KEYWORDS = { - selectable.CompoundSelect.UNION: 'UNION', - selectable.CompoundSelect.UNION_ALL: 'UNION ALL', - selectable.CompoundSelect.EXCEPT: 'EXCEPT', - selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL', - selectable.CompoundSelect.INTERSECT: 'INTERSECT', - selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL' -} - - -class Compiled(object): - - """Represent a compiled SQL or DDL expression. - - The ``__str__`` method of the ``Compiled`` object should produce - the actual text of the statement. ``Compiled`` objects are - specific to their underlying database dialect, and also may - or may not be specific to the columns referenced within a - particular set of bind parameters. In no case should the - ``Compiled`` object be dependent on the actual values of those - bind parameters, even though it may reference those values as - defaults. - """ - - _cached_metadata = None - - def __init__(self, dialect, statement, bind=None, - compile_kwargs=util.immutabledict()): - """Construct a new ``Compiled`` object. - - :param dialect: ``Dialect`` to compile against. - - :param statement: ``ClauseElement`` to be compiled. - - :param bind: Optional Engine or Connection to compile this - statement against. - - :param compile_kwargs: additional kwargs that will be - passed to the initial call to :meth:`.Compiled.process`. - - .. versionadded:: 0.8 - - """ - - self.dialect = dialect - self.bind = bind - if statement is not None: - self.statement = statement - self.can_execute = statement.supports_execution - self.string = self.process(self.statement, **compile_kwargs) - - @util.deprecated("0.7", ":class:`.Compiled` objects now compile " - "within the constructor.") - def compile(self): - """Produce the internal string representation of this element. - """ - pass - - def _execute_on_connection(self, connection, multiparams, params): - return connection._execute_compiled(self, multiparams, params) - - @property - def sql_compiler(self): - """Return a Compiled that is capable of processing SQL expressions. 
- - If this compiler is one, it would likely just return 'self'. - - """ - - raise NotImplementedError() - - def process(self, obj, **kwargs): - return obj._compiler_dispatch(self, **kwargs) - - def __str__(self): - """Return the string text of the generated SQL or DDL.""" - - return self.string or '' - - def construct_params(self, params=None): - """Return the bind params for this compiled object. - - :param params: a dict of string/object pairs whose values will - override bind values compiled in to the - statement. - """ - - raise NotImplementedError() - - @property - def params(self): - """Return the bind params for this compiled object.""" - return self.construct_params() - - def execute(self, *multiparams, **params): - """Execute this compiled object.""" - - e = self.bind - if e is None: - raise exc.UnboundExecutionError( - "This Compiled object is not bound to any Engine " - "or Connection.") - return e._execute_compiled(self, multiparams, params) - - def scalar(self, *multiparams, **params): - """Execute this compiled object and return the result's - scalar value.""" - - return self.execute(*multiparams, **params).scalar() - - -class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)): - """Produces DDL specification for TypeEngine objects.""" - - ensure_kwarg = 'visit_\w+' - - def __init__(self, dialect): - self.dialect = dialect - - def process(self, type_, **kw): - return type_._compiler_dispatch(self, **kw) - - -class _CompileLabel(visitors.Visitable): - - """lightweight label object which acts as an expression.Label.""" - - __visit_name__ = 'label' - __slots__ = 'element', 'name' - - def __init__(self, col, name, alt_names=()): - self.element = col - self.name = name - self._alt_names = (col,) + alt_names - - @property - def proxy_set(self): - return self.element.proxy_set - - @property - def type(self): - return self.element.type - - -class SQLCompiler(Compiled): - - """Default implementation of Compiled. - - Compiles ClauseElements into SQL strings. Uses a similar visit - paradigm as visitors.ClauseVisitor but implements its own traversal. - - """ - - extract_map = EXTRACT_MAP - - compound_keywords = COMPOUND_KEYWORDS - - isdelete = isinsert = isupdate = False - """class-level defaults which can be set at the instance - level to define if this Compiled instance represents - INSERT/UPDATE/DELETE - """ - - returning = None - """holds the "returning" collection of columns if - the statement is CRUD and defines returning columns - either implicitly or explicitly - """ - - returning_precedes_values = False - """set to True classwide to generate RETURNING - clauses before the VALUES or WHERE clause (i.e. MSSQL) - """ - - render_table_with_column_in_update_from = False - """set to True classwide to indicate the SET clause - in a multi-table UPDATE statement should qualify - columns with the table name (i.e. MySQL only) - """ - - ansi_bind_rules = False - """SQL 92 doesn't allow bind parameters to be used - in the columns clause of a SELECT, nor does it allow - ambiguous expressions like "? = ?". A compiler - subclass can set this flag to False if the target - driver/DB enforces this - """ - - def __init__(self, dialect, statement, column_keys=None, - inline=False, **kwargs): - """Construct a new ``DefaultCompiler`` object. - - dialect - Dialect to be used - - statement - ClauseElement to be compiled - - column_keys - a list of column names to be compiled into an INSERT or UPDATE - statement. 
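For orientation, compiling a statement through the public API exercises this class; str() yields the SQL string and .params the bind dictionary described below (a sketch; the rendered string is approximate):

from sqlalchemy.sql import column, select, table

users = table('users', column('id'), column('name'))
stmt = select([users.c.name]).where(users.c.id == 5)

compiled = stmt.compile()   # builds a SQLCompiler against the default dialect
print(compiled)             # SELECT users.name FROM users WHERE users.id = :id_1
print(compiled.params)      # {'id_1': 5}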
- - """ - self.column_keys = column_keys - - # compile INSERT/UPDATE defaults/sequences inlined (no pre- - # execute) - self.inline = inline or getattr(statement, 'inline', False) - - # a dictionary of bind parameter keys to BindParameter - # instances. - self.binds = {} - - # a dictionary of BindParameter instances to "compiled" names - # that are actually present in the generated SQL - self.bind_names = util.column_dict() - - # stack which keeps track of nested SELECT statements - self.stack = [] - - # relates label names in the final SQL to a tuple of local - # column/label name, ColumnElement object (if any) and - # TypeEngine. ResultProxy uses this for type processing and - # column targeting - self._result_columns = [] - - # if False, means we can't be sure the list of entries - # in _result_columns is actually the rendered order. This - # gets flipped when we use TextAsFrom, for example. - self._ordered_columns = True - - # true if the paramstyle is positional - self.positional = dialect.positional - if self.positional: - self.positiontup = [] - self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] - - self.ctes = None - - # an IdentifierPreparer that formats the quoting of identifiers - self.preparer = dialect.identifier_preparer - self.label_length = dialect.label_length \ - or dialect.max_identifier_length - - # a map which tracks "anonymous" identifiers that are created on - # the fly here - self.anon_map = util.PopulateDict(self._process_anon) - - # a map which tracks "truncated" names based on - # dialect.label_length or dialect.max_identifier_length - self.truncated_names = {} - Compiled.__init__(self, dialect, statement, **kwargs) - - if self.positional and dialect.paramstyle == 'numeric': - self._apply_numbered_params() - - @util.memoized_instancemethod - def _init_cte_state(self): - """Initialize collections related to CTEs only if - a CTE is located, to save on the overhead of - these collections otherwise. 
- - """ - # collect CTEs to tack on top of a SELECT - self.ctes = util.OrderedDict() - self.ctes_by_name = {} - self.ctes_recursive = False - if self.positional: - self.cte_positional = {} - - @contextlib.contextmanager - def _nested_result(self): - """special API to support the use case of 'nested result sets'""" - result_columns, ordered_columns = ( - self._result_columns, self._ordered_columns) - self._result_columns, self._ordered_columns = [], False - - try: - if self.stack: - entry = self.stack[-1] - entry['need_result_map_for_nested'] = True - else: - entry = None - yield self._result_columns, self._ordered_columns - finally: - if entry: - entry.pop('need_result_map_for_nested') - self._result_columns, self._ordered_columns = ( - result_columns, ordered_columns) - - def _apply_numbered_params(self): - poscount = itertools.count(1) - self.string = re.sub( - r'\[_POSITION\]', - lambda m: str(util.next(poscount)), - self.string) - - @util.memoized_property - def _bind_processors(self): - return dict( - (key, value) for key, value in - ((self.bind_names[bindparam], - bindparam.type._cached_bind_processor(self.dialect)) - for bindparam in self.bind_names) - if value is not None - ) - - def is_subquery(self): - return len(self.stack) > 1 - - @property - def sql_compiler(self): - return self - - def construct_params(self, params=None, _group_number=None, _check=True): - """return a dictionary of bind parameter keys and values""" - - if params: - pd = {} - for bindparam in self.bind_names: - name = self.bind_names[bindparam] - if bindparam.key in params: - pd[name] = params[bindparam.key] - elif name in params: - pd[name] = params[name] - - elif _check and bindparam.required: - if _group_number: - raise exc.InvalidRequestError( - "A value is required for bind parameter %r, " - "in parameter group %d" % - (bindparam.key, _group_number)) - else: - raise exc.InvalidRequestError( - "A value is required for bind parameter %r" - % bindparam.key) - - elif bindparam.callable: - pd[name] = bindparam.effective_value - else: - pd[name] = bindparam.value - return pd - else: - pd = {} - for bindparam in self.bind_names: - if _check and bindparam.required: - if _group_number: - raise exc.InvalidRequestError( - "A value is required for bind parameter %r, " - "in parameter group %d" % - (bindparam.key, _group_number)) - else: - raise exc.InvalidRequestError( - "A value is required for bind parameter %r" - % bindparam.key) - - if bindparam.callable: - pd[self.bind_names[bindparam]] = bindparam.effective_value - else: - pd[self.bind_names[bindparam]] = bindparam.value - return pd - - @property - def params(self): - """Return the bind param dictionary embedded into this - compiled object, for those values that are present.""" - return self.construct_params(_check=False) - - @util.dependencies("sqlalchemy.engine.result") - def _create_result_map(self, result): - """utility method used for unit tests only.""" - return result.ResultMetaData._create_result_map(self._result_columns) - - def default_from(self): - """Called when a SELECT statement has no froms, and no FROM clause is - to be appended. - - Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output. 
- - """ - return "" - - def visit_grouping(self, grouping, asfrom=False, **kwargs): - return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" - - def visit_label_reference( - self, element, within_columns_clause=False, **kwargs): - if self.stack and self.dialect.supports_simple_order_by_label: - selectable = self.stack[-1]['selectable'] - - with_cols, only_froms = selectable._label_resolve_dict - if within_columns_clause: - resolve_dict = only_froms - else: - resolve_dict = with_cols - - # this can be None in the case that a _label_reference() - # were subject to a replacement operation, in which case - # the replacement of the Label element may have changed - # to something else like a ColumnClause expression. - order_by_elem = element.element._order_by_label_element - - if order_by_elem is not None and order_by_elem.name in \ - resolve_dict: - - kwargs['render_label_as_label'] = \ - element.element._order_by_label_element - - return self.process( - element.element, within_columns_clause=within_columns_clause, - **kwargs) - - def visit_textual_label_reference( - self, element, within_columns_clause=False, **kwargs): - if not self.stack: - # compiling the element outside of the context of a SELECT - return self.process( - element._text_clause - ) - - selectable = self.stack[-1]['selectable'] - with_cols, only_froms = selectable._label_resolve_dict - try: - if within_columns_clause: - col = only_froms[element.element] - else: - col = with_cols[element.element] - except KeyError: - # treat it like text() - util.warn_limited( - "Can't resolve label reference %r; converting to text()", - util.ellipses_string(element.element)) - return self.process( - element._text_clause - ) - else: - kwargs['render_label_as_label'] = col - return self.process( - col, within_columns_clause=within_columns_clause, **kwargs) - - def visit_label(self, label, - add_to_result_map=None, - within_label_clause=False, - within_columns_clause=False, - render_label_as_label=None, - **kw): - # only render labels within the columns clause - # or ORDER BY clause of a select. dialect-specific compilers - # can modify this behavior. 
- render_label_with_as = (within_columns_clause and not - within_label_clause) - render_label_only = render_label_as_label is label - - if render_label_only or render_label_with_as: - if isinstance(label.name, elements._truncated_label): - labelname = self._truncated_identifier("colident", label.name) - else: - labelname = label.name - - if render_label_with_as: - if add_to_result_map is not None: - add_to_result_map( - labelname, - label.name, - (label, labelname, ) + label._alt_names, - label.type - ) - - return label.element._compiler_dispatch( - self, within_columns_clause=True, - within_label_clause=True, **kw) + \ - OPERATORS[operators.as_] + \ - self.preparer.format_label(label, labelname) - elif render_label_only: - return self.preparer.format_label(label, labelname) - else: - return label.element._compiler_dispatch( - self, within_columns_clause=False, **kw) - - def visit_column(self, column, add_to_result_map=None, - include_table=True, **kwargs): - name = orig_name = column.name - if name is None: - raise exc.CompileError("Cannot compile Column object until " - "its 'name' is assigned.") - - is_literal = column.is_literal - if not is_literal and isinstance(name, elements._truncated_label): - name = self._truncated_identifier("colident", name) - - if add_to_result_map is not None: - add_to_result_map( - name, - orig_name, - (column, name, column.key), - column.type - ) - - if is_literal: - name = self.escape_literal_column(name) - else: - name = self.preparer.quote(name) - - table = column.table - if table is None or not include_table or not table.named_with_column: - return name - else: - if table.schema: - schema_prefix = self.preparer.quote_schema(table.schema) + '.' - else: - schema_prefix = '' - tablename = table.name - if isinstance(tablename, elements._truncated_label): - tablename = self._truncated_identifier("alias", tablename) - - return schema_prefix + \ - self.preparer.quote(tablename) + \ - "." 
+ name - - def escape_literal_column(self, text): - """provide escaping for the literal_column() construct.""" - - # TODO: some dialects might need different behavior here - return text.replace('%', '%%') - - def visit_fromclause(self, fromclause, **kwargs): - return fromclause.name - - def visit_index(self, index, **kwargs): - return index.name - - def visit_typeclause(self, typeclause, **kw): - kw['type_expression'] = typeclause - return self.dialect.type_compiler.process(typeclause.type, **kw) - - def post_process_text(self, text): - return text - - def visit_textclause(self, textclause, **kw): - def do_bindparam(m): - name = m.group(1) - if name in textclause._bindparams: - return self.process(textclause._bindparams[name], **kw) - else: - return self.bindparam_string(name, **kw) - - # un-escape any \:params - return BIND_PARAMS_ESC.sub( - lambda m: m.group(1), - BIND_PARAMS.sub( - do_bindparam, - self.post_process_text(textclause.text)) - ) - - def visit_text_as_from(self, taf, - compound_index=None, - asfrom=False, - parens=True, **kw): - - toplevel = not self.stack - entry = self._default_stack_entry if toplevel else self.stack[-1] - - populate_result_map = toplevel or \ - ( - compound_index == 0 and entry.get( - 'need_result_map_for_compound', False) - ) or entry.get('need_result_map_for_nested', False) - - if populate_result_map: - self._ordered_columns = False - for c in taf.column_args: - self.process(c, within_columns_clause=True, - add_to_result_map=self._add_to_result_map) - - text = self.process(taf.element, **kw) - if asfrom and parens: - text = "(%s)" % text - return text - - def visit_null(self, expr, **kw): - return 'NULL' - - def visit_true(self, expr, **kw): - if self.dialect.supports_native_boolean: - return 'true' - else: - return "1" - - def visit_false(self, expr, **kw): - if self.dialect.supports_native_boolean: - return 'false' - else: - return "0" - - def visit_clauselist(self, clauselist, **kw): - sep = clauselist.operator - if sep is None: - sep = " " - else: - sep = OPERATORS[clauselist.operator] - return sep.join( - s for s in - ( - c._compiler_dispatch(self, **kw) - for c in clauselist.clauses) - if s) - - def visit_case(self, clause, **kwargs): - x = "CASE " - if clause.value is not None: - x += clause.value._compiler_dispatch(self, **kwargs) + " " - for cond, result in clause.whens: - x += "WHEN " + cond._compiler_dispatch( - self, **kwargs - ) + " THEN " + result._compiler_dispatch( - self, **kwargs) + " " - if clause.else_ is not None: - x += "ELSE " + clause.else_._compiler_dispatch( - self, **kwargs - ) + " " - x += "END" - return x - - def visit_cast(self, cast, **kwargs): - return "CAST(%s AS %s)" % \ - (cast.clause._compiler_dispatch(self, **kwargs), - cast.typeclause._compiler_dispatch(self, **kwargs)) - - def visit_over(self, over, **kwargs): - return "%s OVER (%s)" % ( - over.func._compiler_dispatch(self, **kwargs), - ' '.join( - '%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs)) - for word, clause in ( - ('PARTITION', over.partition_by), - ('ORDER', over.order_by) - ) - if clause is not None and len(clause) - ) - ) - - def visit_funcfilter(self, funcfilter, **kwargs): - return "%s FILTER (WHERE %s)" % ( - funcfilter.func._compiler_dispatch(self, **kwargs), - funcfilter.criterion._compiler_dispatch(self, **kwargs) - ) - - def visit_extract(self, extract, **kwargs): - field = self.extract_map.get(extract.field, extract.field) - return "EXTRACT(%s FROM %s)" % ( - field, extract.expr._compiler_dispatch(self, **kwargs)) - - def 
visit_function(self, func, add_to_result_map=None, **kwargs): - if add_to_result_map is not None: - add_to_result_map( - func.name, func.name, (), func.type - ) - - disp = getattr(self, "visit_%s_func" % func.name.lower(), None) - if disp: - return disp(func, **kwargs) - else: - name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s") - return ".".join(list(func.packagenames) + [name]) % \ - {'expr': self.function_argspec(func, **kwargs)} - - def visit_next_value_func(self, next_value, **kw): - return self.visit_sequence(next_value.sequence) - - def visit_sequence(self, sequence): - raise NotImplementedError( - "Dialect '%s' does not support sequence increments." % - self.dialect.name - ) - - def function_argspec(self, func, **kwargs): - return func.clause_expr._compiler_dispatch(self, **kwargs) - - def visit_compound_select(self, cs, asfrom=False, - parens=True, compound_index=0, **kwargs): - toplevel = not self.stack - entry = self._default_stack_entry if toplevel else self.stack[-1] - need_result_map = toplevel or \ - (compound_index == 0 - and entry.get('need_result_map_for_compound', False)) - - self.stack.append( - { - 'correlate_froms': entry['correlate_froms'], - 'asfrom_froms': entry['asfrom_froms'], - 'selectable': cs, - 'need_result_map_for_compound': need_result_map - }) - - keyword = self.compound_keywords.get(cs.keyword) - - text = (" " + keyword + " ").join( - (c._compiler_dispatch(self, - asfrom=asfrom, parens=False, - compound_index=i, **kwargs) - for i, c in enumerate(cs.selects)) - ) - - group_by = cs._group_by_clause._compiler_dispatch( - self, asfrom=asfrom, **kwargs) - if group_by: - text += " GROUP BY " + group_by - - text += self.order_by_clause(cs, **kwargs) - text += (cs._limit_clause is not None - or cs._offset_clause is not None) and \ - self.limit_clause(cs, **kwargs) or "" - - if self.ctes and toplevel: - text = self._render_cte_clause() + text - - self.stack.pop(-1) - if asfrom and parens: - return "(" + text + ")" - else: - return text - - def visit_unary(self, unary, **kw): - if unary.operator: - if unary.modifier: - raise exc.CompileError( - "Unary expression does not support operator " - "and modifier simultaneously") - disp = getattr(self, "visit_%s_unary_operator" % - unary.operator.__name__, None) - if disp: - return disp(unary, unary.operator, **kw) - else: - return self._generate_generic_unary_operator( - unary, OPERATORS[unary.operator], **kw) - elif unary.modifier: - disp = getattr(self, "visit_%s_unary_modifier" % - unary.modifier.__name__, None) - if disp: - return disp(unary, unary.modifier, **kw) - else: - return self._generate_generic_unary_modifier( - unary, OPERATORS[unary.modifier], **kw) - else: - raise exc.CompileError( - "Unary expression has no operator or modifier") - - def visit_istrue_unary_operator(self, element, operator, **kw): - if self.dialect.supports_native_boolean: - return self.process(element.element, **kw) - else: - return "%s = 1" % self.process(element.element, **kw) - - def visit_isfalse_unary_operator(self, element, operator, **kw): - if self.dialect.supports_native_boolean: - return "NOT %s" % self.process(element.element, **kw) - else: - return "%s = 0" % self.process(element.element, **kw) - - def visit_notmatch_op_binary(self, binary, operator, **kw): - return "NOT %s" % self.visit_binary( - binary, override_operator=operators.match_op) - - def visit_binary(self, binary, override_operator=None, **kw): - # don't allow "? = ?" 
to render - if self.ansi_bind_rules and \ - isinstance(binary.left, elements.BindParameter) and \ - isinstance(binary.right, elements.BindParameter): - kw['literal_binds'] = True - - operator_ = override_operator or binary.operator - disp = getattr(self, "visit_%s_binary" % operator_.__name__, None) - if disp: - return disp(binary, operator_, **kw) - else: - try: - opstring = OPERATORS[operator_] - except KeyError: - raise exc.UnsupportedCompilationError(self, operator_) - else: - return self._generate_generic_binary(binary, opstring, **kw) - - def visit_custom_op_binary(self, element, operator, **kw): - return self._generate_generic_binary( - element, " " + operator.opstring + " ", **kw) - - def visit_custom_op_unary_operator(self, element, operator, **kw): - return self._generate_generic_unary_operator( - element, operator.opstring + " ", **kw) - - def visit_custom_op_unary_modifier(self, element, operator, **kw): - return self._generate_generic_unary_modifier( - element, " " + operator.opstring, **kw) - - def _generate_generic_binary(self, binary, opstring, **kw): - return binary.left._compiler_dispatch(self, **kw) + \ - opstring + \ - binary.right._compiler_dispatch(self, **kw) - - def _generate_generic_unary_operator(self, unary, opstring, **kw): - return opstring + unary.element._compiler_dispatch(self, **kw) - - def _generate_generic_unary_modifier(self, unary, opstring, **kw): - return unary.element._compiler_dispatch(self, **kw) + opstring - - @util.memoized_property - def _like_percent_literal(self): - return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE) - - def visit_contains_op_binary(self, binary, operator, **kw): - binary = binary._clone() - percent = self._like_percent_literal - binary.right = percent.__add__(binary.right).__add__(percent) - return self.visit_like_op_binary(binary, operator, **kw) - - def visit_notcontains_op_binary(self, binary, operator, **kw): - binary = binary._clone() - percent = self._like_percent_literal - binary.right = percent.__add__(binary.right).__add__(percent) - return self.visit_notlike_op_binary(binary, operator, **kw) - - def visit_startswith_op_binary(self, binary, operator, **kw): - binary = binary._clone() - percent = self._like_percent_literal - binary.right = percent.__radd__( - binary.right - ) - return self.visit_like_op_binary(binary, operator, **kw) - - def visit_notstartswith_op_binary(self, binary, operator, **kw): - binary = binary._clone() - percent = self._like_percent_literal - binary.right = percent.__radd__( - binary.right - ) - return self.visit_notlike_op_binary(binary, operator, **kw) - - def visit_endswith_op_binary(self, binary, operator, **kw): - binary = binary._clone() - percent = self._like_percent_literal - binary.right = percent.__add__(binary.right) - return self.visit_like_op_binary(binary, operator, **kw) - - def visit_notendswith_op_binary(self, binary, operator, **kw): - binary = binary._clone() - percent = self._like_percent_literal - binary.right = percent.__add__(binary.right) - return self.visit_notlike_op_binary(binary, operator, **kw) - - def visit_like_op_binary(self, binary, operator, **kw): - escape = binary.modifiers.get("escape", None) - - # TODO: use ternary here, not "and"/ "or" - return '%s LIKE %s' % ( - binary.left._compiler_dispatch(self, **kw), - binary.right._compiler_dispatch(self, **kw)) \ - + ( - ' ESCAPE ' + - self.render_literal_value(escape, sqltypes.STRINGTYPE) - if escape else '' - ) - - def visit_notlike_op_binary(self, binary, operator, **kw): - escape = 
binary.modifiers.get("escape", None) - return '%s NOT LIKE %s' % ( - binary.left._compiler_dispatch(self, **kw), - binary.right._compiler_dispatch(self, **kw)) \ - + ( - ' ESCAPE ' + - self.render_literal_value(escape, sqltypes.STRINGTYPE) - if escape else '' - ) - - def visit_ilike_op_binary(self, binary, operator, **kw): - escape = binary.modifiers.get("escape", None) - return 'lower(%s) LIKE lower(%s)' % ( - binary.left._compiler_dispatch(self, **kw), - binary.right._compiler_dispatch(self, **kw)) \ - + ( - ' ESCAPE ' + - self.render_literal_value(escape, sqltypes.STRINGTYPE) - if escape else '' - ) - - def visit_notilike_op_binary(self, binary, operator, **kw): - escape = binary.modifiers.get("escape", None) - return 'lower(%s) NOT LIKE lower(%s)' % ( - binary.left._compiler_dispatch(self, **kw), - binary.right._compiler_dispatch(self, **kw)) \ - + ( - ' ESCAPE ' + - self.render_literal_value(escape, sqltypes.STRINGTYPE) - if escape else '' - ) - - def visit_between_op_binary(self, binary, operator, **kw): - symmetric = binary.modifiers.get("symmetric", False) - return self._generate_generic_binary( - binary, " BETWEEN SYMMETRIC " - if symmetric else " BETWEEN ", **kw) - - def visit_notbetween_op_binary(self, binary, operator, **kw): - symmetric = binary.modifiers.get("symmetric", False) - return self._generate_generic_binary( - binary, " NOT BETWEEN SYMMETRIC " - if symmetric else " NOT BETWEEN ", **kw) - - def visit_bindparam(self, bindparam, within_columns_clause=False, - literal_binds=False, - skip_bind_expression=False, - **kwargs): - if not skip_bind_expression and bindparam.type._has_bind_expression: - bind_expression = bindparam.type.bind_expression(bindparam) - return self.process(bind_expression, - skip_bind_expression=True) - - if literal_binds or \ - (within_columns_clause and - self.ansi_bind_rules): - if bindparam.value is None and bindparam.callable is None: - raise exc.CompileError("Bind parameter '%s' without a " - "renderable value not allowed here." - % bindparam.key) - return self.render_literal_bindparam( - bindparam, within_columns_clause=True, **kwargs) - - name = self._truncate_bindparam(bindparam) - - if name in self.binds: - existing = self.binds[name] - if existing is not bindparam: - if (existing.unique or bindparam.unique) and \ - not existing.proxy_set.intersection( - bindparam.proxy_set): - raise exc.CompileError( - "Bind parameter '%s' conflicts with " - "unique bind parameter of the same name" % - bindparam.key - ) - elif existing._is_crud or bindparam._is_crud: - raise exc.CompileError( - "bindparam() name '%s' is reserved " - "for automatic usage in the VALUES or SET " - "clause of this " - "insert/update statement. Please use a " - "name other than column name when using bindparam() " - "with insert() or update() (for example, 'b_%s')." % - (bindparam.key, bindparam.key) - ) - - self.binds[bindparam.key] = self.binds[name] = bindparam - - return self.bindparam_string(name, **kwargs) - - def render_literal_bindparam(self, bindparam, **kw): - value = bindparam.effective_value - return self.render_literal_value(value, bindparam.type) - - def render_literal_value(self, value, type_): - """Render the value of a bind parameter as a quoted literal. - - This is used for statement sections that do not accept bind parameters - on the target driver/database. - - This should be implemented by subclasses using the quoting services - of the DBAPI. 
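A short sketch of when this path is taken: passing literal_binds through compile_kwargs asks the compiler to inline values via render_literal_value() instead of emitting bind parameters:

from sqlalchemy.sql import column, select, table

users = table('users', column('id'))
stmt = select([users.c.id]).where(users.c.id == 5)

print(stmt.compile(compile_kwargs={"literal_binds": True}))
# SELECT users.id FROM users WHERE users.id = 5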
- - """ - - processor = type_._cached_literal_processor(self.dialect) - if processor: - return processor(value) - else: - raise NotImplementedError( - "Don't know how to literal-quote value %r" % value) - - def _truncate_bindparam(self, bindparam): - if bindparam in self.bind_names: - return self.bind_names[bindparam] - - bind_name = bindparam.key - if isinstance(bind_name, elements._truncated_label): - bind_name = self._truncated_identifier("bindparam", bind_name) - - # add to bind_names for translation - self.bind_names[bindparam] = bind_name - - return bind_name - - def _truncated_identifier(self, ident_class, name): - if (ident_class, name) in self.truncated_names: - return self.truncated_names[(ident_class, name)] - - anonname = name.apply_map(self.anon_map) - - if len(anonname) > self.label_length - 6: - counter = self.truncated_names.get(ident_class, 1) - truncname = anonname[0:max(self.label_length - 6, 0)] + \ - "_" + hex(counter)[2:] - self.truncated_names[ident_class] = counter + 1 - else: - truncname = anonname - self.truncated_names[(ident_class, name)] = truncname - return truncname - - def _anonymize(self, name): - return name % self.anon_map - - def _process_anon(self, key): - (ident, derived) = key.split(' ', 1) - anonymous_counter = self.anon_map.get(derived, 1) - self.anon_map[derived] = anonymous_counter + 1 - return derived + "_" + str(anonymous_counter) - - def bindparam_string(self, name, positional_names=None, **kw): - if self.positional: - if positional_names is not None: - positional_names.append(name) - else: - self.positiontup.append(name) - return self.bindtemplate % {'name': name} - - def visit_cte(self, cte, asfrom=False, ashint=False, - fromhints=None, - **kwargs): - self._init_cte_state() - - if isinstance(cte.name, elements._truncated_label): - cte_name = self._truncated_identifier("alias", cte.name) - else: - cte_name = cte.name - - if cte_name in self.ctes_by_name: - existing_cte = self.ctes_by_name[cte_name] - # we've generated a same-named CTE that we are enclosed in, - # or this is the same CTE. just return the name. - if cte in existing_cte._restates or cte is existing_cte: - return self.preparer.format_alias(cte, cte_name) - elif existing_cte in cte._restates: - # we've generated a same-named CTE that is - # enclosed in us - we take precedence, so - # discard the text for the "inner". 
- del self.ctes[existing_cte] - else: - raise exc.CompileError( - "Multiple, unrelated CTEs found with " - "the same name: %r" % - cte_name) - - self.ctes_by_name[cte_name] = cte - - if cte._cte_alias is not None: - orig_cte = cte._cte_alias - if orig_cte not in self.ctes: - self.visit_cte(orig_cte, **kwargs) - cte_alias_name = cte._cte_alias.name - if isinstance(cte_alias_name, elements._truncated_label): - cte_alias_name = self._truncated_identifier( - "alias", cte_alias_name) - else: - orig_cte = cte - cte_alias_name = None - if not cte_alias_name and cte not in self.ctes: - if cte.recursive: - self.ctes_recursive = True - text = self.preparer.format_alias(cte, cte_name) - if cte.recursive: - if isinstance(cte.original, selectable.Select): - col_source = cte.original - elif isinstance(cte.original, selectable.CompoundSelect): - col_source = cte.original.selects[0] - else: - assert False - recur_cols = [c for c in - util.unique_list(col_source.inner_columns) - if c is not None] - - text += "(%s)" % (", ".join( - self.preparer.format_column(ident) - for ident in recur_cols)) - - if self.positional: - kwargs['positional_names'] = self.cte_positional[cte] = [] - - text += " AS \n" + \ - cte.original._compiler_dispatch( - self, asfrom=True, **kwargs - ) - - if cte._suffixes: - text += " " + self._generate_prefixes( - cte, cte._suffixes, **kwargs) - - self.ctes[cte] = text - - if asfrom: - if cte_alias_name: - text = self.preparer.format_alias(cte, cte_alias_name) - text += self.get_render_as_alias_suffix(cte_name) - else: - return self.preparer.format_alias(cte, cte_name) - return text - - def visit_alias(self, alias, asfrom=False, ashint=False, - iscrud=False, - fromhints=None, **kwargs): - if asfrom or ashint: - if isinstance(alias.name, elements._truncated_label): - alias_name = self._truncated_identifier("alias", alias.name) - else: - alias_name = alias.name - - if ashint: - return self.preparer.format_alias(alias, alias_name) - elif asfrom: - ret = alias.original._compiler_dispatch(self, - asfrom=True, **kwargs) + \ - self.get_render_as_alias_suffix( - self.preparer.format_alias(alias, alias_name)) - - if fromhints and alias in fromhints: - ret = self.format_from_hint_text(ret, alias, - fromhints[alias], iscrud) - - return ret - else: - return alias.original._compiler_dispatch(self, **kwargs) - - def get_render_as_alias_suffix(self, alias_name_text): - return " AS " + alias_name_text - - def _add_to_result_map(self, keyname, name, objects, type_): - self._result_columns.append((keyname, name, objects, type_)) - - def _label_select_column(self, select, column, - populate_result_map, - asfrom, column_clause_args, - name=None, - within_columns_clause=True): - """produce labeled columns present in a select().""" - - if column.type._has_column_expression and \ - populate_result_map: - col_expr = column.type.column_expression(column) - add_to_result_map = lambda keyname, name, objects, type_: \ - self._add_to_result_map( - keyname, name, - objects + (column,), type_) - else: - col_expr = column - if populate_result_map: - add_to_result_map = self._add_to_result_map - else: - add_to_result_map = None - - if not within_columns_clause: - result_expr = col_expr - elif isinstance(column, elements.Label): - if col_expr is not column: - result_expr = _CompileLabel( - col_expr, - column.name, - alt_names=(column.element,) - ) - else: - result_expr = col_expr - - elif select is not None and name: - result_expr = _CompileLabel( - col_expr, - name, - alt_names=(column._key_label,) - ) - - elif \ - 
asfrom and \ - isinstance(column, elements.ColumnClause) and \ - not column.is_literal and \ - column.table is not None and \ - not isinstance(column.table, selectable.Select): - result_expr = _CompileLabel(col_expr, - elements._as_truncated(column.name), - alt_names=(column.key,)) - elif ( - not isinstance(column, elements.TextClause) and - ( - not isinstance(column, elements.UnaryExpression) or - column.wraps_column_expression - ) and - ( - not hasattr(column, 'name') or - isinstance(column, functions.Function) - ) - ): - result_expr = _CompileLabel(col_expr, column.anon_label) - elif col_expr is not column: - # TODO: are we sure "column" has a .name and .key here ? - # assert isinstance(column, elements.ColumnClause) - result_expr = _CompileLabel(col_expr, - elements._as_truncated(column.name), - alt_names=(column.key,)) - else: - result_expr = col_expr - - column_clause_args.update( - within_columns_clause=within_columns_clause, - add_to_result_map=add_to_result_map - ) - return result_expr._compiler_dispatch( - self, - **column_clause_args - ) - - def format_from_hint_text(self, sqltext, table, hint, iscrud): - hinttext = self.get_from_hint_text(table, hint) - if hinttext: - sqltext += " " + hinttext - return sqltext - - def get_select_hint_text(self, byfroms): - return None - - def get_from_hint_text(self, table, text): - return None - - def get_crud_hint_text(self, table, text): - return None - - def get_statement_hint_text(self, hint_texts): - return " ".join(hint_texts) - - def _transform_select_for_nested_joins(self, select): - """Rewrite any "a JOIN (b JOIN c)" expression as - "a JOIN (select * from b JOIN c) AS anon", to support - databases that can't parse a parenthesized join correctly - (i.e. sqlite the main one). - - """ - cloned = {} - column_translate = [{}] - - def visit(element, **kw): - if element in column_translate[-1]: - return column_translate[-1][element] - - elif element in cloned: - return cloned[element] - - newelem = cloned[element] = element._clone() - - if newelem.is_selectable and newelem._is_join and \ - isinstance(newelem.right, selectable.FromGrouping): - - newelem._reset_exported() - newelem.left = visit(newelem.left, **kw) - - right = visit(newelem.right, **kw) - - selectable_ = selectable.Select( - [right.element], - use_labels=True).alias() - - for c in selectable_.c: - c._key_label = c.key - c._label = c.name - - translate_dict = dict( - zip(newelem.right.element.c, selectable_.c) - ) - - # translating from both the old and the new - # because different select() structures will lead us - # to traverse differently - translate_dict[right.element.left] = selectable_ - translate_dict[right.element.right] = selectable_ - translate_dict[newelem.right.element.left] = selectable_ - translate_dict[newelem.right.element.right] = selectable_ - - # propagate translations that we've gained - # from nested visit(newelem.right) outwards - # to the enclosing select here. this happens - # only when we have more than one level of right - # join nesting, i.e. "a JOIN (b JOIN (c JOIN d))" - for k, v in list(column_translate[-1].items()): - if v in translate_dict: - # remarkably, no current ORM tests (May 2013) - # hit this condition, only test_join_rewriting - # does. - column_translate[-1][k] = translate_dict[v] - - column_translate[-1].update(translate_dict) - - newelem.right = selectable_ - - newelem.onclause = visit(newelem.onclause, **kw) - - elif newelem._is_from_container: - # if we hit an Alias, CompoundSelect or ScalarSelect, put a - # marker in the stack. 
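
# Hedged sketch of the rewrite this visitor performs for dialects that cannot
# parse right-nested joins: "a JOIN (b JOIN c)" compiles as a JOIN against an
# anonymous labeled subquery. The tables are hypothetical, and whether the
# rewrite actually fires depends on the dialect's supports_right_nested_joins
# flag plus use_labels being set on the enclosing select.
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite

m = sa.MetaData()
a = sa.Table("a", m, sa.Column("id", sa.Integer))
b = sa.Table("b", m, sa.Column("id", sa.Integer), sa.Column("a_id", sa.Integer))
c = sa.Table("c", m, sa.Column("id", sa.Integer), sa.Column("b_id", sa.Integer))

j = a.join(b.join(c, b.c.id == c.c.b_id), a.c.id == b.c.a_id)
stmt = sa.select([a]).select_from(j).apply_labels()
print(stmt.compile(dialect=sqlite.dialect()))
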
- kw['transform_clue'] = 'select_container' - newelem._copy_internals(clone=visit, **kw) - elif newelem.is_selectable and newelem._is_select: - barrier_select = kw.get('transform_clue', None) == \ - 'select_container' - # if we're still descended from an - # Alias/CompoundSelect/ScalarSelect, we're - # in a FROM clause, so start with a new translate collection - if barrier_select: - column_translate.append({}) - kw['transform_clue'] = 'inside_select' - newelem._copy_internals(clone=visit, **kw) - if barrier_select: - del column_translate[-1] - else: - newelem._copy_internals(clone=visit, **kw) - - return newelem - - return visit(select) - - def _transform_result_map_for_nested_joins( - self, select, transformed_select): - inner_col = dict((c._key_label, c) for - c in transformed_select.inner_columns) - - d = dict( - (inner_col[c._key_label], c) - for c in select.inner_columns - ) - - self._result_columns = [ - (key, name, tuple([d.get(col, col) for col in objs]), typ) - for key, name, objs, typ in self._result_columns - ] - - _default_stack_entry = util.immutabledict([ - ('correlate_froms', frozenset()), - ('asfrom_froms', frozenset()) - ]) - - def _display_froms_for_select(self, select, asfrom): - # utility method to help external dialects - # get the correct from list for a select. - # specifically the oracle dialect needs this feature - # right now. - toplevel = not self.stack - entry = self._default_stack_entry if toplevel else self.stack[-1] - - correlate_froms = entry['correlate_froms'] - asfrom_froms = entry['asfrom_froms'] - - if asfrom: - froms = select._get_display_froms( - explicit_correlate_froms=correlate_froms.difference( - asfrom_froms), - implicit_correlate_froms=()) - else: - froms = select._get_display_froms( - explicit_correlate_froms=correlate_froms, - implicit_correlate_froms=asfrom_froms) - return froms - - def visit_select(self, select, asfrom=False, parens=True, - fromhints=None, - compound_index=0, - nested_join_translation=False, - select_wraps_for=None, - **kwargs): - - needs_nested_translation = \ - select.use_labels and \ - not nested_join_translation and \ - not self.stack and \ - not self.dialect.supports_right_nested_joins - - if needs_nested_translation: - transformed_select = self._transform_select_for_nested_joins( - select) - text = self.visit_select( - transformed_select, asfrom=asfrom, parens=parens, - fromhints=fromhints, - compound_index=compound_index, - nested_join_translation=True, **kwargs - ) - - toplevel = not self.stack - entry = self._default_stack_entry if toplevel else self.stack[-1] - - populate_result_map = toplevel or \ - ( - compound_index == 0 and entry.get( - 'need_result_map_for_compound', False) - ) or entry.get('need_result_map_for_nested', False) - - # this was first proposed as part of #3372; however, it is not - # reached in current tests and could possibly be an assertion - # instead. - if not populate_result_map and 'add_to_result_map' in kwargs: - del kwargs['add_to_result_map'] - - if needs_nested_translation: - if populate_result_map: - self._transform_result_map_for_nested_joins( - select, transformed_select) - return text - - froms = self._setup_select_stack(select, entry, asfrom) - - column_clause_args = kwargs.copy() - column_clause_args.update({ - 'within_label_clause': False, - 'within_columns_clause': False - }) - - text = "SELECT " # we're off to a good start ! 
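
# Toy illustration in plain Python (not SQLAlchemy internals) of the assembly
# order visit_select follows from this point: optional hint text and
# prefixes, the pre-columns hook, the rendered column list, and then
# _compose_select_body appends FROM / WHERE / GROUP BY / HAVING / ORDER BY /
# LIMIT in that order.
def compose_select(columns, froms, where=None, group_by=None, order_by=None):
    text = "SELECT " + ", ".join(columns)
    if froms:
        text += " \nFROM " + ", ".join(froms)
    if where:
        text += " \nWHERE " + where
    if group_by:
        text += " GROUP BY " + ", ".join(group_by)
    if order_by:
        text += " ORDER BY " + ", ".join(order_by)
    return text

print(compose_select(["u.id", "u.name"], ["users AS u"], where="u.id > 10"))
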
- - if select._hints: - hint_text, byfrom = self._setup_select_hints(select) - if hint_text: - text += hint_text + " " - else: - byfrom = None - - if select._prefixes: - text += self._generate_prefixes( - select, select._prefixes, **kwargs) - - text += self.get_select_precolumns(select, **kwargs) - - # the actual list of columns to print in the SELECT column list. - inner_columns = [ - c for c in [ - self._label_select_column( - select, - column, - populate_result_map, asfrom, - column_clause_args, - name=name) - for name, column in select._columns_plus_names - ] - if c is not None - ] - - if populate_result_map and select_wraps_for is not None: - # if this select is a compiler-generated wrapper, - # rewrite the targeted columns in the result map - wrapped_inner_columns = set(select_wraps_for.inner_columns) - translate = dict( - (outer, inner.pop()) for outer, inner in [ - ( - outer, - outer.proxy_set.intersection(wrapped_inner_columns)) - for outer in select.inner_columns - ] if inner - ) - self._result_columns = [ - (key, name, tuple(translate.get(o, o) for o in obj), type_) - for key, name, obj, type_ in self._result_columns - ] - - text = self._compose_select_body( - text, select, inner_columns, froms, byfrom, kwargs) - - if select._statement_hints: - per_dialect = [ - ht for (dialect_name, ht) - in select._statement_hints - if dialect_name in ('*', self.dialect.name) - ] - if per_dialect: - text += " " + self.get_statement_hint_text(per_dialect) - - if self.ctes and self._is_toplevel_select(select): - text = self._render_cte_clause() + text - - if select._suffixes: - text += " " + self._generate_prefixes( - select, select._suffixes, **kwargs) - - self.stack.pop(-1) - - if asfrom and parens: - return "(" + text + ")" - else: - return text - - def _is_toplevel_select(self, select): - """Return True if the stack is placed at the given select, and - is also the outermost SELECT, meaning there is either no stack - before this one, or the enclosing stack is a topmost INSERT. 
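
# Sketch of what _is_toplevel_select guards: CTE text accumulated in
# self.ctes is prepended as a single WITH clause only at the outermost
# SELECT. The "orders" table is hypothetical.
import sqlalchemy as sa

m = sa.MetaData()
orders = sa.Table("orders", m, sa.Column("id", sa.Integer),
                  sa.Column("amount", sa.Integer))
big = sa.select([orders.c.id]).where(orders.c.amount > 100).cte("big_orders")
stmt = sa.select([sa.func.count()]).select_from(big)
print(stmt)
# roughly: WITH big_orders AS (SELECT orders.id ... WHERE orders.amount > :amount_1)
#          SELECT count(*) AS count_1 FROM big_orders
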
- - """ - return ( - self.stack[-1]['selectable'] is select and - ( - len(self.stack) == 1 or self.isinsert and len(self.stack) == 2 - and self.statement is self.stack[0]['selectable'] - ) - ) - - def _setup_select_hints(self, select): - byfrom = dict([ - (from_, hinttext % { - 'name': from_._compiler_dispatch( - self, ashint=True) - }) - for (from_, dialect), hinttext in - select._hints.items() - if dialect in ('*', self.dialect.name) - ]) - hint_text = self.get_select_hint_text(byfrom) - return hint_text, byfrom - - def _setup_select_stack(self, select, entry, asfrom): - correlate_froms = entry['correlate_froms'] - asfrom_froms = entry['asfrom_froms'] - - if asfrom: - froms = select._get_display_froms( - explicit_correlate_froms=correlate_froms.difference( - asfrom_froms), - implicit_correlate_froms=()) - else: - froms = select._get_display_froms( - explicit_correlate_froms=correlate_froms, - implicit_correlate_froms=asfrom_froms) - - new_correlate_froms = set(selectable._from_objects(*froms)) - all_correlate_froms = new_correlate_froms.union(correlate_froms) - - new_entry = { - 'asfrom_froms': new_correlate_froms, - 'correlate_froms': all_correlate_froms, - 'selectable': select, - } - self.stack.append(new_entry) - return froms - - def _compose_select_body( - self, text, select, inner_columns, froms, byfrom, kwargs): - text += ', '.join(inner_columns) - - if froms: - text += " \nFROM " - - if select._hints: - text += ', '.join( - [f._compiler_dispatch(self, asfrom=True, - fromhints=byfrom, **kwargs) - for f in froms]) - else: - text += ', '.join( - [f._compiler_dispatch(self, asfrom=True, **kwargs) - for f in froms]) - else: - text += self.default_from() - - if select._whereclause is not None: - t = select._whereclause._compiler_dispatch(self, **kwargs) - if t: - text += " \nWHERE " + t - - if select._group_by_clause.clauses: - group_by = select._group_by_clause._compiler_dispatch( - self, **kwargs) - if group_by: - text += " GROUP BY " + group_by - - if select._having is not None: - t = select._having._compiler_dispatch(self, **kwargs) - if t: - text += " \nHAVING " + t - - if select._order_by_clause.clauses: - text += self.order_by_clause(select, **kwargs) - - if (select._limit_clause is not None or - select._offset_clause is not None): - text += self.limit_clause(select, **kwargs) - - if select._for_update_arg is not None: - text += self.for_update_clause(select, **kwargs) - - return text - - def _generate_prefixes(self, stmt, prefixes, **kw): - clause = " ".join( - prefix._compiler_dispatch(self, **kw) - for prefix, dialect_name in prefixes - if dialect_name is None or - dialect_name == self.dialect.name - ) - if clause: - clause += " " - return clause - - def _render_cte_clause(self): - if self.positional: - self.positiontup = sum([ - self.cte_positional[cte] - for cte in self.ctes], []) + \ - self.positiontup - cte_text = self.get_cte_preamble(self.ctes_recursive) + " " - cte_text += ", \n".join( - [txt for txt in self.ctes.values()] - ) - cte_text += "\n " - return cte_text - - def get_cte_preamble(self, recursive): - if recursive: - return "WITH RECURSIVE" - else: - return "WITH" - - def get_select_precolumns(self, select, **kw): - """Called when building a ``SELECT`` statement, position is just - before column list. 
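
# Sketch: the pre-columns hook below is where e.g. DISTINCT is emitted,
# between the SELECT keyword and the column list.
from sqlalchemy.sql import column, select

print(select([column("name")]).distinct())
# SELECT DISTINCT name
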
- - """ - return select._distinct and "DISTINCT " or "" - - def order_by_clause(self, select, **kw): - order_by = select._order_by_clause._compiler_dispatch(self, **kw) - if order_by: - return " ORDER BY " + order_by - else: - return "" - - def for_update_clause(self, select, **kw): - return " FOR UPDATE" - - def returning_clause(self, stmt, returning_cols): - raise exc.CompileError( - "RETURNING is not supported by this " - "dialect's statement compiler.") - - def limit_clause(self, select, **kw): - text = "" - if select._limit_clause is not None: - text += "\n LIMIT " + self.process(select._limit_clause, **kw) - if select._offset_clause is not None: - if select._limit_clause is None: - text += "\n LIMIT -1" - text += " OFFSET " + self.process(select._offset_clause, **kw) - return text - - def visit_table(self, table, asfrom=False, iscrud=False, ashint=False, - fromhints=None, **kwargs): - if asfrom or ashint: - if getattr(table, "schema", None): - ret = self.preparer.quote_schema(table.schema) + \ - "." + self.preparer.quote(table.name) - else: - ret = self.preparer.quote(table.name) - if fromhints and table in fromhints: - ret = self.format_from_hint_text(ret, table, - fromhints[table], iscrud) - return ret - else: - return "" - - def visit_join(self, join, asfrom=False, **kwargs): - return ( - join.left._compiler_dispatch(self, asfrom=True, **kwargs) + - (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + - join.right._compiler_dispatch(self, asfrom=True, **kwargs) + - " ON " + - join.onclause._compiler_dispatch(self, **kwargs) - ) - - def visit_insert(self, insert_stmt, **kw): - self.stack.append( - {'correlate_froms': set(), - "asfrom_froms": set(), - "selectable": insert_stmt}) - - self.isinsert = True - crud_params = crud._get_crud_params(self, insert_stmt, **kw) - - if not crud_params and \ - not self.dialect.supports_default_values and \ - not self.dialect.supports_empty_insert: - raise exc.CompileError("The '%s' dialect with current database " - "version settings does not support empty " - "inserts." % - self.dialect.name) - - if insert_stmt._has_multi_parameters: - if not self.dialect.supports_multivalues_insert: - raise exc.CompileError( - "The '%s' dialect with current database " - "version settings does not support " - "in-place multirow inserts." 
% - self.dialect.name) - crud_params_single = crud_params[0] - else: - crud_params_single = crud_params - - preparer = self.preparer - supports_default_values = self.dialect.supports_default_values - - text = "INSERT " - - if insert_stmt._prefixes: - text += self._generate_prefixes(insert_stmt, - insert_stmt._prefixes, **kw) - - text += "INTO " - table_text = preparer.format_table(insert_stmt.table) - - if insert_stmt._hints: - dialect_hints = dict([ - (table, hint_text) - for (table, dialect), hint_text in - insert_stmt._hints.items() - if dialect in ('*', self.dialect.name) - ]) - if insert_stmt.table in dialect_hints: - table_text = self.format_from_hint_text( - table_text, - insert_stmt.table, - dialect_hints[insert_stmt.table], - True - ) - - text += table_text - - if crud_params_single or not supports_default_values: - text += " (%s)" % ', '.join([preparer.format_column(c[0]) - for c in crud_params_single]) - - if self.returning or insert_stmt._returning: - self.returning = self.returning or insert_stmt._returning - returning_clause = self.returning_clause( - insert_stmt, self.returning) - - if self.returning_precedes_values: - text += " " + returning_clause - - if insert_stmt.select is not None: - text += " %s" % self.process(self._insert_from_select, **kw) - elif not crud_params and supports_default_values: - text += " DEFAULT VALUES" - elif insert_stmt._has_multi_parameters: - text += " VALUES %s" % ( - ", ".join( - "(%s)" % ( - ', '.join(c[1] for c in crud_param_set) - ) - for crud_param_set in crud_params - ) - ) - else: - text += " VALUES (%s)" % \ - ', '.join([c[1] for c in crud_params]) - - if self.returning and not self.returning_precedes_values: - text += " " + returning_clause - - self.stack.pop(-1) - - return text - - def update_limit_clause(self, update_stmt): - """Provide a hook for MySQL to add LIMIT to the UPDATE""" - return None - - def update_tables_clause(self, update_stmt, from_table, - extra_froms, **kw): - """Provide a hook to override the initial table clause - in an UPDATE statement. - - MySQL overrides this. - - """ - return from_table._compiler_dispatch(self, asfrom=True, - iscrud=True, **kw) - - def update_from_clause(self, update_stmt, - from_table, extra_froms, - from_hints, - **kw): - """Provide a hook to override the generation of an - UPDATE..FROM clause. - - MySQL and MSSQL override this. 
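
# Hedged sketch of the multi-table UPDATE path: when a statement references
# extra tables, the default compiler emits the UPDATE ... FROM form via
# update_from_clause (MySQL and MSSQL dialects override the details). Both
# tables are hypothetical.
import sqlalchemy as sa

m = sa.MetaData()
users = sa.Table("users", m, sa.Column("id", sa.Integer),
                 sa.Column("name", sa.String(50)))
addresses = sa.Table("addresses", m, sa.Column("user_id", sa.Integer),
                     sa.Column("email", sa.String(50)))

stmt = (users.update()
        .values(name=addresses.c.email)
        .where(users.c.id == addresses.c.user_id))
print(stmt)
# roughly: UPDATE users SET name=addresses.email FROM addresses
#          WHERE users.id = addresses.user_id
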
- - """ - return "FROM " + ', '.join( - t._compiler_dispatch(self, asfrom=True, - fromhints=from_hints, **kw) - for t in extra_froms) - - def visit_update(self, update_stmt, **kw): - self.stack.append( - {'correlate_froms': set([update_stmt.table]), - "asfrom_froms": set([update_stmt.table]), - "selectable": update_stmt}) - - self.isupdate = True - - extra_froms = update_stmt._extra_froms - - text = "UPDATE " - - if update_stmt._prefixes: - text += self._generate_prefixes(update_stmt, - update_stmt._prefixes, **kw) - - table_text = self.update_tables_clause(update_stmt, update_stmt.table, - extra_froms, **kw) - - crud_params = crud._get_crud_params(self, update_stmt, **kw) - - if update_stmt._hints: - dialect_hints = dict([ - (table, hint_text) - for (table, dialect), hint_text in - update_stmt._hints.items() - if dialect in ('*', self.dialect.name) - ]) - if update_stmt.table in dialect_hints: - table_text = self.format_from_hint_text( - table_text, - update_stmt.table, - dialect_hints[update_stmt.table], - True - ) - else: - dialect_hints = None - - text += table_text - - text += ' SET ' - include_table = extra_froms and \ - self.render_table_with_column_in_update_from - text += ', '.join( - c[0]._compiler_dispatch(self, - include_table=include_table) + - '=' + c[1] for c in crud_params - ) - - if self.returning or update_stmt._returning: - if not self.returning: - self.returning = update_stmt._returning - if self.returning_precedes_values: - text += " " + self.returning_clause( - update_stmt, self.returning) - - if extra_froms: - extra_from_text = self.update_from_clause( - update_stmt, - update_stmt.table, - extra_froms, - dialect_hints, **kw) - if extra_from_text: - text += " " + extra_from_text - - if update_stmt._whereclause is not None: - t = self.process(update_stmt._whereclause) - if t: - text += " WHERE " + t - - limit_clause = self.update_limit_clause(update_stmt) - if limit_clause: - text += " " + limit_clause - - if self.returning and not self.returning_precedes_values: - text += " " + self.returning_clause( - update_stmt, self.returning) - - self.stack.pop(-1) - - return text - - @util.memoized_property - def _key_getters_for_crud_column(self): - return crud._key_getters_for_crud_column(self) - - def visit_delete(self, delete_stmt, **kw): - self.stack.append({'correlate_froms': set([delete_stmt.table]), - "asfrom_froms": set([delete_stmt.table]), - "selectable": delete_stmt}) - self.isdelete = True - - text = "DELETE " - - if delete_stmt._prefixes: - text += self._generate_prefixes(delete_stmt, - delete_stmt._prefixes, **kw) - - text += "FROM " - table_text = delete_stmt.table._compiler_dispatch( - self, asfrom=True, iscrud=True) - - if delete_stmt._hints: - dialect_hints = dict([ - (table, hint_text) - for (table, dialect), hint_text in - delete_stmt._hints.items() - if dialect in ('*', self.dialect.name) - ]) - if delete_stmt.table in dialect_hints: - table_text = self.format_from_hint_text( - table_text, - delete_stmt.table, - dialect_hints[delete_stmt.table], - True - ) - - else: - dialect_hints = None - - text += table_text - - if delete_stmt._returning: - self.returning = delete_stmt._returning - if self.returning_precedes_values: - text += " " + self.returning_clause( - delete_stmt, delete_stmt._returning) - - if delete_stmt._whereclause is not None: - t = delete_stmt._whereclause._compiler_dispatch(self) - if t: - text += " WHERE " + t - - if self.returning and not self.returning_precedes_values: - text += " " + self.returning_clause( - delete_stmt, 
delete_stmt._returning) - - self.stack.pop(-1) - - return text - - def visit_savepoint(self, savepoint_stmt): - return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt) - - def visit_rollback_to_savepoint(self, savepoint_stmt): - return "ROLLBACK TO SAVEPOINT %s" % \ - self.preparer.format_savepoint(savepoint_stmt) - - def visit_release_savepoint(self, savepoint_stmt): - return "RELEASE SAVEPOINT %s" % \ - self.preparer.format_savepoint(savepoint_stmt) - - -class DDLCompiler(Compiled): - - @util.memoized_property - def sql_compiler(self): - return self.dialect.statement_compiler(self.dialect, None) - - @util.memoized_property - def type_compiler(self): - return self.dialect.type_compiler - - @property - def preparer(self): - return self.dialect.identifier_preparer - - def construct_params(self, params=None): - return None - - def visit_ddl(self, ddl, **kwargs): - # table events can substitute table and schema name - context = ddl.context - if isinstance(ddl.target, schema.Table): - context = context.copy() - - preparer = self.dialect.identifier_preparer - path = preparer.format_table_seq(ddl.target) - if len(path) == 1: - table, sch = path[0], '' - else: - table, sch = path[-1], path[0] - - context.setdefault('table', table) - context.setdefault('schema', sch) - context.setdefault('fullname', preparer.format_table(ddl.target)) - - return self.sql_compiler.post_process_text(ddl.statement % context) - - def visit_create_schema(self, create): - schema = self.preparer.format_schema(create.element) - return "CREATE SCHEMA " + schema - - def visit_drop_schema(self, drop): - schema = self.preparer.format_schema(drop.element) - text = "DROP SCHEMA " + schema - if drop.cascade: - text += " CASCADE" - return text - - def visit_create_table(self, create): - table = create.element - preparer = self.dialect.identifier_preparer - - text = "\n" + " ".join(['CREATE'] + - table._prefixes + - ['TABLE', - preparer.format_table(table), - "("]) - separator = "\n" - - # if only one primary key, specify it along with the column - first_pk = False - for create_column in create.columns: - column = create_column.element - try: - processed = self.process(create_column, - first_pk=column.primary_key - and not first_pk) - if processed is not None: - text += separator - separator = ", \n" - text += "\t" + processed - if column.primary_key: - first_pk = True - except exc.CompileError as ce: - util.raise_from_cause( - exc.CompileError( - util.u("(in table '%s', column '%s'): %s") % - (table.description, column.name, ce.args[0]) - )) - - const = self.create_table_constraints( - table, _include_foreign_key_constraints= - create.include_foreign_key_constraints) - if const: - text += ", \n\t" + const - - text += "\n)%s\n\n" % self.post_create_table(table) - return text - - def visit_create_column(self, create, first_pk=False): - column = create.element - - if column.system: - return None - - text = self.get_column_specification( - column, - first_pk=first_pk - ) - const = " ".join(self.process(constraint) - for constraint in column.constraints) - if const: - text += " " + const - - return text - - def create_table_constraints( - self, table, - _include_foreign_key_constraints=None): - - # On some DB order is significant: visit PK first, then the - # other constraints (engine.ReflectionTest.testbasic failed on FB2) - constraints = [] - if table.primary_key: - constraints.append(table.primary_key) - - all_fkcs = table.foreign_key_constraints - if _include_foreign_key_constraints is not None: - omit_fkcs = 
all_fkcs.difference(_include_foreign_key_constraints) - else: - omit_fkcs = set() - - constraints.extend([c for c in table._sorted_constraints - if c is not table.primary_key and - c not in omit_fkcs]) - - return ", \n\t".join( - p for p in - (self.process(constraint) - for constraint in constraints - if ( - constraint._create_rule is None or - constraint._create_rule(self)) - and ( - not self.dialect.supports_alter or - not getattr(constraint, 'use_alter', False) - )) if p is not None - ) - - def visit_drop_table(self, drop): - return "\nDROP TABLE " + self.preparer.format_table(drop.element) - - def visit_drop_view(self, drop): - return "\nDROP VIEW " + self.preparer.format_table(drop.element) - - def _verify_index_table(self, index): - if index.table is None: - raise exc.CompileError("Index '%s' is not associated " - "with any table." % index.name) - - def visit_create_index(self, create, include_schema=False, - include_table_schema=True): - index = create.element - self._verify_index_table(index) - preparer = self.preparer - text = "CREATE " - if index.unique: - text += "UNIQUE " - text += "INDEX %s ON %s (%s)" \ - % ( - self._prepared_index_name(index, - include_schema=include_schema), - preparer.format_table(index.table, - use_schema=include_table_schema), - ', '.join( - self.sql_compiler.process( - expr, include_table=False, literal_binds=True) for - expr in index.expressions) - ) - return text - - def visit_drop_index(self, drop): - index = drop.element - return "\nDROP INDEX " + self._prepared_index_name( - index, include_schema=True) - - def _prepared_index_name(self, index, include_schema=False): - if include_schema and index.table is not None and index.table.schema: - schema = index.table.schema - schema_name = self.preparer.quote_schema(schema) - else: - schema_name = None - - ident = index.name - if isinstance(ident, elements._truncated_label): - max_ = self.dialect.max_index_name_length or \ - self.dialect.max_identifier_length - if len(ident) > max_: - ident = ident[0:max_ - 8] + \ - "_" + util.md5_hex(ident)[-4:] - else: - self.dialect.validate_identifier(ident) - - index_name = self.preparer.quote(ident) - - if schema_name: - index_name = schema_name + "." 
+ index_name - return index_name - - def visit_add_constraint(self, create): - return "ALTER TABLE %s ADD %s" % ( - self.preparer.format_table(create.element.table), - self.process(create.element) - ) - - def visit_create_sequence(self, create): - text = "CREATE SEQUENCE %s" % \ - self.preparer.format_sequence(create.element) - if create.element.increment is not None: - text += " INCREMENT BY %d" % create.element.increment - if create.element.start is not None: - text += " START WITH %d" % create.element.start - if create.element.minvalue is not None: - text += " MINVALUE %d" % create.element.minvalue - if create.element.maxvalue is not None: - text += " MAXVALUE %d" % create.element.maxvalue - if create.element.nominvalue is not None: - text += " NO MINVALUE" - if create.element.nomaxvalue is not None: - text += " NO MAXVALUE" - if create.element.cycle is not None: - text += " CYCLE" - return text - - def visit_drop_sequence(self, drop): - return "DROP SEQUENCE %s" % \ - self.preparer.format_sequence(drop.element) - - def visit_drop_constraint(self, drop): - constraint = drop.element - if constraint.name is not None: - formatted_name = self.preparer.format_constraint(constraint) - else: - formatted_name = None - - if formatted_name is None: - raise exc.CompileError( - "Can't emit DROP CONSTRAINT for constraint %r; " - "it has no name" % drop.element) - return "ALTER TABLE %s DROP CONSTRAINT %s%s" % ( - self.preparer.format_table(drop.element.table), - formatted_name, - drop.cascade and " CASCADE" or "" - ) - - def get_column_specification(self, column, **kwargs): - colspec = self.preparer.format_column(column) + " " + \ - self.dialect.type_compiler.process( - column.type, type_expression=column) - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - if not column.nullable: - colspec += " NOT NULL" - return colspec - - def post_create_table(self, table): - return '' - - def get_column_default_string(self, column): - if isinstance(column.server_default, schema.DefaultClause): - if isinstance(column.server_default.arg, util.string_types): - return "'%s'" % column.server_default.arg - else: - return self.sql_compiler.process( - column.server_default.arg, literal_binds=True) - else: - return None - - def visit_check_constraint(self, constraint): - text = "" - if constraint.name is not None: - formatted_name = self.preparer.format_constraint(constraint) - if formatted_name is not None: - text += "CONSTRAINT %s " % formatted_name - text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext, - include_table=False, - literal_binds=True) - text += self.define_constraint_deferrability(constraint) - return text - - def visit_column_check_constraint(self, constraint): - text = "" - if constraint.name is not None: - formatted_name = self.preparer.format_constraint(constraint) - if formatted_name is not None: - text += "CONSTRAINT %s " % formatted_name - text += "CHECK (%s)" % constraint.sqltext - text += self.define_constraint_deferrability(constraint) - return text - - def visit_primary_key_constraint(self, constraint): - if len(constraint) == 0: - return '' - text = "" - if constraint.name is not None: - formatted_name = self.preparer.format_constraint(constraint) - if formatted_name is not None: - text += "CONSTRAINT %s " % formatted_name - text += "PRIMARY KEY " - text += "(%s)" % ', '.join(self.preparer.quote(c.name) - for c in constraint) - text += self.define_constraint_deferrability(constraint) - return text - - def 
visit_foreign_key_constraint(self, constraint): - preparer = self.dialect.identifier_preparer - text = "" - if constraint.name is not None: - formatted_name = self.preparer.format_constraint(constraint) - if formatted_name is not None: - text += "CONSTRAINT %s " % formatted_name - remote_table = list(constraint.elements)[0].column.table - text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( - ', '.join(preparer.quote(f.parent.name) - for f in constraint.elements), - self.define_constraint_remote_table( - constraint, remote_table, preparer), - ', '.join(preparer.quote(f.column.name) - for f in constraint.elements) - ) - text += self.define_constraint_match(constraint) - text += self.define_constraint_cascades(constraint) - text += self.define_constraint_deferrability(constraint) - return text - - def define_constraint_remote_table(self, constraint, table, preparer): - """Format the remote table clause of a CREATE CONSTRAINT clause.""" - - return preparer.format_table(table) - - def visit_unique_constraint(self, constraint): - if len(constraint) == 0: - return '' - text = "" - if constraint.name is not None: - formatted_name = self.preparer.format_constraint(constraint) - text += "CONSTRAINT %s " % formatted_name - text += "UNIQUE (%s)" % ( - ', '.join(self.preparer.quote(c.name) - for c in constraint)) - text += self.define_constraint_deferrability(constraint) - return text - - def define_constraint_cascades(self, constraint): - text = "" - if constraint.ondelete is not None: - text += " ON DELETE %s" % constraint.ondelete - if constraint.onupdate is not None: - text += " ON UPDATE %s" % constraint.onupdate - return text - - def define_constraint_deferrability(self, constraint): - text = "" - if constraint.deferrable is not None: - if constraint.deferrable: - text += " DEFERRABLE" - else: - text += " NOT DEFERRABLE" - if constraint.initially is not None: - text += " INITIALLY %s" % constraint.initially - return text - - def define_constraint_match(self, constraint): - text = "" - if constraint.match is not None: - text += " MATCH %s" % constraint.match - return text - - -class GenericTypeCompiler(TypeCompiler): - - def visit_FLOAT(self, type_, **kw): - return "FLOAT" - - def visit_REAL(self, type_, **kw): - return "REAL" - - def visit_NUMERIC(self, type_, **kw): - if type_.precision is None: - return "NUMERIC" - elif type_.scale is None: - return "NUMERIC(%(precision)s)" % \ - {'precision': type_.precision} - else: - return "NUMERIC(%(precision)s, %(scale)s)" % \ - {'precision': type_.precision, - 'scale': type_.scale} - - def visit_DECIMAL(self, type_, **kw): - if type_.precision is None: - return "DECIMAL" - elif type_.scale is None: - return "DECIMAL(%(precision)s)" % \ - {'precision': type_.precision} - else: - return "DECIMAL(%(precision)s, %(scale)s)" % \ - {'precision': type_.precision, - 'scale': type_.scale} - - def visit_INTEGER(self, type_, **kw): - return "INTEGER" - - def visit_SMALLINT(self, type_, **kw): - return "SMALLINT" - - def visit_BIGINT(self, type_, **kw): - return "BIGINT" - - def visit_TIMESTAMP(self, type_, **kw): - return 'TIMESTAMP' - - def visit_DATETIME(self, type_, **kw): - return "DATETIME" - - def visit_DATE(self, type_, **kw): - return "DATE" - - def visit_TIME(self, type_, **kw): - return "TIME" - - def visit_CLOB(self, type_, **kw): - return "CLOB" - - def visit_NCLOB(self, type_, **kw): - return "NCLOB" - - def _render_string_type(self, type_, name): - - text = name - if type_.length: - text += "(%d)" % type_.length - if type_.collation: - text += ' COLLATE 
"%s"' % type_.collation - return text - - def visit_CHAR(self, type_, **kw): - return self._render_string_type(type_, "CHAR") - - def visit_NCHAR(self, type_, **kw): - return self._render_string_type(type_, "NCHAR") - - def visit_VARCHAR(self, type_, **kw): - return self._render_string_type(type_, "VARCHAR") - - def visit_NVARCHAR(self, type_, **kw): - return self._render_string_type(type_, "NVARCHAR") - - def visit_TEXT(self, type_, **kw): - return self._render_string_type(type_, "TEXT") - - def visit_BLOB(self, type_, **kw): - return "BLOB" - - def visit_BINARY(self, type_, **kw): - return "BINARY" + (type_.length and "(%d)" % type_.length or "") - - def visit_VARBINARY(self, type_, **kw): - return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") - - def visit_BOOLEAN(self, type_, **kw): - return "BOOLEAN" - - def visit_large_binary(self, type_, **kw): - return self.visit_BLOB(type_, **kw) - - def visit_boolean(self, type_, **kw): - return self.visit_BOOLEAN(type_, **kw) - - def visit_time(self, type_, **kw): - return self.visit_TIME(type_, **kw) - - def visit_datetime(self, type_, **kw): - return self.visit_DATETIME(type_, **kw) - - def visit_date(self, type_, **kw): - return self.visit_DATE(type_, **kw) - - def visit_big_integer(self, type_, **kw): - return self.visit_BIGINT(type_, **kw) - - def visit_small_integer(self, type_, **kw): - return self.visit_SMALLINT(type_, **kw) - - def visit_integer(self, type_, **kw): - return self.visit_INTEGER(type_, **kw) - - def visit_real(self, type_, **kw): - return self.visit_REAL(type_, **kw) - - def visit_float(self, type_, **kw): - return self.visit_FLOAT(type_, **kw) - - def visit_numeric(self, type_, **kw): - return self.visit_NUMERIC(type_, **kw) - - def visit_string(self, type_, **kw): - return self.visit_VARCHAR(type_, **kw) - - def visit_unicode(self, type_, **kw): - return self.visit_VARCHAR(type_, **kw) - - def visit_text(self, type_, **kw): - return self.visit_TEXT(type_, **kw) - - def visit_unicode_text(self, type_, **kw): - return self.visit_TEXT(type_, **kw) - - def visit_enum(self, type_, **kw): - return self.visit_VARCHAR(type_, **kw) - - def visit_null(self, type_, **kw): - raise exc.CompileError("Can't generate DDL for %r; " - "did you forget to specify a " - "type on this Column?" % type_) - - def visit_type_decorator(self, type_, **kw): - return self.process(type_.type_engine(self.dialect), **kw) - - def visit_user_defined(self, type_, **kw): - return type_.get_col_spec(**kw) - - -class IdentifierPreparer(object): - - """Handle quoting and case-folding of identifiers based on options.""" - - reserved_words = RESERVED_WORDS - - legal_characters = LEGAL_CHARACTERS - - illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS - - def __init__(self, dialect, initial_quote='"', - final_quote=None, escape_quote='"', omit_schema=False): - """Construct a new ``IdentifierPreparer`` object. - - initial_quote - Character that begins a delimited identifier. - - final_quote - Character that ends a delimited identifier. Defaults to - `initial_quote`. - - omit_schema - Prevent prepending schema name. Useful for databases that do - not support schemae. - """ - - self.dialect = dialect - self.initial_quote = initial_quote - self.final_quote = final_quote or self.initial_quote - self.escape_quote = escape_quote - self.escape_to_quote = self.escape_quote * 2 - self.omit_schema = omit_schema - self._strings = {} - - def _escape_identifier(self, value): - """Escape an identifier. 
- - Subclasses should override this to provide database-dependent - escaping behavior. - """ - - return value.replace(self.escape_quote, self.escape_to_quote) - - def _unescape_identifier(self, value): - """Canonicalize an escaped identifier. - - Subclasses should override this to provide database-dependent - unescaping behavior that reverses _escape_identifier. - """ - - return value.replace(self.escape_to_quote, self.escape_quote) - - def quote_identifier(self, value): - """Quote an identifier. - - Subclasses should override this to provide database-dependent - quoting behavior. - """ - - return self.initial_quote + \ - self._escape_identifier(value) + \ - self.final_quote - - def _requires_quotes(self, value): - """Return True if the given identifier requires quoting.""" - lc_value = value.lower() - return (lc_value in self.reserved_words - or value[0] in self.illegal_initial_characters - or not self.legal_characters.match(util.text_type(value)) - or (lc_value != value)) - - def quote_schema(self, schema, force=None): - """Conditionally quote a schema. - - Subclasses can override this to provide database-dependent - quoting behavior for schema names. - - the 'force' flag should be considered deprecated. - - """ - return self.quote(schema, force) - - def quote(self, ident, force=None): - """Conditionally quote an identifier. - - the 'force' flag should be considered deprecated. - """ - - force = getattr(ident, "quote", None) - - if force is None: - if ident in self._strings: - return self._strings[ident] - else: - if self._requires_quotes(ident): - self._strings[ident] = self.quote_identifier(ident) - else: - self._strings[ident] = ident - return self._strings[ident] - elif force: - return self.quote_identifier(ident) - else: - return ident - - def format_sequence(self, sequence, use_schema=True): - name = self.quote(sequence.name) - if (not self.omit_schema and use_schema and - sequence.schema is not None): - name = self.quote_schema(sequence.schema) + "." + name - return name - - def format_label(self, label, name=None): - return self.quote(name or label.name) - - def format_alias(self, alias, name=None): - return self.quote(name or alias.name) - - def format_savepoint(self, savepoint, name=None): - return self.quote(name or savepoint.ident) - - @util.dependencies("sqlalchemy.sql.naming") - def format_constraint(self, naming, constraint): - if isinstance(constraint.name, elements._defer_name): - name = naming._constraint_name_for_table( - constraint, constraint.table) - if name: - return self.quote(name) - elif isinstance(constraint.name, elements._defer_none_name): - return None - return self.quote(constraint.name) - - def format_table(self, table, use_schema=True, name=None): - """Prepare a quoted table and schema name.""" - - if name is None: - name = table.name - result = self.quote(name) - if not self.omit_schema and use_schema \ - and getattr(table, "schema", None): - result = self.quote_schema(table.schema) + "." + result - return result - - def format_schema(self, name, quote=None): - """Prepare a quoted schema name.""" - - return self.quote(name, quote) - - def format_column(self, column, use_table=False, - name=None, table_name=None): - """Prepare a quoted column name.""" - - if name is None: - name = column.name - if not getattr(column, 'is_literal', False): - if use_table: - return self.format_table( - column.table, use_schema=False, - name=table_name) + "." 
+ self.quote(name) - else: - return self.quote(name) - else: - # literal textual elements get stuck into ColumnClause a lot, - # which shouldn't get quoted - - if use_table: - return self.format_table( - column.table, use_schema=False, - name=table_name) + '.' + name - else: - return name - - def format_table_seq(self, table, use_schema=True): - """Format table name and schema as a tuple.""" - - # Dialects with more levels in their fully qualified references - # ('database', 'owner', etc.) could override this and return - # a longer sequence. - - if not self.omit_schema and use_schema and \ - getattr(table, 'schema', None): - return (self.quote_schema(table.schema), - self.format_table(table, use_schema=False)) - else: - return (self.format_table(table, use_schema=False), ) - - @util.memoized_property - def _r_identifiers(self): - initial, final, escaped_final = \ - [re.escape(s) for s in - (self.initial_quote, self.final_quote, - self._escape_identifier(self.final_quote))] - r = re.compile( - r'(?:' - r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s' - r'|([^\.]+))(?=\.|$))+' % - {'initial': initial, - 'final': final, - 'escaped': escaped_final}) - return r - - def unformat_identifiers(self, identifiers): - """Unpack 'schema.table.column'-like strings into components.""" - - r = self._r_identifiers - return [self._unescape_identifier(i) - for i in [a or b for a, b in r.findall(identifiers)]] diff --git a/python/sqlalchemy/sql/crud.py b/python/sqlalchemy/sql/crud.py deleted file mode 100644 index e6f16b69..00000000 --- a/python/sqlalchemy/sql/crud.py +++ /dev/null @@ -1,557 +0,0 @@ -# sql/crud.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Functions used by compiler.py to determine the parameters rendered -within INSERT and UPDATE statements. - -""" -from .. import util -from .. import exc -from . import elements -import operator - -REQUIRED = util.symbol('REQUIRED', """ -Placeholder for the value within a :class:`.BindParameter` -which is required to be present when the statement is passed -to :meth:`.Connection.execute`. - -This symbol is typically used when a :func:`.expression.insert` -or :func:`.expression.update` statement is compiled without parameter -values present. - -""") - - -def _get_crud_params(compiler, stmt, **kw): - """create a set of tuples representing column/string pairs for use - in an INSERT or UPDATE statement. - - Also generates the Compiled object's postfetch, prefetch, and - returning column collections, used for default handling and ultimately - populating the ResultProxy's prefetch_cols() and postfetch_cols() - collections. 
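
# Sketch of the default case described above: an INSERT compiled with no
# parameters gets one REQUIRED bind per column, so execution fails loudly if
# a value is never supplied. The "t" table is hypothetical.
import sqlalchemy as sa

m = sa.MetaData()
t = sa.Table("t", m, sa.Column("id", sa.Integer), sa.Column("x", sa.Integer))
print(t.insert())
# INSERT INTO t (id, x) VALUES (:id, :x)
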
- - """ - - compiler.postfetch = [] - compiler.prefetch = [] - compiler.returning = [] - - # no parameters in the statement, no parameters in the - # compiled params - return binds for all columns - if compiler.column_keys is None and stmt.parameters is None: - return [ - (c, _create_bind_param( - compiler, c, None, required=True)) - for c in stmt.table.columns - ] - - if stmt._has_multi_parameters: - stmt_parameters = stmt.parameters[0] - else: - stmt_parameters = stmt.parameters - - # getters - these are normally just column.key, - # but in the case of mysql multi-table update, the rules for - # .key must conditionally take tablename into account - _column_as_key, _getattr_col_key, _col_bind_name = \ - _key_getters_for_crud_column(compiler) - - # if we have statement parameters - set defaults in the - # compiled params - if compiler.column_keys is None: - parameters = {} - else: - parameters = dict((_column_as_key(key), REQUIRED) - for key in compiler.column_keys - if not stmt_parameters or - key not in stmt_parameters) - - # create a list of column assignment clauses as tuples - values = [] - - if stmt_parameters is not None: - _get_stmt_parameters_params( - compiler, - parameters, stmt_parameters, _column_as_key, values, kw) - - check_columns = {} - - # special logic that only occurs for multi-table UPDATE - # statements - if compiler.isupdate and stmt._extra_froms and stmt_parameters: - _get_multitable_params( - compiler, stmt, stmt_parameters, check_columns, - _col_bind_name, _getattr_col_key, values, kw) - - if compiler.isinsert and stmt.select_names: - _scan_insert_from_select_cols( - compiler, stmt, parameters, - _getattr_col_key, _column_as_key, - _col_bind_name, check_columns, values, kw) - else: - _scan_cols( - compiler, stmt, parameters, - _getattr_col_key, _column_as_key, - _col_bind_name, check_columns, values, kw) - - if parameters and stmt_parameters: - check = set(parameters).intersection( - _column_as_key(k) for k in stmt.parameters - ).difference(check_columns) - if check: - raise exc.CompileError( - "Unconsumed column names: %s" % - (", ".join("%s" % c for c in check)) - ) - - if stmt._has_multi_parameters: - values = _extend_values_for_multiparams(compiler, stmt, values, kw) - - return values - - -def _create_bind_param( - compiler, col, value, process=True, - required=False, name=None): - if name is None: - name = col.key - bindparam = elements.BindParameter( - name, value, type_=col.type, required=required) - bindparam._is_crud = True - if process: - bindparam = bindparam._compiler_dispatch(compiler) - return bindparam - - -def _key_getters_for_crud_column(compiler): - if compiler.isupdate and compiler.statement._extra_froms: - # when extra tables are present, refer to the columns - # in those extra tables as table-qualified, including in - # dictionaries and when rendering bind param names. - # the "main" table of the statement remains unqualified, - # allowing the most compatibility with a non-multi-table - # statement. 
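
# Sketch of the table-qualified naming these getters produce: in a
# multi-table UPDATE, columns of an "extra" table are bind-named
# "<table>_<column>" so they cannot collide with main-table binds. Tables
# and values are hypothetical.
import sqlalchemy as sa

m = sa.MetaData()
users = sa.Table("users", m, sa.Column("id", sa.Integer),
                 sa.Column("name", sa.String(50)))
addresses = sa.Table("addresses", m, sa.Column("user_id", sa.Integer),
                     sa.Column("email", sa.String(50)))

stmt = (users.update()
        .values({users.c.name: "ed", addresses.c.email: "ed@example.com"})
        .where(users.c.id == addresses.c.user_id))
print(stmt.compile().params)
# roughly: {'name': 'ed', 'addresses_email': 'ed@example.com'}
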
- _et = set(compiler.statement._extra_froms) - - def _column_as_key(key): - str_key = elements._column_as_key(key) - if hasattr(key, 'table') and key.table in _et: - return (key.table.name, str_key) - else: - return str_key - - def _getattr_col_key(col): - if col.table in _et: - return (col.table.name, col.key) - else: - return col.key - - def _col_bind_name(col): - if col.table in _et: - return "%s_%s" % (col.table.name, col.key) - else: - return col.key - - else: - _column_as_key = elements._column_as_key - _getattr_col_key = _col_bind_name = operator.attrgetter("key") - - return _column_as_key, _getattr_col_key, _col_bind_name - - -def _scan_insert_from_select_cols( - compiler, stmt, parameters, _getattr_col_key, - _column_as_key, _col_bind_name, check_columns, values, kw): - - need_pks, implicit_returning, \ - implicit_return_defaults, postfetch_lastrowid = \ - _get_returning_modifiers(compiler, stmt) - - cols = [stmt.table.c[_column_as_key(name)] - for name in stmt.select_names] - - compiler._insert_from_select = stmt.select - - add_select_cols = [] - if stmt.include_insert_from_select_defaults: - col_set = set(cols) - for col in stmt.table.columns: - if col not in col_set and col.default: - cols.append(col) - - for c in cols: - col_key = _getattr_col_key(c) - if col_key in parameters and col_key not in check_columns: - parameters.pop(col_key) - values.append((c, None)) - else: - _append_param_insert_select_hasdefault( - compiler, stmt, c, add_select_cols, kw) - - if add_select_cols: - values.extend(add_select_cols) - compiler._insert_from_select = compiler._insert_from_select._generate() - compiler._insert_from_select._raw_columns += tuple( - expr for col, expr in add_select_cols) - - -def _scan_cols( - compiler, stmt, parameters, _getattr_col_key, - _column_as_key, _col_bind_name, check_columns, values, kw): - - need_pks, implicit_returning, \ - implicit_return_defaults, postfetch_lastrowid = \ - _get_returning_modifiers(compiler, stmt) - - cols = stmt.table.columns - - for c in cols: - col_key = _getattr_col_key(c) - if col_key in parameters and col_key not in check_columns: - - _append_param_parameter( - compiler, stmt, c, col_key, parameters, _col_bind_name, - implicit_returning, implicit_return_defaults, values, kw) - - elif compiler.isinsert: - if c.primary_key and \ - need_pks and \ - ( - implicit_returning or - not postfetch_lastrowid or - c is not stmt.table._autoincrement_column - ): - - if implicit_returning: - _append_param_insert_pk_returning( - compiler, stmt, c, values, kw) - else: - _append_param_insert_pk(compiler, stmt, c, values, kw) - - elif c.default is not None: - - _append_param_insert_hasdefault( - compiler, stmt, c, implicit_return_defaults, - values, kw) - - elif c.server_default is not None: - if implicit_return_defaults and \ - c in implicit_return_defaults: - compiler.returning.append(c) - elif not c.primary_key: - compiler.postfetch.append(c) - elif implicit_return_defaults and \ - c in implicit_return_defaults: - compiler.returning.append(c) - - elif compiler.isupdate: - _append_param_update( - compiler, stmt, c, implicit_return_defaults, values, kw) - - -def _append_param_parameter( - compiler, stmt, c, col_key, parameters, _col_bind_name, - implicit_returning, implicit_return_defaults, values, kw): - value = parameters.pop(col_key) - if elements._is_literal(value): - value = _create_bind_param( - compiler, c, value, required=value is REQUIRED, - name=_col_bind_name(c) - if not stmt._has_multi_parameters - else "%s_0" % _col_bind_name(c) - ) - else: - 
if isinstance(value, elements.BindParameter) and \ - value.type._isnull: - value = value._clone() - value.type = c.type - - if c.primary_key and implicit_returning: - compiler.returning.append(c) - value = compiler.process(value.self_group(), **kw) - elif implicit_return_defaults and \ - c in implicit_return_defaults: - compiler.returning.append(c) - value = compiler.process(value.self_group(), **kw) - else: - compiler.postfetch.append(c) - value = compiler.process(value.self_group(), **kw) - values.append((c, value)) - - -def _append_param_insert_pk_returning(compiler, stmt, c, values, kw): - if c.default is not None: - if c.default.is_sequence: - if compiler.dialect.supports_sequences and \ - (not c.default.optional or - not compiler.dialect.sequences_optional): - proc = compiler.process(c.default, **kw) - values.append((c, proc)) - compiler.returning.append(c) - elif c.default.is_clause_element: - values.append( - (c, compiler.process( - c.default.arg.self_group(), **kw)) - ) - compiler.returning.append(c) - else: - values.append( - (c, _create_prefetch_bind_param(compiler, c)) - ) - - else: - compiler.returning.append(c) - - -def _create_prefetch_bind_param(compiler, c, process=True, name=None): - param = _create_bind_param(compiler, c, None, process=process, name=name) - compiler.prefetch.append(c) - return param - - -class _multiparam_column(elements.ColumnElement): - def __init__(self, original, index): - self.key = "%s_%d" % (original.key, index + 1) - self.original = original - self.default = original.default - self.type = original.type - - def __eq__(self, other): - return isinstance(other, _multiparam_column) and \ - other.key == self.key and \ - other.original == self.original - - -def _process_multiparam_default_bind(compiler, c, index, kw): - - if not c.default: - raise exc.CompileError( - "INSERT value for column %s is explicitly rendered as a bound" - "parameter in the VALUES clause; " - "a Python-side value or SQL expression is required" % c) - elif c.default.is_clause_element: - return compiler.process(c.default.arg.self_group(), **kw) - else: - col = _multiparam_column(c, index) - return _create_prefetch_bind_param(compiler, col) - - -def _append_param_insert_pk(compiler, stmt, c, values, kw): - if ( - (c.default is not None and - (not c.default.is_sequence or - compiler.dialect.supports_sequences)) or - c is stmt.table._autoincrement_column and - (compiler.dialect.supports_sequences or - compiler.dialect. 
- preexecute_autoincrement_sequences) - ): - values.append( - (c, _create_prefetch_bind_param(compiler, c)) - ) - - -def _append_param_insert_hasdefault( - compiler, stmt, c, implicit_return_defaults, values, kw): - - if c.default.is_sequence: - if compiler.dialect.supports_sequences and \ - (not c.default.optional or - not compiler.dialect.sequences_optional): - proc = compiler.process(c.default, **kw) - values.append((c, proc)) - if implicit_return_defaults and \ - c in implicit_return_defaults: - compiler.returning.append(c) - elif not c.primary_key: - compiler.postfetch.append(c) - elif c.default.is_clause_element: - proc = compiler.process(c.default.arg.self_group(), **kw) - values.append((c, proc)) - - if implicit_return_defaults and \ - c in implicit_return_defaults: - compiler.returning.append(c) - elif not c.primary_key: - # don't add primary key column to postfetch - compiler.postfetch.append(c) - else: - values.append( - (c, _create_prefetch_bind_param(compiler, c)) - ) - - -def _append_param_insert_select_hasdefault( - compiler, stmt, c, values, kw): - - if c.default.is_sequence: - if compiler.dialect.supports_sequences and \ - (not c.default.optional or - not compiler.dialect.sequences_optional): - proc = c.default - values.append((c, proc)) - elif c.default.is_clause_element: - proc = c.default.arg.self_group() - values.append((c, proc)) - else: - values.append( - (c, _create_prefetch_bind_param(compiler, c, process=False)) - ) - - -def _append_param_update( - compiler, stmt, c, implicit_return_defaults, values, kw): - - if c.onupdate is not None and not c.onupdate.is_sequence: - if c.onupdate.is_clause_element: - values.append( - (c, compiler.process( - c.onupdate.arg.self_group(), **kw)) - ) - if implicit_return_defaults and \ - c in implicit_return_defaults: - compiler.returning.append(c) - else: - compiler.postfetch.append(c) - else: - values.append( - (c, _create_prefetch_bind_param(compiler, c)) - ) - elif c.server_onupdate is not None: - if implicit_return_defaults and \ - c in implicit_return_defaults: - compiler.returning.append(c) - else: - compiler.postfetch.append(c) - elif implicit_return_defaults and \ - c in implicit_return_defaults: - compiler.returning.append(c) - - -def _get_multitable_params( - compiler, stmt, stmt_parameters, check_columns, - _col_bind_name, _getattr_col_key, values, kw): - - normalized_params = dict( - (elements._clause_element_as_expr(c), param) - for c, param in stmt_parameters.items() - ) - affected_tables = set() - for t in stmt._extra_froms: - for c in t.c: - if c in normalized_params: - affected_tables.add(t) - check_columns[_getattr_col_key(c)] = c - value = normalized_params[c] - if elements._is_literal(value): - value = _create_bind_param( - compiler, c, value, required=value is REQUIRED, - name=_col_bind_name(c)) - else: - compiler.postfetch.append(c) - value = compiler.process(value.self_group(), **kw) - values.append((c, value)) - # determine tables which are actually to be updated - process onupdate - # and server_onupdate for these - for t in affected_tables: - for c in t.c: - if c in normalized_params: - continue - elif (c.onupdate is not None and not - c.onupdate.is_sequence): - if c.onupdate.is_clause_element: - values.append( - (c, compiler.process( - c.onupdate.arg.self_group(), - **kw) - ) - ) - compiler.postfetch.append(c) - else: - values.append( - (c, _create_prefetch_bind_param( - compiler, c, name=_col_bind_name(c))) - ) - elif c.server_onupdate is not None: - compiler.postfetch.append(c) - - -def 
_extend_values_for_multiparams(compiler, stmt, values, kw): - values_0 = values - values = [values] - - values.extend( - [ - ( - c, - (_create_bind_param( - compiler, c, row[c.key], - name="%s_%d" % (c.key, i + 1) - ) if elements._is_literal(row[c.key]) - else compiler.process( - row[c.key].self_group(), **kw)) - if c.key in row else - _process_multiparam_default_bind(compiler, c, i, kw) - ) - for (c, param) in values_0 - ] - for i, row in enumerate(stmt.parameters[1:]) - ) - return values - - -def _get_stmt_parameters_params( - compiler, parameters, stmt_parameters, _column_as_key, values, kw): - for k, v in stmt_parameters.items(): - colkey = _column_as_key(k) - if colkey is not None: - parameters.setdefault(colkey, v) - else: - # a non-Column expression on the left side; - # add it to values() in an "as-is" state, - # coercing right side to bound param - if elements._is_literal(v): - v = compiler.process( - elements.BindParameter(None, v, type_=k.type), - **kw) - else: - v = compiler.process(v.self_group(), **kw) - - values.append((k, v)) - - -def _get_returning_modifiers(compiler, stmt): - need_pks = compiler.isinsert and \ - not compiler.inline and \ - not stmt._returning and \ - not stmt._has_multi_parameters - - implicit_returning = need_pks and \ - compiler.dialect.implicit_returning and \ - stmt.table.implicit_returning - - if compiler.isinsert: - implicit_return_defaults = (implicit_returning and - stmt._return_defaults) - elif compiler.isupdate: - implicit_return_defaults = (compiler.dialect.implicit_returning and - stmt.table.implicit_returning and - stmt._return_defaults) - else: - implicit_return_defaults = False - - if implicit_return_defaults: - if stmt._return_defaults is True: - implicit_return_defaults = set(stmt.table.c) - else: - implicit_return_defaults = set(stmt._return_defaults) - - postfetch_lastrowid = need_pks and compiler.dialect.postfetch_lastrowid - - return need_pks, implicit_returning, \ - implicit_return_defaults, postfetch_lastrowid diff --git a/python/sqlalchemy/sql/ddl.py b/python/sqlalchemy/sql/ddl.py deleted file mode 100644 index 71018f13..00000000 --- a/python/sqlalchemy/sql/ddl.py +++ /dev/null @@ -1,1095 +0,0 @@ -# sql/ddl.py -# Copyright (C) 2009-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -""" -Provides the hierarchy of DDL-defining schema items as well as routines -to invoke them for a create/drop call. - -""" - -from .. import util -from .elements import ClauseElement -from .base import Executable, _generative, SchemaVisitor, _bind_or_error -from ..util import topological -from .. import event -from .. import exc - - -class _DDLCompiles(ClauseElement): - def _compiler(self, dialect, **kw): - """Return a compiler appropriate for this ClauseElement, given a - Dialect.""" - - return dialect.ddl_compiler(dialect, self, **kw) - - -class DDLElement(Executable, _DDLCompiles): - """Base class for DDL expression constructs. - - This class is the base for the general purpose :class:`.DDL` class, - as well as the various create/drop clause constructs such as - :class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`, - etc. - - :class:`.DDLElement` integrates closely with SQLAlchemy events, - introduced in :ref:`event_toplevel`. An instance of one is - itself an event receiving callable:: - - event.listen( - users, - 'after_create', - AddConstraint(constraint).execute_if(dialect='postgresql') - ) - - .. 
seealso:: - - :class:`.DDL` - - :class:`.DDLEvents` - - :ref:`event_toplevel` - - :ref:`schema_ddl_sequences` - - """ - - _execution_options = Executable.\ - _execution_options.union({'autocommit': True}) - - target = None - on = None - dialect = None - callable_ = None - - def _execute_on_connection(self, connection, multiparams, params): - return connection._execute_ddl(self, multiparams, params) - - def execute(self, bind=None, target=None): - """Execute this DDL immediately. - - Executes the DDL statement in isolation using the supplied - :class:`.Connectable` or - :class:`.Connectable` assigned to the ``.bind`` - property, if not supplied. If the DDL has a conditional ``on`` - criteria, it will be invoked with None as the event. - - :param bind: - Optional, an ``Engine`` or ``Connection``. If not supplied, a valid - :class:`.Connectable` must be present in the - ``.bind`` property. - - :param target: - Optional, defaults to None. The target SchemaItem for the - execute call. Will be passed to the ``on`` callable if any, - and may also provide string expansion data for the - statement. See ``execute_at`` for more information. - - """ - - if bind is None: - bind = _bind_or_error(self) - - if self._should_execute(target, bind): - return bind.execute(self.against(target)) - else: - bind.engine.logger.info( - "DDL execution skipped, criteria not met.") - - @util.deprecated("0.7", "See :class:`.DDLEvents`, as well as " - ":meth:`.DDLElement.execute_if`.") - def execute_at(self, event_name, target): - """Link execution of this DDL to the DDL lifecycle of a SchemaItem. - - Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance, - executing it when that schema item is created or dropped. The DDL - statement will be executed using the same Connection and transactional - context as the Table create/drop itself. The ``.bind`` property of - this statement is ignored. - - :param event: - One of the events defined in the schema item's ``.ddl_events``; - e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop' - - :param target: - The Table or MetaData instance for which this DDLElement will - be associated with. - - A DDLElement instance can be linked to any number of schema items. - - ``execute_at`` builds on the ``append_ddl_listener`` interface of - :class:`.MetaData` and :class:`.Table` objects. - - Caveat: Creating or dropping a Table in isolation will also trigger - any DDL set to ``execute_at`` that Table's MetaData. This may change - in a future release. - - """ - - def call_event(target, connection, **kw): - if self._should_execute_deprecated(event_name, - target, connection, **kw): - return connection.execute(self.against(target)) - - event.listen(target, "" + event_name.replace('-', '_'), call_event) - - @_generative - def against(self, target): - """Return a copy of this DDL against a specific schema item.""" - - self.target = target - - @_generative - def execute_if(self, dialect=None, callable_=None, state=None): - """Return a callable that will execute this - DDLElement conditionally. - - Used to provide a wrapper for event listening:: - - event.listen( - metadata, - 'before_create', - DDL("my_ddl").execute_if(dialect='postgresql') - ) - - :param dialect: May be a string, tuple or a callable - predicate. 
If a string, it will be compared to the name of the - executing database dialect:: - - DDL('something').execute_if(dialect='postgresql') - - If a tuple, specifies multiple dialect names:: - - DDL('something').execute_if(dialect=('postgresql', 'mysql')) - - :param callable_: A callable, which will be invoked with - four positional arguments as well as optional keyword - arguments: - - :ddl: - This DDL element. - - :target: - The :class:`.Table` or :class:`.MetaData` object which is the - target of this event. May be None if the DDL is executed - explicitly. - - :bind: - The :class:`.Connection` being used for DDL execution - - :tables: - Optional keyword argument - a list of Table objects which are to - be created/ dropped within a MetaData.create_all() or drop_all() - method call. - - :state: - Optional keyword argument - will be the ``state`` argument - passed to this function. - - :checkfirst: - Keyword argument, will be True if the 'checkfirst' flag was - set during the call to ``create()``, ``create_all()``, - ``drop()``, ``drop_all()``. - - If the callable returns a true value, the DDL statement will be - executed. - - :param state: any value which will be passed to the callable\_ - as the ``state`` keyword argument. - - .. seealso:: - - :class:`.DDLEvents` - - :ref:`event_toplevel` - - """ - self.dialect = dialect - self.callable_ = callable_ - self.state = state - - def _should_execute(self, target, bind, **kw): - if self.on is not None and \ - not self._should_execute_deprecated(None, target, bind, **kw): - return False - - if isinstance(self.dialect, util.string_types): - if self.dialect != bind.engine.name: - return False - elif isinstance(self.dialect, (tuple, list, set)): - if bind.engine.name not in self.dialect: - return False - if (self.callable_ is not None and - not self.callable_(self, target, bind, - state=self.state, **kw)): - return False - - return True - - def _should_execute_deprecated(self, event, target, bind, **kw): - if self.on is None: - return True - elif isinstance(self.on, util.string_types): - return self.on == bind.engine.name - elif isinstance(self.on, (tuple, list, set)): - return bind.engine.name in self.on - else: - return self.on(self, event, target, bind, **kw) - - def __call__(self, target, bind, **kw): - """Execute the DDL as a ddl_listener.""" - - if self._should_execute(target, bind, **kw): - return bind.execute(self.against(target)) - - def _check_ddl_on(self, on): - if (on is not None and - (not isinstance(on, util.string_types + (tuple, list, set)) and - not util.callable(on))): - raise exc.ArgumentError( - "Expected the name of a database dialect, a tuple " - "of names, or a callable for " - "'on' criteria, got type '%s'." % type(on).__name__) - - def bind(self): - if self._bind: - return self._bind - - def _set_bind(self, bind): - self._bind = bind - bind = property(bind, _set_bind) - - def _generate(self): - s = self.__class__.__new__(self.__class__) - s.__dict__ = self.__dict__.copy() - return s - - -class DDL(DDLElement): - """A literal DDL statement. - - Specifies literal SQL DDL to be executed by the database. DDL objects - function as DDL event listeners, and can be subscribed to those events - listed in :class:`.DDLEvents`, using either :class:`.Table` or - :class:`.MetaData` objects as targets. Basic templating support allows - a single DDL instance to handle repetitive tasks for multiple tables. 
- - Examples:: - - from sqlalchemy import event, DDL - - tbl = Table('users', metadata, Column('uid', Integer)) - event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger')) - - spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE') - event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb')) - - drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE') - connection.execute(drop_spow) - - When operating on Table events, the following ``statement`` - string substitutions are available:: - - %(table)s - the Table name, with any required quoting applied - %(schema)s - the schema name, with any required quoting applied - %(fullname)s - the Table name including schema, quoted if needed - - The DDL's "context", if any, will be combined with the standard - substitutions noted above. Keys present in the context will override - the standard substitutions. - - """ - - __visit_name__ = "ddl" - - def __init__(self, statement, on=None, context=None, bind=None): - """Create a DDL statement. - - :param statement: - A string or unicode string to be executed. Statements will be - processed with Python's string formatting operator. See the - ``context`` argument and the ``execute_at`` method. - - A literal '%' in a statement must be escaped as '%%'. - - SQL bind parameters are not available in DDL statements. - - :param on: - .. deprecated:: 0.7 - See :meth:`.DDLElement.execute_if`. - - Optional filtering criteria. May be a string, tuple or a callable - predicate. If a string, it will be compared to the name of the - executing database dialect:: - - DDL('something', on='postgresql') - - If a tuple, specifies multiple dialect names:: - - DDL('something', on=('postgresql', 'mysql')) - - If a callable, it will be invoked with four positional arguments - as well as optional keyword arguments: - - :ddl: - This DDL element. - - :event: - The name of the event that has triggered this DDL, such as - 'after-create'. Will be None if the DDL is executed explicitly. - - :target: - The ``Table`` or ``MetaData`` object which is the target of - this event. May be None if the DDL is executed explicitly. - - :connection: - The ``Connection`` being used for DDL execution - - :tables: - Optional keyword argument - a list of Table objects which are to - be created/ dropped within a MetaData.create_all() or drop_all() - method call. - - - If the callable returns a true value, the DDL statement will be - executed. - - :param context: - Optional dictionary, defaults to None. These values will be - available for use in string substitutions on the DDL statement. - - :param bind: - Optional. A :class:`.Connectable`, used by - default when ``execute()`` is invoked without a bind argument. - - - .. seealso:: - - :class:`.DDLEvents` - - :ref:`event_toplevel` - - """ - - if not isinstance(statement, util.string_types): - raise exc.ArgumentError( - "Expected a string or unicode SQL statement, got '%r'" % - statement) - - self.statement = statement - self.context = context or {} - - self._check_ddl_on(on) - self.on = on - self._bind = bind - - def __repr__(self): - return '<%s@%s; %s>' % ( - type(self).__name__, id(self), - ', '.join([repr(self.statement)] + - ['%s=%r' % (key, getattr(self, key)) - for key in ('on', 'context') - if getattr(self, key)])) - - -class _CreateDropBase(DDLElement): - """Base class for DDL constructs that represent CREATE and DROP or - equivalents. - - The common theme of _CreateDropBase is a single - ``element`` attribute which refers to the element - to be created or dropped.
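For illustration, a minimal sketch of emitting two such create/drop constructs directly, assuming a ``users`` :class:`.Table` and an ``engine`` that already exist (the names here are illustrative only)::

    from sqlalchemy.schema import CreateTable, DropTable

    with engine.connect() as conn:
        # each construct wraps its subject in the ``element`` attribute
        conn.execute(CreateTable(users))   # emits CREATE TABLE users (...)
        conn.execute(DropTable(users))     # emits DROP TABLE users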
- - """ - - def __init__(self, element, on=None, bind=None): - self.element = element - self._check_ddl_on(on) - self.on = on - self.bind = bind - - def _create_rule_disable(self, compiler): - """Allow disable of _create_rule using a callable. - - Pass to _create_rule using - util.portable_instancemethod(self._create_rule_disable) - to retain serializability. - - """ - return False - - -class CreateSchema(_CreateDropBase): - """Represent a CREATE SCHEMA statement. - - .. versionadded:: 0.7.4 - - The argument here is the string name of the schema. - - """ - - __visit_name__ = "create_schema" - - def __init__(self, name, quote=None, **kw): - """Create a new :class:`.CreateSchema` construct.""" - - self.quote = quote - super(CreateSchema, self).__init__(name, **kw) - - -class DropSchema(_CreateDropBase): - """Represent a DROP SCHEMA statement. - - The argument here is the string name of the schema. - - .. versionadded:: 0.7.4 - - """ - - __visit_name__ = "drop_schema" - - def __init__(self, name, quote=None, cascade=False, **kw): - """Create a new :class:`.DropSchema` construct.""" - - self.quote = quote - self.cascade = cascade - super(DropSchema, self).__init__(name, **kw) - - -class CreateTable(_CreateDropBase): - """Represent a CREATE TABLE statement.""" - - __visit_name__ = "create_table" - - def __init__( - self, element, on=None, bind=None, - include_foreign_key_constraints=None): - """Create a :class:`.CreateTable` construct. - - :param element: a :class:`.Table` that's the subject - of the CREATE - :param on: See the description for 'on' in :class:`.DDL`. - :param bind: See the description for 'bind' in :class:`.DDL`. - :param include_foreign_key_constraints: optional sequence of - :class:`.ForeignKeyConstraint` objects that will be included - inline within the CREATE construct; if omitted, all foreign key - constraints that do not specify use_alter=True are included. - - .. versionadded:: 1.0.0 - - """ - super(CreateTable, self).__init__(element, on=on, bind=bind) - self.columns = [CreateColumn(column) - for column in element.columns - ] - self.include_foreign_key_constraints = include_foreign_key_constraints - - -class _DropView(_CreateDropBase): - """Semi-public 'DROP VIEW' construct. - - Used by the test suite for dialect-agnostic drops of views. - This object will eventually be part of a public "view" API. - - """ - __visit_name__ = "drop_view" - - -class CreateColumn(_DDLCompiles): - """Represent a :class:`.Column` as rendered in a CREATE TABLE statement, - via the :class:`.CreateTable` construct. - - This is provided to support custom column DDL within the generation - of CREATE TABLE statements, by using the - compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel` - to extend :class:`.CreateColumn`. 
- - Typical integration is to examine the incoming :class:`.Column` - object, and to redirect compilation if a particular flag or condition - is found:: - - from sqlalchemy import schema - from sqlalchemy.ext.compiler import compiles - - @compiles(schema.CreateColumn) - def compile(element, compiler, **kw): - column = element.element - - if "special" not in column.info: - return compiler.visit_create_column(element, **kw) - - text = "%s SPECIAL DIRECTIVE %s" % ( - column.name, - compiler.type_compiler.process(column.type) - ) - default = compiler.get_column_default_string(column) - if default is not None: - text += " DEFAULT " + default - - if not column.nullable: - text += " NOT NULL" - - if column.constraints: - text += " ".join( - compiler.process(const) - for const in column.constraints) - return text - - The above construct can be applied to a :class:`.Table` as follows:: - - from sqlalchemy import Table, Metadata, Column, Integer, String - from sqlalchemy import schema - - metadata = MetaData() - - table = Table('mytable', MetaData(), - Column('x', Integer, info={"special":True}, primary_key=True), - Column('y', String(50)), - Column('z', String(20), info={"special":True}) - ) - - metadata.create_all(conn) - - Above, the directives we've added to the :attr:`.Column.info` collection - will be detected by our custom compilation scheme:: - - CREATE TABLE mytable ( - x SPECIAL DIRECTIVE INTEGER NOT NULL, - y VARCHAR(50), - z SPECIAL DIRECTIVE VARCHAR(20), - PRIMARY KEY (x) - ) - - The :class:`.CreateColumn` construct can also be used to skip certain - columns when producing a ``CREATE TABLE``. This is accomplished by - creating a compilation rule that conditionally returns ``None``. - This is essentially how to produce the same effect as using the - ``system=True`` argument on :class:`.Column`, which marks a column - as an implicitly-present "system" column. - - For example, suppose we wish to produce a :class:`.Table` which skips - rendering of the Postgresql ``xmin`` column against the Postgresql - backend, but on other backends does render it, in anticipation of a - triggered rule. A conditional compilation rule could skip this name only - on Postgresql:: - - from sqlalchemy.schema import CreateColumn - - @compiles(CreateColumn, "postgresql") - def skip_xmin(element, compiler, **kw): - if element.element.name == 'xmin': - return None - else: - return compiler.visit_create_column(element, **kw) - - - my_table = Table('mytable', metadata, - Column('id', Integer, primary_key=True), - Column('xmin', Integer) - ) - - Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE`` - which only includes the ``id`` column in the string; the ``xmin`` column - will be omitted, but only against the Postgresql backend. - - .. versionadded:: 0.8.3 The :class:`.CreateColumn` construct supports - skipping of columns by returning ``None`` from a custom compilation - rule. - - .. versionadded:: 0.8 The :class:`.CreateColumn` construct was added - to support custom column creation styles. 
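A related, hedged usage note: the rendered DDL can also be inspected without a live connection, which is a convenient way to verify what a custom :class:`.CreateColumn` rule actually produces; ``my_table`` and the choice of dialect below are assumptions for illustration::

    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects import postgresql

    # compile against an explicit dialect; any @compiles(CreateColumn, ...)
    # rules registered as shown above participate in the rendering
    print(CreateTable(my_table).compile(dialect=postgresql.dialect()))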
- - """ - __visit_name__ = 'create_column' - - def __init__(self, element): - self.element = element - - -class DropTable(_CreateDropBase): - """Represent a DROP TABLE statement.""" - - __visit_name__ = "drop_table" - - -class CreateSequence(_CreateDropBase): - """Represent a CREATE SEQUENCE statement.""" - - __visit_name__ = "create_sequence" - - -class DropSequence(_CreateDropBase): - """Represent a DROP SEQUENCE statement.""" - - __visit_name__ = "drop_sequence" - - -class CreateIndex(_CreateDropBase): - """Represent a CREATE INDEX statement.""" - - __visit_name__ = "create_index" - - -class DropIndex(_CreateDropBase): - """Represent a DROP INDEX statement.""" - - __visit_name__ = "drop_index" - - -class AddConstraint(_CreateDropBase): - """Represent an ALTER TABLE ADD CONSTRAINT statement.""" - - __visit_name__ = "add_constraint" - - def __init__(self, element, *args, **kw): - super(AddConstraint, self).__init__(element, *args, **kw) - element._create_rule = util.portable_instancemethod( - self._create_rule_disable) - - -class DropConstraint(_CreateDropBase): - """Represent an ALTER TABLE DROP CONSTRAINT statement.""" - - __visit_name__ = "drop_constraint" - - def __init__(self, element, cascade=False, **kw): - self.cascade = cascade - super(DropConstraint, self).__init__(element, **kw) - element._create_rule = util.portable_instancemethod( - self._create_rule_disable) - - -class DDLBase(SchemaVisitor): - def __init__(self, connection): - self.connection = connection - - -class SchemaGenerator(DDLBase): - - def __init__(self, dialect, connection, checkfirst=False, - tables=None, **kwargs): - super(SchemaGenerator, self).__init__(connection, **kwargs) - self.checkfirst = checkfirst - self.tables = tables - self.preparer = dialect.identifier_preparer - self.dialect = dialect - self.memo = {} - - def _can_create_table(self, table): - self.dialect.validate_identifier(table.name) - if table.schema: - self.dialect.validate_identifier(table.schema) - return not self.checkfirst or \ - not self.dialect.has_table(self.connection, - table.name, schema=table.schema) - - def _can_create_sequence(self, sequence): - return self.dialect.supports_sequences and \ - ( - (not self.dialect.sequences_optional or - not sequence.optional) and - ( - not self.checkfirst or - not self.dialect.has_sequence( - self.connection, - sequence.name, - schema=sequence.schema) - ) - ) - - def visit_metadata(self, metadata): - if self.tables is not None: - tables = self.tables - else: - tables = list(metadata.tables.values()) - - collection = sort_tables_and_constraints( - [t for t in tables if self._can_create_table(t)]) - - seq_coll = [s for s in metadata._sequences.values() - if s.column is None and self._can_create_sequence(s)] - - event_collection = [ - t for (t, fks) in collection if t is not None - ] - metadata.dispatch.before_create(metadata, self.connection, - tables=event_collection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - for seq in seq_coll: - self.traverse_single(seq, create_ok=True) - - for table, fkcs in collection: - if table is not None: - self.traverse_single( - table, create_ok=True, - include_foreign_key_constraints=fkcs, - _is_metadata_operation=True) - else: - for fkc in fkcs: - self.traverse_single(fkc) - - metadata.dispatch.after_create(metadata, self.connection, - tables=event_collection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - def visit_table( - self, table, create_ok=False, - include_foreign_key_constraints=None, - _is_metadata_operation=False): - if not create_ok and 
not self._can_create_table(table): - return - - table.dispatch.before_create( - table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self, - _is_metadata_operation=_is_metadata_operation) - - for column in table.columns: - if column.default is not None: - self.traverse_single(column.default) - - if not self.dialect.supports_alter: - # e.g., don't omit any foreign key constraints - include_foreign_key_constraints = None - - self.connection.execute( - CreateTable( - table, - include_foreign_key_constraints=include_foreign_key_constraints - )) - - if hasattr(table, 'indexes'): - for index in table.indexes: - self.traverse_single(index) - - table.dispatch.after_create( - table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self, - _is_metadata_operation=_is_metadata_operation) - - def visit_foreign_key_constraint(self, constraint): - if not self.dialect.supports_alter: - return - self.connection.execute(AddConstraint(constraint)) - - def visit_sequence(self, sequence, create_ok=False): - if not create_ok and not self._can_create_sequence(sequence): - return - self.connection.execute(CreateSequence(sequence)) - - def visit_index(self, index): - self.connection.execute(CreateIndex(index)) - - -class SchemaDropper(DDLBase): - - def __init__(self, dialect, connection, checkfirst=False, - tables=None, **kwargs): - super(SchemaDropper, self).__init__(connection, **kwargs) - self.checkfirst = checkfirst - self.tables = tables - self.preparer = dialect.identifier_preparer - self.dialect = dialect - self.memo = {} - - def visit_metadata(self, metadata): - if self.tables is not None: - tables = self.tables - else: - tables = list(metadata.tables.values()) - - try: - unsorted_tables = [t for t in tables if self._can_drop_table(t)] - collection = list(reversed( - sort_tables_and_constraints( - unsorted_tables, - filter_fn=lambda constraint: False - if not self.dialect.supports_alter - or constraint.name is None - else None - ) - )) - except exc.CircularDependencyError as err2: - if not self.dialect.supports_alter: - util.warn( - "Can't sort tables for DROP; an " - "unresolvable foreign key " - "dependency exists between tables: %s, and backend does " - "not support ALTER. To restore at least a partial sort, " - "apply use_alter=True to ForeignKey and " - "ForeignKeyConstraint " - "objects involved in the cycle to mark these as known " - "cycles that will be ignored." - % ( - ", ".join(sorted([t.fullname for t in err2.cycles])) - ) - ) - collection = [(t, ()) for t in unsorted_tables] - else: - util.raise_from_cause( - exc.CircularDependencyError( - err2.args[0], - err2.cycles, err2.edges, - msg="Can't sort tables for DROP; an " - "unresolvable foreign key " - "dependency exists between tables: %s. Please ensure " - "that the ForeignKey and ForeignKeyConstraint objects " - "involved in the cycle have " - "names so that they can be dropped using " - "DROP CONSTRAINT." 
- % ( - ", ".join(sorted([t.fullname for t in err2.cycles])) - ) - - ) - ) - - seq_coll = [ - s - for s in metadata._sequences.values() - if s.column is None and self._can_drop_sequence(s) - ] - - event_collection = [ - t for (t, fks) in collection if t is not None - ] - - metadata.dispatch.before_drop( - metadata, self.connection, tables=event_collection, - checkfirst=self.checkfirst, _ddl_runner=self) - - for table, fkcs in collection: - if table is not None: - self.traverse_single( - table, drop_ok=True, _is_metadata_operation=True) - else: - for fkc in fkcs: - self.traverse_single(fkc) - - for seq in seq_coll: - self.traverse_single(seq, drop_ok=True) - - metadata.dispatch.after_drop( - metadata, self.connection, tables=event_collection, - checkfirst=self.checkfirst, _ddl_runner=self) - - def _can_drop_table(self, table): - self.dialect.validate_identifier(table.name) - if table.schema: - self.dialect.validate_identifier(table.schema) - return not self.checkfirst or self.dialect.has_table( - self.connection, table.name, schema=table.schema) - - def _can_drop_sequence(self, sequence): - return self.dialect.supports_sequences and \ - ((not self.dialect.sequences_optional or - not sequence.optional) and - (not self.checkfirst or - self.dialect.has_sequence( - self.connection, - sequence.name, - schema=sequence.schema)) - ) - - def visit_index(self, index): - self.connection.execute(DropIndex(index)) - - def visit_table(self, table, drop_ok=False, _is_metadata_operation=False): - if not drop_ok and not self._can_drop_table(table): - return - - table.dispatch.before_drop( - table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self, - _is_metadata_operation=_is_metadata_operation) - - for column in table.columns: - if column.default is not None: - self.traverse_single(column.default) - - self.connection.execute(DropTable(table)) - - table.dispatch.after_drop( - table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self, - _is_metadata_operation=_is_metadata_operation) - - def visit_foreign_key_constraint(self, constraint): - if not self.dialect.supports_alter: - return - self.connection.execute(DropConstraint(constraint)) - - def visit_sequence(self, sequence, drop_ok=False): - if not drop_ok and not self._can_drop_sequence(sequence): - return - self.connection.execute(DropSequence(sequence)) - - -def sort_tables(tables, skip_fn=None, extra_dependencies=None): - """sort a collection of :class:`.Table` objects based on dependency. - - This is a dependency-ordered sort which will emit :class:`.Table` - objects such that they will follow the :class:`.Table` objects they - depend on. - Tables are dependent on one another based on the presence of - :class:`.ForeignKeyConstraint` objects as well as explicit dependencies - added by :meth:`.Table.add_is_dependent_on`. - - .. warning:: - - The :func:`.sort_tables` function cannot by itself accommodate - automatic resolution of dependency cycles between tables, which - are usually caused by mutually dependent foreign key constraints. - To resolve these cycles, either apply the - :paramref:`.ForeignKeyConstraint.use_alter` parameter - to those constraints, or use the - :func:`.sql.sort_tables_and_constraints` function which will break - out foreign key constraints involved in cycles separately. - - :param tables: a sequence of :class:`.Table` objects. - - :param skip_fn: optional callable which will be passed a - :class:`.ForeignKey` object; if it returns True, this - constraint will not be considered as a dependency. Note this is - **different** from the same parameter in - :func:`.sort_tables_and_constraints`, which is - instead passed the owning :class:`.ForeignKeyConstraint` object. - - :param extra_dependencies: a sequence of 2-tuples of tables which will - also be considered as dependent on each other. - - .. seealso:: - - :func:`.sort_tables_and_constraints` - - :meth:`.MetaData.sorted_tables` - uses this function to sort - - - """ - - if skip_fn is not None: - def _skip_fn(fkc): - for fk in fkc.elements: - if skip_fn(fk): - return True - else: - return None - else: - _skip_fn = None - - return [ - t for (t, fkcs) in - sort_tables_and_constraints( - tables, filter_fn=_skip_fn, extra_dependencies=extra_dependencies) - if t is not None - ] - - -def sort_tables_and_constraints( - tables, filter_fn=None, extra_dependencies=None): - """sort a collection of :class:`.Table` / :class:`.ForeignKeyConstraint` - objects. - - This is a dependency-ordered sort which will emit tuples of - ``(Table, [ForeignKeyConstraint, ...])`` such that each - :class:`.Table` follows the :class:`.Table` objects it depends on. - Remaining :class:`.ForeignKeyConstraint` objects that are separate due to - dependency rules not satisfied by the sort are emitted afterwards - as ``(None, [ForeignKeyConstraint ...])``. - - Tables are dependent on one another based on the presence of - :class:`.ForeignKeyConstraint` objects, explicit dependencies - added by :meth:`.Table.add_is_dependent_on`, as well as dependencies - stated here using the :paramref:`~.sort_tables_and_constraints.skip_fn` - and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies` - parameters. - - :param tables: a sequence of :class:`.Table` objects. - - :param filter_fn: optional callable which will be passed a - :class:`.ForeignKeyConstraint` object, and returns a value based on - whether this constraint should definitely be included or excluded as - an inline constraint, or neither. If it returns False, the constraint - will definitely be included as a dependency that cannot be subject - to ALTER; if True, it will **only** be included as an ALTER result at - the end. Returning None means the constraint is included in the - table-based result unless it is detected as part of a dependency cycle. - - :param extra_dependencies: a sequence of 2-tuples of tables which will - also be considered as dependent on each other. - - .. versionadded:: 1.0.0 - - ..
seealso:: - - :func:`.sort_tables` - - - """ - - fixed_dependencies = set() - mutable_dependencies = set() - - if extra_dependencies is not None: - fixed_dependencies.update(extra_dependencies) - - remaining_fkcs = set() - for table in tables: - for fkc in table.foreign_key_constraints: - if fkc.use_alter is True: - remaining_fkcs.add(fkc) - continue - - if filter_fn: - filtered = filter_fn(fkc) - - if filtered is True: - remaining_fkcs.add(fkc) - continue - - dependent_on = fkc.referred_table - if dependent_on is not table: - mutable_dependencies.add((dependent_on, table)) - - fixed_dependencies.update( - (parent, table) for parent in table._extra_dependencies - ) - - try: - candidate_sort = list( - topological.sort( - fixed_dependencies.union(mutable_dependencies), tables, - deterministic_order=True - ) - ) - except exc.CircularDependencyError as err: - for edge in err.edges: - if edge in mutable_dependencies: - table = edge[1] - can_remove = [ - fkc for fkc in table.foreign_key_constraints - if filter_fn is None or filter_fn(fkc) is not False] - remaining_fkcs.update(can_remove) - for fkc in can_remove: - dependent_on = fkc.referred_table - if dependent_on is not table: - mutable_dependencies.discard((dependent_on, table)) - candidate_sort = list( - topological.sort( - fixed_dependencies.union(mutable_dependencies), tables, - deterministic_order=True - ) - ) - - return [ - (table, table.foreign_key_constraints.difference(remaining_fkcs)) - for table in candidate_sort - ] + [(None, list(remaining_fkcs))] diff --git a/python/sqlalchemy/sql/default_comparator.py b/python/sqlalchemy/sql/default_comparator.py deleted file mode 100644 index e77ad765..00000000 --- a/python/sqlalchemy/sql/default_comparator.py +++ /dev/null @@ -1,287 +0,0 @@ -# sql/default_comparator.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Default implementation of SQL comparison operations. -""" - -from .. import exc, util -from . import type_api -from . import operators -from .elements import BindParameter, True_, False_, BinaryExpression, \ - Null, _const_expr, _clause_element_as_expr, \ - ClauseList, ColumnElement, TextClause, UnaryExpression, \ - collate, _is_literal, _literal_as_text, ClauseElement, and_, or_ -from .selectable import SelectBase, Alias, Selectable, ScalarSelect - - -def _boolean_compare(expr, op, obj, negate=None, reverse=False, - _python_is_types=(util.NoneType, bool), - result_type = None, - **kwargs): - - if result_type is None: - result_type = type_api.BOOLEANTYPE - - if isinstance(obj, _python_is_types + (Null, True_, False_)): - - # allow x ==/!= True/False to be treated as a literal. 
- # this comes out to "== / != true/false" or "1/0" if those - # constants aren't supported and works on all platforms - if op in (operators.eq, operators.ne) and \ - isinstance(obj, (bool, True_, False_)): - return BinaryExpression(expr, - _literal_as_text(obj), - op, - type_=result_type, - negate=negate, modifiers=kwargs) - else: - # all other None/True/False uses IS, IS NOT - if op in (operators.eq, operators.is_): - return BinaryExpression(expr, _const_expr(obj), - operators.is_, - negate=operators.isnot) - elif op in (operators.ne, operators.isnot): - return BinaryExpression(expr, _const_expr(obj), - operators.isnot, - negate=operators.is_) - else: - raise exc.ArgumentError( - "Only '=', '!=', 'is_()', 'isnot()' operators can " - "be used with None/True/False") - else: - obj = _check_literal(expr, op, obj) - - if reverse: - return BinaryExpression(obj, - expr, - op, - type_=result_type, - negate=negate, modifiers=kwargs) - else: - return BinaryExpression(expr, - obj, - op, - type_=result_type, - negate=negate, modifiers=kwargs) - - -def _binary_operate(expr, op, obj, reverse=False, result_type=None, - **kw): - obj = _check_literal(expr, op, obj) - - if reverse: - left, right = obj, expr - else: - left, right = expr, obj - - if result_type is None: - op, result_type = left.comparator._adapt_expression( - op, right.comparator) - - return BinaryExpression( - left, right, op, type_=result_type, modifiers=kw) - - -def _conjunction_operate(expr, op, other, **kw): - if op is operators.and_: - return and_(expr, other) - elif op is operators.or_: - return or_(expr, other) - else: - raise NotImplementedError() - - -def _scalar(expr, op, fn, **kw): - return fn(expr) - - -def _in_impl(expr, op, seq_or_selectable, negate_op, **kw): - seq_or_selectable = _clause_element_as_expr(seq_or_selectable) - - if isinstance(seq_or_selectable, ScalarSelect): - return _boolean_compare(expr, op, seq_or_selectable, - negate=negate_op) - elif isinstance(seq_or_selectable, SelectBase): - - # TODO: if we ever want to support (x, y, z) IN (select x, - # y, z from table), we would need a multi-column version of - # as_scalar() to produce a multi- column selectable that - # does not export itself as a FROM clause - - return _boolean_compare( - expr, op, seq_or_selectable.as_scalar(), - negate=negate_op, **kw) - elif isinstance(seq_or_selectable, (Selectable, TextClause)): - return _boolean_compare(expr, op, seq_or_selectable, - negate=negate_op, **kw) - elif isinstance(seq_or_selectable, ClauseElement): - raise exc.InvalidRequestError( - 'in_() accepts' - ' either a list of expressions ' - 'or a selectable: %r' % seq_or_selectable) - - # Handle non selectable arguments as sequences - args = [] - for o in seq_or_selectable: - if not _is_literal(o): - if not isinstance(o, operators.ColumnOperators): - raise exc.InvalidRequestError( - 'in_() accepts' - ' either a list of expressions ' - 'or a selectable: %r' % o) - elif o is None: - o = Null() - else: - o = expr._bind_param(op, o) - args.append(o) - if len(args) == 0: - - # Special case handling for empty IN's, behave like - # comparison against zero row selectable. We use != to - # build the contradiction as it handles NULL values - # appropriately, i.e. "not (x IN ())" should not return NULL - # values for x. - - util.warn('The IN-predicate on "%s" was invoked with an ' - 'empty sequence. This results in a ' - 'contradiction, which nonetheless can be ' - 'expensive to evaluate. Consider alternative ' - 'strategies for improved performance.' 
% expr) - if op is operators.in_op: - return expr != expr - else: - return expr == expr - - return _boolean_compare(expr, op, - ClauseList(*args).self_group(against=op), - negate=negate_op) - - -def _unsupported_impl(expr, op, *arg, **kw): - raise NotImplementedError("Operator '%s' is not supported on " - "this expression" % op.__name__) - - -def _inv_impl(expr, op, **kw): - """See :meth:`.ColumnOperators.__inv__`.""" - if hasattr(expr, 'negation_clause'): - return expr.negation_clause - else: - return expr._negate() - - -def _neg_impl(expr, op, **kw): - """See :meth:`.ColumnOperators.__neg__`.""" - return UnaryExpression(expr, operator=operators.neg) - - -def _match_impl(expr, op, other, **kw): - """See :meth:`.ColumnOperators.match`.""" - - return _boolean_compare( - expr, operators.match_op, - _check_literal( - expr, operators.match_op, other), - result_type=type_api.MATCHTYPE, - negate=operators.notmatch_op - if op is operators.match_op else operators.match_op, - **kw - ) - - -def _distinct_impl(expr, op, **kw): - """See :meth:`.ColumnOperators.distinct`.""" - return UnaryExpression(expr, operator=operators.distinct_op, - type_=expr.type) - - -def _between_impl(expr, op, cleft, cright, **kw): - """See :meth:`.ColumnOperators.between`.""" - return BinaryExpression( - expr, - ClauseList( - _check_literal(expr, operators.and_, cleft), - _check_literal(expr, operators.and_, cright), - operator=operators.and_, - group=False, group_contents=False), - op, - negate=operators.notbetween_op - if op is operators.between_op - else operators.between_op, - modifiers=kw) - - -def _collate_impl(expr, op, other, **kw): - return collate(expr, other) - -# a mapping of operators with the method they use, along with -# their negated operator for comparison operators -operator_lookup = { - "and_": (_conjunction_operate,), - "or_": (_conjunction_operate,), - "inv": (_inv_impl,), - "add": (_binary_operate,), - "mul": (_binary_operate,), - "sub": (_binary_operate,), - "div": (_binary_operate,), - "mod": (_binary_operate,), - "truediv": (_binary_operate,), - "custom_op": (_binary_operate,), - "concat_op": (_binary_operate,), - "lt": (_boolean_compare, operators.ge), - "le": (_boolean_compare, operators.gt), - "ne": (_boolean_compare, operators.eq), - "gt": (_boolean_compare, operators.le), - "ge": (_boolean_compare, operators.lt), - "eq": (_boolean_compare, operators.ne), - "like_op": (_boolean_compare, operators.notlike_op), - "ilike_op": (_boolean_compare, operators.notilike_op), - "notlike_op": (_boolean_compare, operators.like_op), - "notilike_op": (_boolean_compare, operators.ilike_op), - "contains_op": (_boolean_compare, operators.notcontains_op), - "startswith_op": (_boolean_compare, operators.notstartswith_op), - "endswith_op": (_boolean_compare, operators.notendswith_op), - "desc_op": (_scalar, UnaryExpression._create_desc), - "asc_op": (_scalar, UnaryExpression._create_asc), - "nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst), - "nullslast_op": (_scalar, UnaryExpression._create_nullslast), - "in_op": (_in_impl, operators.notin_op), - "notin_op": (_in_impl, operators.in_op), - "is_": (_boolean_compare, operators.is_), - "isnot": (_boolean_compare, operators.isnot), - "collate": (_collate_impl,), - "match_op": (_match_impl,), - "notmatch_op": (_match_impl,), - "distinct_op": (_distinct_impl,), - "between_op": (_between_impl, ), - "notbetween_op": (_between_impl, ), - "neg": (_neg_impl,), - "getitem": (_unsupported_impl,), - "lshift": (_unsupported_impl,), - "rshift": (_unsupported_impl,), 
-} - - -def _check_literal(expr, operator, other): - if isinstance(other, (ColumnElement, TextClause)): - if isinstance(other, BindParameter) and \ - other.type._isnull: - other = other._clone() - other.type = expr.type - return other - elif hasattr(other, '__clause_element__'): - other = other.__clause_element__() - elif isinstance(other, type_api.TypeEngine.Comparator): - other = other.expr - - if isinstance(other, (SelectBase, Alias)): - return other.as_scalar() - elif not isinstance(other, (ColumnElement, TextClause)): - return expr._bind_param(operator, other) - else: - return other - diff --git a/python/sqlalchemy/sql/dml.py b/python/sqlalchemy/sql/dml.py deleted file mode 100644 index 6756f155..00000000 --- a/python/sqlalchemy/sql/dml.py +++ /dev/null @@ -1,804 +0,0 @@ -# sql/dml.py -# Copyright (C) 2009-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -""" -Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`. - -""" - -from .base import Executable, _generative, _from_objects, DialectKWArgs -from .elements import ClauseElement, _literal_as_text, Null, and_, _clone, \ - _column_as_key -from .selectable import _interpret_as_from, _interpret_as_select, HasPrefixes -from .. import util -from .. import exc - - -class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement): - """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements. - - """ - - __visit_name__ = 'update_base' - - _execution_options = \ - Executable._execution_options.union({'autocommit': True}) - _hints = util.immutabledict() - _prefixes = () - - def _process_colparams(self, parameters): - def process_single(p): - if isinstance(p, (list, tuple)): - return dict( - (c.key, pval) - for c, pval in zip(self.table.c, p) - ) - else: - return p - - if (isinstance(parameters, (list, tuple)) and parameters and - isinstance(parameters[0], (list, tuple, dict))): - - if not self._supports_multi_parameters: - raise exc.InvalidRequestError( - "This construct does not support " - "multiple parameter sets.") - - return [process_single(p) for p in parameters], True - else: - return process_single(parameters), False - - def params(self, *arg, **kw): - """Set the parameters for the statement. - - This method raises ``NotImplementedError`` on the base class, - and is overridden by :class:`.ValuesBase` to provide the - SET/VALUES clause of UPDATE and INSERT. - - """ - raise NotImplementedError( - "params() is not supported for INSERT/UPDATE/DELETE statements." - " To set the values for an INSERT or UPDATE statement, use" - " stmt.values(**parameters).") - - def bind(self): - """Return a 'bind' linked to this :class:`.UpdateBase` - or a :class:`.Table` associated with it. - - """ - return self._bind or self.table.bind - - def _set_bind(self, bind): - self._bind = bind - bind = property(bind, _set_bind) - - @_generative - def returning(self, *cols): - """Add a :term:`RETURNING` or equivalent clause to this statement. - - e.g.:: - - stmt = table.update().\\ - where(table.c.data == 'value').\\ - values(status='X').\\ - returning(table.c.server_flag, - table.c.updated_timestamp) - - for server_flag, updated_timestamp in connection.execute(stmt): - print(server_flag, updated_timestamp) - - The given collection of column expressions should be derived from - the table that is - the target of the INSERT, UPDATE, or DELETE. 
While :class:`.Column` - objects are typical, the elements can also be expressions:: - - stmt = table.insert().returning( - (table.c.first_name + " " + table.c.last_name). - label('fullname')) - - Upon compilation, a RETURNING clause, or database equivalent, - will be rendered within the statement. For INSERT and UPDATE, - the values are the newly inserted/updated values. For DELETE, - the values are those of the rows which were deleted. - - Upon execution, the values of the columns to be returned are made - available via the result set and can be iterated using - :meth:`.ResultProxy.fetchone` and similar. For DBAPIs which do not - natively support returning values (i.e. cx_oracle), SQLAlchemy will - approximate this behavior at the result level so that a reasonable - amount of behavioral neutrality is provided. - - Note that not all databases/DBAPIs - support RETURNING. For those backends with no support, - an exception is raised upon compilation and/or execution. - For those who do support it, the functionality across backends - varies greatly, including restrictions on executemany() - and other statements which return multiple rows. Please - read the documentation notes for the database in use in - order to determine the availability of RETURNING. - - .. seealso:: - - :meth:`.ValuesBase.return_defaults` - an alternative method tailored - towards efficient fetching of server-side defaults and triggers - for single-row INSERTs or UPDATEs. - - - """ - self._returning = cols - - @_generative - def with_hint(self, text, selectable=None, dialect_name="*"): - """Add a table hint for a single table to this - INSERT/UPDATE/DELETE statement. - - .. note:: - - :meth:`.UpdateBase.with_hint` currently applies only to - Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use - :meth:`.UpdateBase.prefix_with`. - - The text of the hint is rendered in the appropriate - location for the database backend in use, relative - to the :class:`.Table` that is the subject of this - statement, or optionally to that of the given - :class:`.Table` passed as the ``selectable`` argument. - - The ``dialect_name`` option will limit the rendering of a particular - hint to a particular backend. Such as, to add a hint - that only takes effect for SQL Server:: - - mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql") - - .. versionadded:: 0.7.6 - - :param text: Text of the hint. - :param selectable: optional :class:`.Table` that specifies - an element of the FROM clause within an UPDATE or DELETE - to be the subject of the hint - applies only to certain backends. - :param dialect_name: defaults to ``*``, if specified as the name - of a particular dialect, will apply these hints only when - that dialect is in use. - """ - if selectable is None: - selectable = self.table - - self._hints = self._hints.union( - {(selectable, dialect_name): text}) - - -class ValuesBase(UpdateBase): - """Supplies support for :meth:`.ValuesBase.values` to - INSERT and UPDATE constructs.""" - - __visit_name__ = 'values_base' - - _supports_multi_parameters = False - _has_multi_parameters = False - select = None - - def __init__(self, table, values, prefixes): - self.table = _interpret_as_from(table) - self.parameters, self._has_multi_parameters = \ - self._process_colparams(values) - if prefixes: - self._setup_prefixes(prefixes) - - @_generative - def values(self, *args, **kwargs): - """specify a fixed VALUES clause for an INSERT statement, or the SET - clause for an UPDATE. 
- - Note that the :class:`.Insert` and :class:`.Update` constructs support - per-execution time formatting of the VALUES and/or SET clauses, - based on the arguments passed to :meth:`.Connection.execute`. - However, the :meth:`.ValuesBase.values` method can be used to "fix" a - particular set of parameters into the statement. - - Multiple calls to :meth:`.ValuesBase.values` will produce a new - construct, each one with the parameter list modified to include - the new parameters sent. In the typical case of a single - dictionary of parameters, the newly passed keys will replace - the same keys in the previous construct. In the case of a list-based - "multiple values" construct, each new list of values is extended - onto the existing list of values. - - :param \**kwargs: key value pairs representing the string key - of a :class:`.Column` mapped to the value to be rendered into the - VALUES or SET clause:: - - users.insert().values(name="some name") - - users.update().where(users.c.id==5).values(name="some name") - - :param \*args: Alternatively, a dictionary, tuple or list - of dictionaries or tuples can be passed as a single positional - argument in order to form the VALUES or - SET clause of the statement. The single dictionary form - works the same as the kwargs form:: - - users.insert().values({"name": "some name"}) - - If a tuple is passed, the tuple should contain the same number - of columns as the target :class:`.Table`:: - - users.insert().values((5, "some name")) - - The :class:`.Insert` construct also supports multiply-rendered VALUES - construct, for those backends which support this SQL syntax - (SQLite, Postgresql, MySQL). This mode is indicated by passing a - list of one or more dictionaries/tuples:: - - users.insert().values([ - {"name": "some name"}, - {"name": "some other name"}, - {"name": "yet another name"}, - ]) - - In the case of an :class:`.Update` - construct, only the single dictionary/tuple form is accepted, - else an exception is raised. It is also an exception case to - attempt to mix the single-/multiple- value styles together, - either through multiple :meth:`.ValuesBase.values` calls - or by sending a list + kwargs at the same time. - - .. note:: - - Passing a multiple values list is *not* the same - as passing a multiple values list to the - :meth:`.Connection.execute` method. Passing a list of parameter - sets to :meth:`.ValuesBase.values` produces a construct of this - form:: - - INSERT INTO table (col1, col2, col3) VALUES - (col1_0, col2_0, col3_0), - (col1_1, col2_1, col3_1), - ... - - whereas a multiple list passed to :meth:`.Connection.execute` - has the effect of using the DBAPI - `executemany() `_ - method, which provides a high-performance system of invoking - a single-row INSERT or single-criteria UPDATE or DELETE statement - many times against a series - of parameter sets. The "executemany" style is supported by - all database backends, and works equally well for INSERT, - UPDATE, and DELETE, as it does not depend on a special SQL - syntax. See :ref:`execute_multiple` for an introduction to - the traditional Core method of multiple parameter set invocation - using this system. - - .. versionadded:: 0.8 - Support for multiple-VALUES INSERT statements. - - .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES - clause, even a list of length one, - implies that the :paramref:`.Insert.inline` flag is set to - True, indicating that the statement will not attempt to fetch - the "last inserted primary key" or other defaults. 
The statement - deals with an arbitrary number of rows, so the - :attr:`.ResultProxy.inserted_primary_key` accessor does not apply. - - .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports - columns with Python-side default values and callables in the - same way as that of an "executemany" style of invocation; the - callable is invoked for each row. See :ref:`bug_3288` - for other details. - - .. seealso:: - - :ref:`inserts_and_updates` - SQL Expression - Language Tutorial - - :func:`~.expression.insert` - produce an ``INSERT`` statement - - :func:`~.expression.update` - produce an ``UPDATE`` statement - - """ - if self.select is not None: - raise exc.InvalidRequestError( - "This construct already inserts from a SELECT") - if self._has_multi_parameters and kwargs: - raise exc.InvalidRequestError( - "This construct already has multiple parameter sets.") - - if args: - if len(args) > 1: - raise exc.ArgumentError( - "Only a single dictionary/tuple or list of " - "dictionaries/tuples is accepted positionally.") - v = args[0] - else: - v = {} - - if self.parameters is None: - self.parameters, self._has_multi_parameters = \ - self._process_colparams(v) - else: - if self._has_multi_parameters: - self.parameters = list(self.parameters) - p, self._has_multi_parameters = self._process_colparams(v) - if not self._has_multi_parameters: - raise exc.ArgumentError( - "Can't mix single-values and multiple values " - "formats in one statement") - - self.parameters.extend(p) - else: - self.parameters = self.parameters.copy() - p, self._has_multi_parameters = self._process_colparams(v) - if self._has_multi_parameters: - raise exc.ArgumentError( - "Can't mix single-values and multiple values " - "formats in one statement") - self.parameters.update(p) - - if kwargs: - if self._has_multi_parameters: - raise exc.ArgumentError( - "Can't pass kwargs and multiple parameter sets " - "simultaneously") - else: - self.parameters.update(kwargs) - - @_generative - def return_defaults(self, *cols): - """Make use of a :term:`RETURNING` clause for the purpose - of fetching server-side expressions and defaults. - - E.g.:: - - stmt = table.insert().values(data='newdata').return_defaults() - - result = connection.execute(stmt) - - server_created_at = result.returned_defaults['created_at'] - - When used against a backend that supports RETURNING, all column - values generated by SQL expression or server-side-default will be - added to any existing RETURNING clause, provided that - :meth:`.UpdateBase.returning` is not used simultaneously. The column - values will then be available on the result using the - :attr:`.ResultProxy.returned_defaults` accessor as a dictionary, - referring to values keyed to the :class:`.Column` object as well as - its ``.key``. - - This method differs from :meth:`.UpdateBase.returning` in these ways: - - 1. :meth:`.ValuesBase.return_defaults` is only intended for use with - an INSERT or an UPDATE statement that matches exactly one row. - While the RETURNING construct in the general sense supports - multiple rows for a multi-row UPDATE or DELETE statement, or for - special cases of INSERT that return multiple rows (e.g. INSERT from - SELECT, multi-valued VALUES clause), - :meth:`.ValuesBase.return_defaults` is intended only for an - "ORM-style" single-row INSERT/UPDATE statement. The row returned - by the statement is also consumed implicitly when - :meth:`.ValuesBase.return_defaults` is used.
By contrast, - :meth:`.UpdateBase.returning` leaves the RETURNING result-set - intact with a collection of any number of rows. - - 2. It is compatible with the existing logic to fetch auto-generated - primary key values, also known as "implicit returning". Backends - that support RETURNING will automatically make use of RETURNING in - order to fetch the value of newly generated primary keys; while the - :meth:`.UpdateBase.returning` method circumvents this behavior, - :meth:`.ValuesBase.return_defaults` leaves it intact. - - 3. It can be called against any backend. Backends that don't support - RETURNING will skip the usage of the feature, rather than raising - an exception. The return value of - :attr:`.ResultProxy.returned_defaults` will be ``None`` - - :meth:`.ValuesBase.return_defaults` is used by the ORM to provide - an efficient implementation for the ``eager_defaults`` feature of - :func:`.mapper`. - - :param cols: optional list of column key names or :class:`.Column` - objects. If omitted, all column expressions evaluated on the server - are added to the returning list. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :meth:`.UpdateBase.returning` - - :attr:`.ResultProxy.returned_defaults` - - """ - self._return_defaults = cols or True - - -class Insert(ValuesBase): - """Represent an INSERT construct. - - The :class:`.Insert` object is created using the - :func:`~.expression.insert()` function. - - .. seealso:: - - :ref:`coretutorial_insert_expressions` - - """ - __visit_name__ = 'insert' - - _supports_multi_parameters = True - - def __init__(self, - table, - values=None, - inline=False, - bind=None, - prefixes=None, - returning=None, - return_defaults=False, - **dialect_kw): - """Construct an :class:`.Insert` object. - - Similar functionality is available via the - :meth:`~.TableClause.insert` method on - :class:`~.schema.Table`. - - :param table: :class:`.TableClause` which is the subject of the - insert. - - :param values: collection of values to be inserted; see - :meth:`.Insert.values` for a description of allowed formats here. - Can be omitted entirely; a :class:`.Insert` construct will also - dynamically render the VALUES clause at execution time based on - the parameters passed to :meth:`.Connection.execute`. - - :param inline: if True, no attempt will be made to retrieve the - SQL-generated default values to be provided within the statement; - in particular, - this allows SQL expressions to be rendered 'inline' within the - statement without the need to pre-execute them beforehand; for - backends that support "returning", this turns off the "implicit - returning" feature for the statement. - - If both `values` and compile-time bind parameters are present, the - compile-time bind parameters override the information specified - within `values` on a per-key basis. - - The keys within `values` can be either - :class:`~sqlalchemy.schema.Column` objects or their string - identifiers. Each key may reference one of: - - * a literal data value (i.e. string, number, etc.); - * a Column object; - * a SELECT statement. - - If a ``SELECT`` statement is specified which references this - ``INSERT`` statement's table, the statement will be correlated - against the ``INSERT`` statement. - - .. 
seealso:: - - :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial - - :ref:`inserts_and_updates` - SQL Expression Tutorial - - """ - ValuesBase.__init__(self, table, values, prefixes) - self._bind = bind - self.select = self.select_names = None - self.include_insert_from_select_defaults = False - self.inline = inline - self._returning = returning - self._validate_dialect_kwargs(dialect_kw) - self._return_defaults = return_defaults - - def get_children(self, **kwargs): - if self.select is not None: - return self.select, - else: - return () - - @_generative - def from_select(self, names, select, include_defaults=True): - """Return a new :class:`.Insert` construct which represents - an ``INSERT...FROM SELECT`` statement. - - e.g.:: - - sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5) - ins = table2.insert().from_select(['a', 'b'], sel) - - :param names: a sequence of string column names or :class:`.Column` - objects representing the target columns. - :param select: a :func:`.select` construct, :class:`.FromClause` - or other construct which resolves into a :class:`.FromClause`, - such as an ORM :class:`.Query` object, etc. The order of - columns returned from this FROM clause should correspond to the - order of columns sent as the ``names`` parameter; while this - is not checked before passing along to the database, the database - would normally raise an exception if these column lists don't - correspond. - :param include_defaults: if True, non-server default values and - SQL expressions as specified on :class:`.Column` objects - (as documented in :ref:`metadata_defaults_toplevel`) not - otherwise specified in the list of names will be rendered - into the INSERT and SELECT statements, so that these values are also - included in the data to be inserted. - - .. note:: A Python-side default that uses a Python callable function - will only be invoked **once** for the whole statement, and **not - per row**. - - .. versionadded:: 1.0.0 - :meth:`.Insert.from_select` now renders - Python-side and SQL expression column defaults into the - SELECT statement for columns otherwise not included in the - list of column names. - - .. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT - implies that the :paramref:`.insert.inline` flag is set to - True, indicating that the statement will not attempt to fetch - the "last inserted primary key" or other defaults. The statement - deals with an arbitrary number of rows, so the - :attr:`.ResultProxy.inserted_primary_key` accessor does not apply. - - .. versionadded:: 0.8.3 - - """ - if self.parameters: - raise exc.InvalidRequestError( - "This construct already inserts value expressions") - - self.parameters, self._has_multi_parameters = \ - self._process_colparams( - dict((_column_as_key(n), Null()) for n in names)) - - self.select_names = names - self.inline = True - self.include_insert_from_select_defaults = include_defaults - self.select = _interpret_as_select(select) - - def _copy_internals(self, clone=_clone, **kw): - # TODO: coverage - self.parameters = self.parameters.copy() - if self.select is not None: - self.select = _clone(self.select) - - -class Update(ValuesBase): - """Represent an Update construct. - - The :class:`.Update` object is created using the :func:`update()` - function. 
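A minimal sketch of the ``INSERT...FROM SELECT`` form produced by :meth:`.Insert.from_select` above; the ``src`` and ``dst`` tables here are hypothetical stand-ins::

    from sqlalchemy import MetaData, Table, Column, Integer, String, select

    metadata = MetaData()
    src = Table('src', metadata, Column('a', Integer), Column('b', String(20)))
    dst = Table('dst', metadata, Column('a', Integer), Column('b', String(20)))

    sel = select([src.c.a, src.c.b]).where(src.c.a > 5)
    ins = dst.insert().from_select(['a', 'b'], sel)

    # renders roughly:
    # INSERT INTO dst (a, b) SELECT src.a, src.b FROM src WHERE src.a > :a_1
    print(ins)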
- - """ - __visit_name__ = 'update' - - def __init__(self, - table, - whereclause=None, - values=None, - inline=False, - bind=None, - prefixes=None, - returning=None, - return_defaults=False, - **dialect_kw): - """Construct an :class:`.Update` object. - - E.g.:: - - from sqlalchemy import update - - stmt = update(users).where(users.c.id==5).\\ - values(name='user #5') - - Similar functionality is available via the - :meth:`~.TableClause.update` method on - :class:`.Table`:: - - stmt = users.update().\\ - where(users.c.id==5).\\ - values(name='user #5') - - :param table: A :class:`.Table` object representing the database - table to be updated. - - :param whereclause: Optional SQL expression describing the ``WHERE`` - condition of the ``UPDATE`` statement. Modern applications - may prefer to use the generative :meth:`~Update.where()` - method to specify the ``WHERE`` clause. - - The WHERE clause can refer to multiple tables. - For databases which support this, an ``UPDATE FROM`` clause will - be generated, or on MySQL, a multi-table update. The statement - will fail on databases that don't have support for multi-table - update statements. A SQL-standard method of referring to - additional tables in the WHERE clause is to use a correlated - subquery:: - - users.update().values(name='ed').where( - users.c.name==select([addresses.c.email_address]).\\ - where(addresses.c.user_id==users.c.id).\\ - as_scalar() - ) - - .. versionchanged:: 0.7.4 - The WHERE clause can refer to multiple tables. - - :param values: - Optional dictionary which specifies the ``SET`` conditions of the - ``UPDATE``. If left as ``None``, the ``SET`` - conditions are determined from those parameters passed to the - statement during the execution and/or compilation of the - statement. When compiled standalone without any parameters, - the ``SET`` clause generates for all columns. - - Modern applications may prefer to use the generative - :meth:`.Update.values` method to set the values of the - UPDATE statement. - - :param inline: - if True, SQL defaults present on :class:`.Column` objects via - the ``default`` keyword will be compiled 'inline' into the statement - and not pre-executed. This means that their values will not - be available in the dictionary returned from - :meth:`.ResultProxy.last_updated_params`. - - If both ``values`` and compile-time bind parameters are present, the - compile-time bind parameters override the information specified - within ``values`` on a per-key basis. - - The keys within ``values`` can be either :class:`.Column` - objects or their string identifiers (specifically the "key" of the - :class:`.Column`, normally but not necessarily equivalent to - its "name"). Normally, the - :class:`.Column` objects used here are expected to be - part of the target :class:`.Table` that is the table - to be updated. However when using MySQL, a multiple-table - UPDATE statement can refer to columns from any of - the tables referred to in the WHERE clause. - - The values referred to in ``values`` are typically: - - * a literal data value (i.e. string, number, etc.) - * a SQL expression, such as a related :class:`.Column`, - a scalar-returning :func:`.select` construct, - etc. 
- - When combining :func:`.select` constructs within the values - clause of an :func:`.update` construct, - the subquery represented by the :func:`.select` should be - *correlated* to the parent table, that is, providing criterion - which links the table inside the subquery to the outer table - being updated:: - - users.update().values( - name=select([addresses.c.email_address]).\\ - where(addresses.c.user_id==users.c.id).\\ - as_scalar() - ) - - .. seealso:: - - :ref:`inserts_and_updates` - SQL Expression - Language Tutorial - - - """ - ValuesBase.__init__(self, table, values, prefixes) - self._bind = bind - self._returning = returning - if whereclause is not None: - self._whereclause = _literal_as_text(whereclause) - else: - self._whereclause = None - self.inline = inline - self._validate_dialect_kwargs(dialect_kw) - self._return_defaults = return_defaults - - def get_children(self, **kwargs): - if self._whereclause is not None: - return self._whereclause, - else: - return () - - def _copy_internals(self, clone=_clone, **kw): - # TODO: coverage - self._whereclause = clone(self._whereclause, **kw) - self.parameters = self.parameters.copy() - - @_generative - def where(self, whereclause): - """return a new update() construct with the given expression added to - its WHERE clause, joined to the existing clause via AND, if any. - - """ - if self._whereclause is not None: - self._whereclause = and_(self._whereclause, - _literal_as_text(whereclause)) - else: - self._whereclause = _literal_as_text(whereclause) - - @property - def _extra_froms(self): - # TODO: this could be made memoized - # if the memoization is reset on each generative call. - froms = [] - seen = set([self.table]) - - if self._whereclause is not None: - for item in _from_objects(self._whereclause): - if not seen.intersection(item._cloned_set): - froms.append(item) - seen.update(item._cloned_set) - - return froms - - -class Delete(UpdateBase): - """Represent a DELETE construct. - - The :class:`.Delete` object is created using the :func:`delete()` - function. - - """ - - __visit_name__ = 'delete' - - def __init__(self, - table, - whereclause=None, - bind=None, - returning=None, - prefixes=None, - **dialect_kw): - """Construct :class:`.Delete` object. - - Similar functionality is available via the - :meth:`~.TableClause.delete` method on - :class:`~.schema.Table`. - - :param table: The table to delete rows from. - - :param whereclause: A :class:`.ClauseElement` describing the ``WHERE`` - condition of the ``DELETE`` statement. Note that the - :meth:`~Delete.where()` generative method may be used instead. - - .. 
seealso:: - - :ref:`deletes` - SQL Expression Tutorial - - """ - self._bind = bind - self.table = _interpret_as_from(table) - self._returning = returning - - if prefixes: - self._setup_prefixes(prefixes) - - if whereclause is not None: - self._whereclause = _literal_as_text(whereclause) - else: - self._whereclause = None - - self._validate_dialect_kwargs(dialect_kw) - - def get_children(self, **kwargs): - if self._whereclause is not None: - return self._whereclause, - else: - return () - - @_generative - def where(self, whereclause): - """Add the given WHERE clause to a newly returned delete construct.""" - - if self._whereclause is not None: - self._whereclause = and_(self._whereclause, - _literal_as_text(whereclause)) - else: - self._whereclause = _literal_as_text(whereclause) - - def _copy_internals(self, clone=_clone, **kw): - # TODO: coverage - self._whereclause = clone(self._whereclause, **kw) diff --git a/python/sqlalchemy/sql/elements.py b/python/sqlalchemy/sql/elements.py deleted file mode 100644 index a44c308e..00000000 --- a/python/sqlalchemy/sql/elements.py +++ /dev/null @@ -1,3952 +0,0 @@ -# sql/elements.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# <see AUTHORS file> -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Core SQL expression elements, including :class:`.ClauseElement`, -:class:`.ColumnElement`, and derived classes. - -""" - -from __future__ import unicode_literals - -from .. import util, exc, inspection -from . import type_api -from . import operators -from .visitors import Visitable, cloned_traverse, traverse -from .annotation import Annotated -import itertools -from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG -from .base import _generative -import numbers - -import re -import operator - - -def _clone(element, **kw): - return element._clone() - - -def collate(expression, collation): - """Return the clause ``expression COLLATE collation``. - - e.g.:: - - collate(mycolumn, 'utf8_bin') - - produces:: - - mycolumn COLLATE utf8_bin - - """ - - expr = _literal_as_binds(expression) - return BinaryExpression( - expr, - _literal_as_text(collation), - operators.collate, type_=expr.type) - - -def between(expr, lower_bound, upper_bound, symmetric=False): - """Produce a ``BETWEEN`` predicate clause. - - E.g.:: - - from sqlalchemy import between - stmt = select([users_table]).where(between(users_table.c.id, 5, 7)) - - Would produce SQL resembling:: - - SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2 - - The :func:`.between` function is a standalone version of the - :meth:`.ColumnElement.between` method available on all - SQL expressions, as in:: - - stmt = select([users_table]).where(users_table.c.id.between(5, 7)) - - All arguments passed to :func:`.between`, including the left side - column expression, are coerced from Python scalar values if - the value is not a :class:`.ColumnElement` subclass. For example, - three fixed values can be compared as in:: - - print(between(5, 3, 7)) - - Which would produce:: - - :param_1 BETWEEN :param_2 AND :param_3 - - :param expr: a column expression, typically a :class:`.ColumnElement` - instance or alternatively a Python scalar expression to be coerced - into a column expression, serving as the left side of the ``BETWEEN`` - expression. - - :param lower_bound: a column or Python scalar expression serving as the - lower bound of the right side of the ``BETWEEN`` expression.
- - :param upper_bound: a column or Python scalar expression serving as the - upper bound of the right side of the ``BETWEEN`` expression. - - :param symmetric: if True, will render " BETWEEN SYMMETRIC ". Note - that not all databases support this syntax. - - .. versionadded:: 0.9.5 - - .. seealso:: - - :meth:`.ColumnElement.between` - - """ - expr = _literal_as_binds(expr) - return expr.between(lower_bound, upper_bound, symmetric=symmetric) - - -def literal(value, type_=None): - """Return a literal clause, bound to a bind parameter. - - Literal clauses are created automatically when non- - :class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are - used in a comparison operation with a :class:`.ColumnElement` subclass, - such as a :class:`~sqlalchemy.schema.Column` object. Use this function - to force the generation of a literal clause, which will be created as a - :class:`BindParameter` with a bound value. - - :param value: the value to be bound. Can be any Python object supported by - the underlying DB-API, or is translatable via the given type argument. - - :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which - will provide bind-parameter translation for this literal. - - """ - return BindParameter(None, value, type_=type_, unique=True) - - -def type_coerce(expression, type_): - """Associate a SQL expression with a particular type, without rendering - ``CAST``. - - E.g.:: - - from sqlalchemy import type_coerce - - stmt = select([type_coerce(log_table.date_string, StringDateTime())]) - - The above construct will produce SQL that is usually otherwise unaffected - by the :func:`.type_coerce` call:: - - SELECT date_string FROM log - - However, when result rows are fetched, the ``StringDateTime`` type - will be applied to result rows on behalf of the ``date_string`` column. - - A type that features bound-value handling will also have that behavior - take effect when literal values or :func:`.bindparam` constructs are - passed to :func:`.type_coerce` as targets. - For example, if a type implements the :meth:`.TypeEngine.bind_expression` - method or :meth:`.TypeEngine.bind_processor` method or equivalent, - these functions will take effect at statement compilation/execution time - when a literal value is passed, as in:: - - # bound-value handling of MyStringType will be applied to the - # literal value "some string" - stmt = select([type_coerce("some string", MyStringType)]) - - :func:`.type_coerce` is similar to the :func:`.cast` function, - except that it does not render the ``CAST`` expression in the resulting - statement. - - :param expression: A SQL expression, such as a :class:`.ColumnElement` - expression or a Python string which will be coerced into a bound literal - value. - - :param type_: A :class:`.TypeEngine` class or instance indicating - the type to which the expression is coerced. - - .. seealso:: - - :func:`.cast` - - """ - type_ = type_api.to_instance(type_) - - if hasattr(expression, '__clause_element__'): - return type_coerce(expression.__clause_element__(), type_) - elif isinstance(expression, BindParameter): - bp = expression._clone() - bp.type = type_ - return bp - elif not isinstance(expression, Visitable): - if expression is None: - return Null() - else: - return literal(expression, type_=type_) - else: - return Label(None, expression, type_=type_) - - -def outparam(key, type_=None): - """Create an 'OUT' parameter for usage in functions (stored procedures), - for databases which support them. 
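A sketch contrasting :func:`.type_coerce` with :func:`.cast`, per the discussion above; ``log`` is a hypothetical table::

    from sqlalchemy import String, cast, select, type_coerce
    from sqlalchemy.sql import column, table

    log = table('log', column('date_string'))

    # cast() renders an explicit CAST in the emitted SQL, roughly:
    # SELECT CAST(log.date_string AS VARCHAR) AS anon_1 FROM log
    print(select([cast(log.c.date_string, String)]))

    # type_coerce() leaves the SQL untouched; only the Python-side
    # bind/result processing of the given type is applied, roughly:
    # SELECT log.date_string FROM log
    print(select([type_coerce(log.c.date_string, String)]))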
- - The ``outparam`` can be used like a regular function parameter. - The "output" value will be available from the - :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters`` - attribute, which returns a dictionary containing the values. - - """ - return BindParameter( - key, None, type_=type_, unique=False, isoutparam=True) - - -def not_(clause): - """Return a negation of the given clause, i.e. ``NOT(clause)``. - - The ``~`` operator is also overloaded on all - :class:`.ColumnElement` subclasses to produce the - same result. - - """ - return operators.inv(_literal_as_binds(clause)) - - -@inspection._self_inspects -class ClauseElement(Visitable): - """Base class for elements of a programmatically constructed SQL - expression. - - """ - __visit_name__ = 'clause' - - _annotations = {} - supports_execution = False - _from_objects = [] - bind = None - _is_clone_of = None - is_selectable = False - is_clause_element = True - - description = None - _order_by_label_element = None - _is_from_container = False - - def _clone(self): - """Create a shallow copy of this ClauseElement. - - This method may be used by a generative API. It's also used as - part of the "deep" copy afforded by a traversal that combines - the _copy_internals() method. - - """ - c = self.__class__.__new__(self.__class__) - c.__dict__ = self.__dict__.copy() - ClauseElement._cloned_set._reset(c) - ColumnElement.comparator._reset(c) - - # this is a marker that helps to "equate" clauses to each other - # when a Select returns its list of FROM clauses. the cloning - # process leaves around a lot of remnants of the previous clause - # typically in the form of column expressions still attached to the - # old table. - c._is_clone_of = self - - return c - - @property - def _constructor(self): - """return the 'constructor' for this ClauseElement. - - This is for the purposes of creating a new object of - this type. Usually, it's just the element's __class__. - However, the "Annotated" version of the object overrides - to return the class of its proxied element. - - """ - return self.__class__ - - @util.memoized_property - def _cloned_set(self): - """Return the set consisting of all cloned ancestors of this - ClauseElement. - - Includes this ClauseElement. This accessor tends to be used for - FromClause objects to identify 'equivalent' FROM clauses, regardless - of transformative operations. - - """ - s = util.column_set() - f = self - while f is not None: - s.add(f) - f = f._is_clone_of - return s - - def __getstate__(self): - d = self.__dict__.copy() - d.pop('_is_clone_of', None) - return d - - def _annotate(self, values): - """return a copy of this ClauseElement with annotations - updated by the given dictionary. - - """ - return Annotated(self, values) - - def _with_annotations(self, values): - """return a copy of this ClauseElement with annotations - replaced by the given dictionary. - - """ - return Annotated(self, values) - - def _deannotate(self, values=None, clone=False): - """return a copy of this :class:`.ClauseElement` with annotations - removed. - - :param values: optional tuple of individual values - to remove.
- - """ - if clone: - # clone is used when we are also copying - # the expression for a deep deannotation - return self._clone() - else: - # if no clone, since we have no annotations we return - # self - return self - - def _execute_on_connection(self, connection, multiparams, params): - return connection._execute_clauseelement(self, multiparams, params) - - def unique_params(self, *optionaldict, **kwargs): - """Return a copy with :func:`bindparam()` elements replaced. - - Same functionality as ``params()``, except adds `unique=True` - to affected bind parameters so that multiple statements can be - used. - - """ - return self._params(True, optionaldict, kwargs) - - def params(self, *optionaldict, **kwargs): - """Return a copy with :func:`bindparam()` elements replaced. - - Returns a copy of this ClauseElement with :func:`bindparam()` - elements replaced with values taken from the given dictionary:: - - >>> clause = column('x') + bindparam('foo') - >>> print clause.compile().params - {'foo':None} - >>> print clause.params({'foo':7}).compile().params - {'foo':7} - - """ - return self._params(False, optionaldict, kwargs) - - def _params(self, unique, optionaldict, kwargs): - if len(optionaldict) == 1: - kwargs.update(optionaldict[0]) - elif len(optionaldict) > 1: - raise exc.ArgumentError( - "params() takes zero or one positional dictionary argument") - - def visit_bindparam(bind): - if bind.key in kwargs: - bind.value = kwargs[bind.key] - bind.required = False - if unique: - bind._convert_to_unique() - return cloned_traverse(self, {}, {'bindparam': visit_bindparam}) - - def compare(self, other, **kw): - """Compare this ClauseElement to the given ClauseElement. - - Subclasses should override the default behavior, which is a - straight identity comparison. - - \**kw are arguments consumed by subclass compare() methods and - may be used to modify the criteria for comparison. - (see :class:`.ColumnElement`) - - """ - return self is other - - def _copy_internals(self, clone=_clone, **kw): - """Reassign internal elements to be clones of themselves. - - Called during a copy-and-traverse operation on newly - shallow-copied elements to create a deep copy. - - The given clone function should be used, which may be applying - additional transformations to the element (i.e. replacement - traversal, cloned traversal, annotations). - - """ - pass - - def get_children(self, **kwargs): - """Return immediate child elements of this :class:`.ClauseElement`. - - This is used for visit traversal. - - \**kwargs may contain flags that change the collection that is - returned, for example to return a subset of items in order to - cut down on larger traversals, or to return child items from a - different context (such as schema-level collections instead of - clause-level). - - """ - return [] - - def self_group(self, against=None): - """Apply a 'grouping' to this :class:`.ClauseElement`. - - This method is overridden by subclasses to return a - "grouping" construct, i.e. parenthesis. In particular - it's used by "binary" expressions to provide a grouping - around themselves when placed into a larger expression, - as well as by :func:`.select` constructs when placed into - the FROM clause of another :func:`.select`. (Note that - subqueries should be normally created using the - :meth:`.Select.alias` method, as many platforms require - nested SELECT statements to be named). 
- - As expressions are composed together, the application of - :meth:`self_group` is automatic - end-user code should never - need to use this method directly. Note that SQLAlchemy's - clause constructs take operator precedence into account - - so parentheses might not be needed, for example, in - an expression like ``x OR (y AND z)`` - AND takes precedence - over OR. - - The base :meth:`self_group` method of :class:`.ClauseElement` - just returns self. - """ - return self - - @util.dependencies("sqlalchemy.engine.default") - def compile(self, default, bind=None, dialect=None, **kw): - """Compile this SQL expression. - - The return value is a :class:`~.Compiled` object. - Calling ``str()`` or ``unicode()`` on the returned value will yield a - string representation of the result. The - :class:`~.Compiled` object also can return a - dictionary of bind parameter names and values - using the ``params`` accessor. - - :param bind: An ``Engine`` or ``Connection`` from which a - ``Compiled`` will be acquired. This argument takes precedence over - this :class:`.ClauseElement`'s bound engine, if any. - - :param column_keys: Used for INSERT and UPDATE statements, a list of - column names which should be present in the VALUES clause of the - compiled statement. If ``None``, all columns from the target table - object are rendered. - - :param dialect: A ``Dialect`` instance from which a ``Compiled`` - will be acquired. This argument takes precedence over the `bind` - argument as well as this :class:`.ClauseElement`'s bound engine, - if any. - - :param inline: Used for INSERT statements, for a dialect which does - not support inline retrieval of newly generated primary key - columns, will force the expression used to create the new primary - key value to be rendered inline within the INSERT statement's - VALUES clause. This typically refers to Sequence execution but may - also refer to any server-side default generation function - associated with a primary key `Column`. - - :param compile_kwargs: optional dictionary of additional parameters - that will be passed through to the compiler within all "visit" - methods. This allows any custom flag to be passed through to - a custom compilation construct, for example. It is also used - for the case of passing the ``literal_binds`` flag through:: - - from sqlalchemy.sql import table, column, select - - t = table('t', column('x')) - - s = select([t]).where(t.c.x == 5) - - print s.compile(compile_kwargs={"literal_binds": True}) - - .. versionadded:: 0.9.0 - - .. seealso:: - - :ref:`faq_sql_expression_string` - - """ - - if not dialect: - if bind: - dialect = bind.dialect - elif self.bind: - dialect = self.bind.dialect - bind = self.bind - else: - dialect = default.DefaultDialect() - return self._compiler(dialect, bind=bind, **kw) - - def _compiler(self, dialect, **kw): - """Return a compiler appropriate for this ClauseElement, given a - Dialect.""" - - return dialect.statement_compiler(dialect, self, **kw) - - def __str__(self): - if util.py3k: - return str(self.compile()) - else: - return unicode(self.compile()).encode('ascii', 'backslashreplace') - - def __and__(self, other): - """'and' at the ClauseElement level. - - .. deprecated:: 0.9.5 - conjunctions are intended to be - at the :class:`.ColumnElement` level - - """ - return and_(self, other) - - def __or__(self, other): - """'or' at the ClauseElement level. - - .. deprecated:: 0.9.5 - conjunctions are intended to be - at the :class:`.ColumnElement`
level - - """ - return or_(self, other) - - def __invert__(self): - if hasattr(self, 'negation_clause'): - return self.negation_clause - else: - return self._negate() - - def _negate(self): - return UnaryExpression( - self.self_group(against=operators.inv), - operator=operators.inv, - negate=None) - - def __bool__(self): - raise TypeError("Boolean value of this clause is not defined") - - __nonzero__ = __bool__ - - def __repr__(self): - friendly = self.description - if friendly is None: - return object.__repr__(self) - else: - return '<%s.%s at 0x%x; %s>' % ( - self.__module__, self.__class__.__name__, id(self), friendly) - - -class ColumnElement(operators.ColumnOperators, ClauseElement): - """Represent a column-oriented SQL expression suitable for usage in the - "columns" clause, WHERE clause etc. of a statement. - - While the most familiar kind of :class:`.ColumnElement` is the - :class:`.Column` object, :class:`.ColumnElement` serves as the basis - for any unit that may be present in a SQL expression, including - the expressions themselves, SQL functions, bound parameters, - literal expressions, keywords such as ``NULL``, etc. - :class:`.ColumnElement` is the ultimate base class for all such elements. - - A wide variety of SQLAlchemy Core functions work at the SQL expression - level, and are intended to accept instances of :class:`.ColumnElement` as - arguments. These functions will typically document that they accept a - "SQL expression" as an argument. What this means in terms of SQLAlchemy - usually refers to an input which is either already in the form of a - :class:`.ColumnElement` object, or a value which can be **coerced** into - one. The coercion rules followed by most, but not all, SQLAlchemy Core - functions with regards to SQL expressions are as follows: - - * a literal Python value, such as a string, integer or floating - point value, boolean, datetime, ``Decimal`` object, or virtually - any other Python object, will be coerced into a "literal bound - value". This generally means that a :func:`.bindparam` will be - produced featuring the given value embedded into the construct; the - resulting :class:`.BindParameter` object is an instance of - :class:`.ColumnElement`. The Python value will ultimately be sent - to the DBAPI at execution time as a parameterized argument to the - ``execute()`` or ``executemany()`` methods, after SQLAlchemy - type-specific converters (e.g. those provided by any associated - :class:`.TypeEngine` objects) are applied to the value. - - * any special object value, typically ORM-level constructs, which - feature a method called ``__clause_element__()``. The Core - expression system looks for this method when an object of otherwise - unknown type is passed to a function that is looking to coerce the - argument into a :class:`.ColumnElement` expression. The - ``__clause_element__()`` method, if present, should return a - :class:`.ColumnElement` instance. The primary use of - ``__clause_element__()`` within SQLAlchemy is that of class-bound - attributes on ORM-mapped classes; a ``User`` class which contains a - mapped attribute named ``.name`` will have a method - ``User.name.__clause_element__()`` which when invoked returns the - :class:`.Column` called ``name`` associated with the mapped table. - - * The Python ``None`` value is typically interpreted as ``NULL``, - which in SQLAlchemy Core produces an instance of :func:`.null`.
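The coercion rules in the list above can be observed directly; a minimal sketch::

    from sqlalchemy.sql import column

    expr = column('name') == 'wendy'

    # the plain string on the right was coerced into a bound parameter
    print(type(expr.right).__name__)    # BindParameter

    # the Python None value is interpreted as SQL NULL
    print(column('name') == None)       # name IS NULL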
- - A :class:`.ColumnElement` provides the ability to generate new - :class:`.ColumnElement` - objects using Python expressions. This means that Python operators - such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations, - and allow the instantiation of further :class:`.ColumnElement` instances - which are composed from other, more fundamental :class:`.ColumnElement` - objects. For example, two :class:`.ColumnClause` objects can be added - together with the addition operator ``+`` to produce - a :class:`.BinaryExpression`. - Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses - of :class:`.ColumnElement`:: - - >>> from sqlalchemy.sql import column - >>> column('a') + column('b') - <sqlalchemy.sql.elements.BinaryExpression object at 0x...> - >>> print column('a') + column('b') - a + b - - .. seealso:: - - :class:`.Column` - - :func:`.expression.column` - - """ - - __visit_name__ = 'column' - primary_key = False - foreign_keys = [] - - _label = None - """The named label that can be used to target - this column in a result set. - - This label is almost always the label used when - rendering ``<expr> AS <label>`` in a SELECT statement; typically, columns that don't have - any parent table are named the same as what the label would be - in any case. - - """ - - _resolve_label = None - """The name that should be used to identify this ColumnElement in a - select() object when "label resolution" logic is used; this refers - to using a string name in an expression like order_by() or group_by() - that wishes to target a labeled expression in the columns clause. - - The name is distinct from that of .name or ._label to account for the case - where anonymizing logic may be used to change the name that's actually - rendered at compile time; this attribute should hold onto the original - name that was user-assigned when producing a .label() construct. - - """ - - _allow_label_resolve = True - """A flag that can be flipped to prevent a column from being resolvable - by string label name.""" - - _alt_names = () - - def self_group(self, against=None): - if (against in (operators.and_, operators.or_, operators._asbool) and - self.type._type_affinity - is type_api.BOOLEANTYPE._type_affinity): - return AsBoolean(self, operators.istrue, operators.isfalse) - else: - return self - - def _negate(self): - if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity: - return AsBoolean(self, operators.isfalse, operators.istrue) - else: - return super(ColumnElement, self)._negate() - - @util.memoized_property - def type(self): - return type_api.NULLTYPE - - @util.memoized_property - def comparator(self): - try: - comparator_factory = self.type.comparator_factory - except AttributeError: - raise TypeError( - "Object %r associated with '.type' attribute " - "is not a TypeEngine class or object" % self.type) - else: - return comparator_factory(self) - - def __getattr__(self, key): - try: - return getattr(self.comparator, key) - except AttributeError: - raise AttributeError( - 'Neither %r object nor %r object has an attribute %r' % ( - type(self).__name__, - type(self.comparator).__name__, - key) - ) - - def operate(self, op, *other, **kwargs): - return op(self.comparator, *other, **kwargs) - - def reverse_operate(self, op, other, **kwargs): - return op(other, self.comparator, **kwargs) - - def _bind_param(self, operator, obj): - return BindParameter(None, obj, - _compared_to_operator=operator, - _compared_to_type=self.type, unique=True) - - @property - def expression(self): - """Return a column expression. - - Part of the inspection interface; returns self.
- - """ - return self - - @property - def _select_iterable(self): - return (self, ) - - @util.memoized_property - def base_columns(self): - return util.column_set(c for c in self.proxy_set - if not hasattr(c, '_proxies')) - - @util.memoized_property - def proxy_set(self): - s = util.column_set([self]) - if hasattr(self, '_proxies'): - for c in self._proxies: - s.update(c.proxy_set) - return s - - def shares_lineage(self, othercolumn): - """Return True if the given :class:`.ColumnElement` - has a common ancestor to this :class:`.ColumnElement`.""" - - return bool(self.proxy_set.intersection(othercolumn.proxy_set)) - - def _compare_name_for_result(self, other): - """Return True if the given column element compares to this one - when targeting within a result row.""" - - return hasattr(other, 'name') and hasattr(self, 'name') and \ - other.name == self.name - - def _make_proxy( - self, selectable, name=None, name_is_truncatable=False, **kw): - """Create a new :class:`.ColumnElement` representing this - :class:`.ColumnElement` as it appears in the select list of a - descending selectable. - - """ - if name is None: - name = self.anon_label - if self.key: - key = self.key - else: - try: - key = str(self) - except exc.UnsupportedCompilationError: - key = self.anon_label - - else: - key = name - co = ColumnClause( - _as_truncated(name) if name_is_truncatable else name, - type_=getattr(self, 'type', None), - _selectable=selectable - ) - co._proxies = [self] - if selectable._is_clone_of is not None: - co._is_clone_of = \ - selectable._is_clone_of.columns.get(key) - selectable._columns[key] = co - return co - - def compare(self, other, use_proxies=False, equivalents=None, **kw): - """Compare this ColumnElement to another. - - Special arguments understood: - - :param use_proxies: when True, consider two columns that - share a common base column as equivalent (i.e. shares_lineage()) - - :param equivalents: a dictionary of columns as keys mapped to sets - of columns. If the given "other" column is present in this - dictionary, if any of the columns in the corresponding set() pass - the comparison test, the result is True. This is used to expand the - comparison to other columns that may be known to be equivalent to - this one via foreign key or other criterion. - - """ - to_compare = (other, ) - if equivalents and other in equivalents: - to_compare = equivalents[other].union(to_compare) - - for oth in to_compare: - if use_proxies and self.shares_lineage(oth): - return True - elif hash(oth) == hash(self): - return True - else: - return False - - def cast(self, type_): - """Produce a type cast, i.e. ``CAST( AS )``. - - This is a shortcut to the :func:`~.expression.cast` function. - - .. versionadded:: 1.0.7 - - """ - return Cast(self, type_) - - def label(self, name): - """Produce a column label, i.e. `` AS ``. - - This is a shortcut to the :func:`~.expression.label` function. - - if 'name' is None, an anonymous label name will be generated. - - """ - return Label(name, self, self.type) - - @util.memoized_property - def anon_label(self): - """provides a constant 'anonymous label' for this ColumnElement. - - This is a label() expression which will be named at compile time. - The same label() is returned each time anon_label is called so - that expressions can reference anon_label multiple times, producing - the same label name at compile time. - - the compiler uses this function automatically at compile time - for expressions that are known to be 'unnamed' like binary - expressions and function calls. 
- - """ - while self._is_clone_of is not None: - self = self._is_clone_of - - return _anonymous_label( - '%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon')) - ) - - -class BindParameter(ColumnElement): - """Represent a "bound expression". - - :class:`.BindParameter` is invoked explicitly using the - :func:`.bindparam` function, as in:: - - from sqlalchemy import bindparam - - stmt = select([users_table]).\\ - where(users_table.c.name == bindparam('username')) - - Detailed discussion of how :class:`.BindParameter` is used is - at :func:`.bindparam`. - - .. seealso:: - - :func:`.bindparam` - - """ - - __visit_name__ = 'bindparam' - - _is_crud = False - - def __init__(self, key, value=NO_ARG, type_=None, - unique=False, required=NO_ARG, - quote=None, callable_=None, - isoutparam=False, - _compared_to_operator=None, - _compared_to_type=None): - """Produce a "bound expression". - - The return value is an instance of :class:`.BindParameter`; this - is a :class:`.ColumnElement` subclass which represents a so-called - "placeholder" value in a SQL expression, the value of which is - supplied at the point at which the statement in executed against a - database connection. - - In SQLAlchemy, the :func:`.bindparam` construct has - the ability to carry along the actual value that will be ultimately - used at expression time. In this way, it serves not just as - a "placeholder" for eventual population, but also as a means of - representing so-called "unsafe" values which should not be rendered - directly in a SQL statement, but rather should be passed along - to the :term:`DBAPI` as values which need to be correctly escaped - and potentially handled for type-safety. - - When using :func:`.bindparam` explicitly, the use case is typically - one of traditional deferment of parameters; the :func:`.bindparam` - construct accepts a name which can then be referred to at execution - time:: - - from sqlalchemy import bindparam - - stmt = select([users_table]).\\ - where(users_table.c.name == bindparam('username')) - - The above statement, when rendered, will produce SQL similar to:: - - SELECT id, name FROM user WHERE name = :username - - In order to populate the value of ``:username`` above, the value - would typically be applied at execution time to a method - like :meth:`.Connection.execute`:: - - result = connection.execute(stmt, username='wendy') - - Explicit use of :func:`.bindparam` is also common when producing - UPDATE or DELETE statements that are to be invoked multiple times, - where the WHERE criterion of the statement is to change on each - invocation, such as:: - - stmt = (users_table.update(). - where(user_table.c.name == bindparam('username')). - values(fullname=bindparam('fullname')) - ) - - connection.execute( - stmt, [{"username": "wendy", "fullname": "Wendy Smith"}, - {"username": "jack", "fullname": "Jack Jones"}, - ] - ) - - SQLAlchemy's Core expression system makes wide use of - :func:`.bindparam` in an implicit sense. It is typical that Python - literal values passed to virtually all SQL expression functions are - coerced into fixed :func:`.bindparam` constructs. 
For example, given - a comparison operation such as:: - - expr = users_table.c.name == 'Wendy' - - The above expression will produce a :class:`.BinaryExpression` - construct, where the left side is the :class:`.Column` object - representing the ``name`` column, and the right side is a - :class:`.BindParameter` representing the literal value:: - - print(repr(expr.right)) - BindParameter('%(4327771088 name)s', 'Wendy', type_=String()) - - The expression above will render SQL such as:: - - user.name = :name_1 - - Where the ``:name_1`` parameter name is an anonymous name. The - actual string ``Wendy`` is not in the rendered string, but is carried - along where it is later used within statement execution. If we - invoke a statement like the following:: - - stmt = select([users_table]).where(users_table.c.name == 'Wendy') - result = connection.execute(stmt) - - We would see SQL logging output as:: - - SELECT "user".id, "user".name - FROM "user" - WHERE "user".name = %(name_1)s - {'name_1': 'Wendy'} - - Above, we see that ``Wendy`` is passed as a parameter to the database, - while the placeholder ``:name_1`` is rendered in the appropriate form - for the target database, in this case the Postgresql database. - - Similarly, :func:`.bindparam` is invoked automatically - when working with :term:`CRUD` statements as far as the "VALUES" - portion is concerned. The :func:`.insert` construct produces an - ``INSERT`` expression which will, at statement execution time, - generate bound placeholders based on the arguments passed, as in:: - - stmt = users_table.insert() - result = connection.execute(stmt, name='Wendy') - - The above will produce SQL output as:: - - INSERT INTO "user" (name) VALUES (%(name)s) - {'name': 'Wendy'} - - The :class:`.Insert` construct, at compilation/execution time, - rendered a single :func:`.bindparam` mirroring the column - name ``name`` as a result of the single ``name`` parameter - we passed to the :meth:`.Connection.execute` method. - - :param key: - the key (e.g. the name) for this bind param. - Will be used in the generated - SQL statement for dialects that use named parameters. This - value may be modified when part of a compilation operation, - if other :class:`BindParameter` objects exist with the same - key, or if its length is too long and truncation is - required. - - :param value: - Initial value for this bind param. Will be used at statement - execution time as the value for this parameter passed to the - DBAPI, if no other value is indicated to the statement execution - method for this particular parameter name. Defaults to ``None``. - - :param callable\_: - A callable function that takes the place of "value". The function - will be called at statement execution time to determine the - ultimate value. Used for scenarios where the actual bind - value cannot be determined at the point at which the clause - construct is created, but embedded bind values are still desirable. - - :param type\_: - A :class:`.TypeEngine` class or instance representing an optional - datatype for this :func:`.bindparam`. If not passed, a type - may be determined automatically for the bind, based on the given - value; for example, trivial Python types such as ``str``, - ``int``, ``bool`` - may result in the :class:`.String`, :class:`.Integer` or - :class:`.Boolean` types being automatically selected. - - The type of a :func:`.bindparam` is significant especially in that - the type will apply pre-processing to the value before it is - passed to the database.
For example, a :func:`.bindparam` which - refers to a datetime value, and is specified as holding the - :class:`.DateTime` type, may apply conversion needed to the - value (such as stringification on SQLite) before passing the value - to the database. - - :param unique: - if True, the key name of this :class:`.BindParameter` will be - modified if another :class:`.BindParameter` of the same name - already has been located within the containing - expression. This flag is used generally by the internals - when producing so-called "anonymous" bound expressions, it - isn't generally applicable to explicitly-named :func:`.bindparam` - constructs. - - :param required: - If ``True``, a value is required at execution time. If not passed, - it defaults to ``True`` if neither :paramref:`.bindparam.value` - or :paramref:`.bindparam.callable` were passed. If either of these - parameters are present, then :paramref:`.bindparam.required` - defaults to ``False``. - - .. versionchanged:: 0.8 If the ``required`` flag is not specified, - it will be set automatically to ``True`` or ``False`` depending - on whether or not the ``value`` or ``callable`` parameters - were specified. - - :param quote: - True if this parameter name requires quoting and is not - currently known as a SQLAlchemy reserved word; this currently - only applies to the Oracle backend, where bound names must - sometimes be quoted. - - :param isoutparam: - if True, the parameter should be treated like a stored procedure - "OUT" parameter. This applies to backends such as Oracle which - support OUT parameters. - - .. seealso:: - - :ref:`coretutorial_bind_param` - - :ref:`coretutorial_insert_expressions` - - :func:`.outparam` - - """ - if isinstance(key, ColumnClause): - type_ = key.type - key = key.key - if required is NO_ARG: - required = (value is NO_ARG and callable_ is None) - if value is NO_ARG: - value = None - - if quote is not None: - key = quoted_name(key, quote) - - if unique: - self.key = _anonymous_label('%%(%d %s)s' % (id(self), key - or 'param')) - else: - self.key = key or _anonymous_label('%%(%d param)s' - % id(self)) - - # identifying key that won't change across - # clones, used to identify the bind's logical - # identity - self._identifying_key = self.key - - # key that was passed in the first place, used to - # generate new keys - self._orig_key = key or 'param' - - self.unique = unique - self.value = value - self.callable = callable_ - self.isoutparam = isoutparam - self.required = required - if type_ is None: - if _compared_to_type is not None: - self.type = \ - _compared_to_type.coerce_compared_value( - _compared_to_operator, value) - else: - self.type = type_api._type_map.get(type(value), - type_api.NULLTYPE) - elif isinstance(type_, type): - self.type = type_() - else: - self.type = type_ - - def _with_value(self, value): - """Return a copy of this :class:`.BindParameter` with the given value - set. - """ - cloned = self._clone() - cloned.value = value - cloned.callable = None - cloned.required = False - if cloned.type is type_api.NULLTYPE: - cloned.type = type_api._type_map.get(type(value), - type_api.NULLTYPE) - return cloned - - @property - def effective_value(self): - """Return the value of this bound parameter, - taking into account if the ``callable`` parameter - was set. - - The ``callable`` value will be evaluated - and returned if present, else ``value``. 
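How the ``value``, ``callable_`` and ``required`` parameters described above interact; a small sketch::

    from sqlalchemy import bindparam

    # no value and no callable: the parameter is required at execution time
    print(bindparam('uid').required)                 # True

    # supplying a value (or a callable) flips required to False
    print(bindparam('uid', value=5).required)        # False

    # a callable is consulted lazily, via effective_value
    print(bindparam('uid', callable_=lambda: 42).effective_value)  # 42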
- - """ - if self.callable: - return self.callable() - else: - return self.value - - def _clone(self): - c = ClauseElement._clone(self) - if self.unique: - c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key - or 'param')) - return c - - def _convert_to_unique(self): - if not self.unique: - self.unique = True - self.key = _anonymous_label( - '%%(%d %s)s' % (id(self), self._orig_key or 'param')) - - def compare(self, other, **kw): - """Compare this :class:`BindParameter` to the given - clause.""" - - return isinstance(other, BindParameter) \ - and self.type._compare_type_affinity(other.type) \ - and self.value == other.value - - def __getstate__(self): - """execute a deferred value for serialization purposes.""" - - d = self.__dict__.copy() - v = self.value - if self.callable: - v = self.callable() - d['callable'] = None - d['value'] = v - return d - - def __repr__(self): - return 'BindParameter(%r, %r, type_=%r)' % (self.key, - self.value, self.type) - - -class TypeClause(ClauseElement): - """Handle a type keyword in a SQL statement. - - Used by the ``Case`` statement. - - """ - - __visit_name__ = 'typeclause' - - def __init__(self, type): - self.type = type - - -class TextClause(Executable, ClauseElement): - """Represent a literal SQL text fragment. - - E.g.:: - - from sqlalchemy import text - - t = text("SELECT * FROM users") - result = connection.execute(t) - - - The :class:`.Text` construct is produced using the :func:`.text` - function; see that function for full documentation. - - .. seealso:: - - :func:`.text` - - """ - - __visit_name__ = 'textclause' - - _bind_params_regex = re.compile(r'(?`` - to specify bind parameters; they will be compiled to their - engine-specific format. - - :param autocommit: - Deprecated. Use .execution_options(autocommit=) - to set the autocommit option. - - :param bind: - an optional connection or engine to be used for this text query. - - :param bindparams: - Deprecated. A list of :func:`.bindparam` instances used to - provide information about parameters embedded in the statement. - This argument now invokes the :meth:`.TextClause.bindparams` - method on the construct before returning it. E.g.:: - - stmt = text("SELECT * FROM table WHERE id=:id", - bindparams=[bindparam('id', value=5, type_=Integer)]) - - Is equivalent to:: - - stmt = text("SELECT * FROM table WHERE id=:id").\\ - bindparams(bindparam('id', value=5, type_=Integer)) - - .. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method - supersedes the ``bindparams`` argument to :func:`.text`. - - :param typemap: - Deprecated. A dictionary mapping the names of columns - represented in the columns clause of a ``SELECT`` statement - to type objects, - which will be used to perform post-processing on columns within - the result set. This parameter now invokes the - :meth:`.TextClause.columns` method, which returns a - :class:`.TextAsFrom` construct that gains a ``.c`` collection and - can be embedded in other expressions. E.g.:: - - stmt = text("SELECT * FROM table", - typemap={'id': Integer, 'name': String}, - ) - - Is equivalent to:: - - stmt = text("SELECT * FROM table").columns(id=Integer, - name=String) - - Or alternatively:: - - from sqlalchemy.sql import column - stmt = text("SELECT * FROM table").columns( - column('id', Integer), - column('name', String) - ) - - .. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method - supersedes the ``typemap`` argument to :func:`.text`. 
- - """ - stmt = TextClause(text, bind=bind) - if bindparams: - stmt = stmt.bindparams(*bindparams) - if typemap: - stmt = stmt.columns(**typemap) - if autocommit is not None: - util.warn_deprecated('autocommit on text() is deprecated. ' - 'Use .execution_options(autocommit=True)') - stmt = stmt.execution_options(autocommit=autocommit) - - return stmt - - @_generative - def bindparams(self, *binds, **names_to_values): - """Establish the values and/or types of bound parameters within - this :class:`.TextClause` construct. - - Given a text construct such as:: - - from sqlalchemy import text - stmt = text("SELECT id, name FROM user WHERE name=:name " - "AND timestamp=:timestamp") - - the :meth:`.TextClause.bindparams` method can be used to establish - the initial value of ``:name`` and ``:timestamp``, - using simple keyword arguments:: - - stmt = stmt.bindparams(name='jack', - timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)) - - Where above, new :class:`.BindParameter` objects - will be generated with the names ``name`` and ``timestamp``, and - values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``, - respectively. The types will be - inferred from the values given, in this case :class:`.String` and - :class:`.DateTime`. - - When specific typing behavior is needed, the positional ``*binds`` - argument can be used in which to specify :func:`.bindparam` constructs - directly. These constructs must include at least the ``key`` - argument, then an optional value and type:: - - from sqlalchemy import bindparam - stmt = stmt.bindparams( - bindparam('name', value='jack', type_=String), - bindparam('timestamp', type_=DateTime) - ) - - Above, we specified the type of :class:`.DateTime` for the - ``timestamp`` bind, and the type of :class:`.String` for the ``name`` - bind. In the case of ``name`` we also set the default value of - ``"jack"``. - - Additional bound parameters can be supplied at statement execution - time, e.g.:: - - result = connection.execute(stmt, - timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)) - - The :meth:`.TextClause.bindparams` method can be called repeatedly, - where it will re-use existing :class:`.BindParameter` objects to add - new information. For example, we can call - :meth:`.TextClause.bindparams` first with typing information, and a - second time with value information, and it will be combined:: - - stmt = text("SELECT id, name FROM user WHERE name=:name " - "AND timestamp=:timestamp") - stmt = stmt.bindparams( - bindparam('name', type_=String), - bindparam('timestamp', type_=DateTime) - ) - stmt = stmt.bindparams( - name='jack', - timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5) - ) - - - .. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method - supersedes the argument ``bindparams`` passed to - :func:`~.expression.text`. 
- - - """ - self._bindparams = new_params = self._bindparams.copy() - - for bind in binds: - try: - existing = new_params[bind.key] - except KeyError: - raise exc.ArgumentError( - "This text() construct doesn't define a " - "bound parameter named %r" % bind.key) - else: - new_params[existing.key] = bind - - for key, value in names_to_values.items(): - try: - existing = new_params[key] - except KeyError: - raise exc.ArgumentError( - "This text() construct doesn't define a " - "bound parameter named %r" % key) - else: - new_params[key] = existing._with_value(value) - - @util.dependencies('sqlalchemy.sql.selectable') - def columns(self, selectable, *cols, **types): - """Turn this :class:`.TextClause` object into a :class:`.TextAsFrom` - object that can be embedded into another statement. - - This function essentially bridges the gap between an entirely - textual SELECT statement and the SQL expression language concept - of a "selectable":: - - from sqlalchemy.sql import column, text - - stmt = text("SELECT id, name FROM some_table") - stmt = stmt.columns(column('id'), column('name')).alias('st') - - stmt = select([mytable]).\\ - select_from( - mytable.join(stmt, mytable.c.name == stmt.c.name) - ).where(stmt.c.id > 5) - - Above, we used untyped :func:`.column` elements. These can also have - types specified, which will impact how the column behaves in - expressions as well as determining result set behavior:: - - stmt = text("SELECT id, name, timestamp FROM some_table") - stmt = stmt.columns( - column('id', Integer), - column('name', Unicode), - column('timestamp', DateTime) - ) - - for id, name, timestamp in connection.execute(stmt): - print(id, name, timestamp) - - Keyword arguments allow just the names and types of columns to be - specified, where the :func:`.column` elements will be generated - automatically:: - - stmt = text("SELECT id, name, timestamp FROM some_table") - stmt = stmt.columns( - id=Integer, - name=Unicode, - timestamp=DateTime - ) - - for id, name, timestamp in connection.execute(stmt): - print(id, name, timestamp) - - The :meth:`.TextClause.columns` method provides a direct - route to calling :meth:`.FromClause.alias` as well as - :meth:`.SelectBase.cte` against a textual SELECT statement:: - - stmt = stmt.columns(id=Integer, name=String).cte('st') - - stmt = select([sometable]).where(sometable.c.id == stmt.c.id) - - .. versionadded:: 0.9.0 :func:`.text` can now be converted into a - fully featured "selectable" construct using the - :meth:`.TextClause.columns` method. This method supersedes the - ``typemap`` argument to :func:`.text`. - - """ - - input_cols = [ - ColumnClause(col.key, types.pop(col.key)) - if col.key in types - else col - for col in cols - ] + [ColumnClause(key, type_) for key, type_ in types.items()] - return selectable.TextAsFrom(self, input_cols) - - @property - def type(self): - return type_api.NULLTYPE - - @property - def comparator(self): - return self.type.comparator_factory(self) - - def self_group(self, against=None): - if against is operators.in_op: - return Grouping(self) - else: - return self - - def _copy_internals(self, clone=_clone, **kw): - self._bindparams = dict((b.key, clone(b, **kw)) - for b in self._bindparams.values()) - - def get_children(self, **kwargs): - return list(self._bindparams.values()) - - def compare(self, other): - return isinstance(other, TextClause) and other.text == self.text - - -class Null(ColumnElement): - """Represent the NULL keyword in a SQL statement. 
- - :class:`.Null` is accessed as a constant via the - :func:`.null` function. - - """ - - __visit_name__ = 'null' - - @util.memoized_property - def type(self): - return type_api.NULLTYPE - - @classmethod - def _instance(cls): - """Return a constant :class:`.Null` construct.""" - - return Null() - - def compare(self, other): - return isinstance(other, Null) - - -class False_(ColumnElement): - """Represent the ``false`` keyword, or equivalent, in a SQL statement. - - :class:`.False_` is accessed as a constant via the - :func:`.false` function. - - """ - - __visit_name__ = 'false' - - @util.memoized_property - def type(self): - return type_api.BOOLEANTYPE - - def _negate(self): - return True_() - - @classmethod - def _instance(cls): - """Return a :class:`.False_` construct. - - E.g.:: - - >>> from sqlalchemy import false - >>> print select([t.c.x]).where(false()) - SELECT x FROM t WHERE false - - A backend which does not support true/false constants will render as - an expression against 1 or 0:: - - >>> print select([t.c.x]).where(false()) - SELECT x FROM t WHERE 0 = 1 - - The :func:`.true` and :func:`.false` constants also feature - "short circuit" operation within an :func:`.and_` or :func:`.or_` - conjunction:: - - >>> print select([t.c.x]).where(or_(t.c.x > 5, true())) - SELECT x FROM t WHERE true - - >>> print select([t.c.x]).where(and_(t.c.x > 5, false())) - SELECT x FROM t WHERE false - - .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature - better integrated behavior within conjunctions and on dialects - that don't support true/false constants. - - .. seealso:: - - :func:`.true` - - """ - - return False_() - - def compare(self, other): - return isinstance(other, False_) - - -class True_(ColumnElement): - """Represent the ``true`` keyword, or equivalent, in a SQL statement. - - :class:`.True_` is accessed as a constant via the - :func:`.true` function. - - """ - - __visit_name__ = 'true' - - @util.memoized_property - def type(self): - return type_api.BOOLEANTYPE - - def _negate(self): - return False_() - - @classmethod - def _ifnone(cls, other): - if other is None: - return cls._instance() - else: - return other - - @classmethod - def _instance(cls): - """Return a constant :class:`.True_` construct. - - E.g.:: - - >>> from sqlalchemy import true - >>> print select([t.c.x]).where(true()) - SELECT x FROM t WHERE true - - A backend which does not support true/false constants will render as - an expression against 1 or 0:: - - >>> print select([t.c.x]).where(true()) - SELECT x FROM t WHERE 1 = 1 - - The :func:`.true` and :func:`.false` constants also feature - "short circuit" operation within an :func:`.and_` or :func:`.or_` - conjunction:: - - >>> print select([t.c.x]).where(or_(t.c.x > 5, true())) - SELECT x FROM t WHERE true - - >>> print select([t.c.x]).where(and_(t.c.x > 5, false())) - SELECT x FROM t WHERE false - - .. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature - better integrated behavior within conjunctions and on dialects - that don't support true/false constants. - - .. seealso:: - - :func:`.false` - - """ - - return True_() - - def compare(self, other): - return isinstance(other, True_) - - -class ClauseList(ClauseElement): - """Describe a list of clauses, separated by an operator. - - By default, is comma-separated, such as a column listing. 
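The short-circuit behavior of :func:`.true` and :func:`.false` within conjunctions, as described above::

    from sqlalchemy import and_, false, or_, select, true
    from sqlalchemy.sql import column, table

    t = table('t', column('x'))

    # or_() containing true() collapses to true, roughly:
    # SELECT t.x FROM t WHERE true
    print(select([t.c.x]).where(or_(t.c.x > 5, true())))

    # and_() containing false() collapses to false, roughly:
    # SELECT t.x FROM t WHERE false
    print(select([t.c.x]).where(and_(t.c.x > 5, false())))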
- - """ - __visit_name__ = 'clauselist' - - def __init__(self, *clauses, **kwargs): - self.operator = kwargs.pop('operator', operators.comma_op) - self.group = kwargs.pop('group', True) - self.group_contents = kwargs.pop('group_contents', True) - text_converter = kwargs.pop( - '_literal_as_text', - _expression_literal_as_text) - if self.group_contents: - self.clauses = [ - text_converter(clause).self_group(against=self.operator) - for clause in clauses] - else: - self.clauses = [ - text_converter(clause) - for clause in clauses] - - def __iter__(self): - return iter(self.clauses) - - def __len__(self): - return len(self.clauses) - - @property - def _select_iterable(self): - return iter(self) - - def append(self, clause): - if self.group_contents: - self.clauses.append(_literal_as_text(clause). - self_group(against=self.operator)) - else: - self.clauses.append(_literal_as_text(clause)) - - def _copy_internals(self, clone=_clone, **kw): - self.clauses = [clone(clause, **kw) for clause in self.clauses] - - def get_children(self, **kwargs): - return self.clauses - - @property - def _from_objects(self): - return list(itertools.chain(*[c._from_objects for c in self.clauses])) - - def self_group(self, against=None): - if self.group and operators.is_precedent(self.operator, against): - return Grouping(self) - else: - return self - - def compare(self, other, **kw): - """Compare this :class:`.ClauseList` to the given :class:`.ClauseList`, - including a comparison of all the clause items. - - """ - if not isinstance(other, ClauseList) and len(self.clauses) == 1: - return self.clauses[0].compare(other, **kw) - elif isinstance(other, ClauseList) and \ - len(self.clauses) == len(other.clauses): - for i in range(0, len(self.clauses)): - if not self.clauses[i].compare(other.clauses[i], **kw): - return False - else: - return self.operator == other.operator - else: - return False - - -class BooleanClauseList(ClauseList, ColumnElement): - __visit_name__ = 'clauselist' - - def __init__(self, *arg, **kw): - raise NotImplementedError( - "BooleanClauseList has a private constructor") - - @classmethod - def _construct(cls, operator, continue_on, skip_on, *clauses, **kw): - convert_clauses = [] - - clauses = [ - _expression_literal_as_text(clause) - for clause in - util.coerce_generator_arg(clauses) - ] - for clause in clauses: - - if isinstance(clause, continue_on): - continue - elif isinstance(clause, skip_on): - return clause.self_group(against=operators._asbool) - - convert_clauses.append(clause) - - if len(convert_clauses) == 1: - return convert_clauses[0].self_group(against=operators._asbool) - elif not convert_clauses and clauses: - return clauses[0].self_group(against=operators._asbool) - - convert_clauses = [c.self_group(against=operator) - for c in convert_clauses] - - self = cls.__new__(cls) - self.clauses = convert_clauses - self.group = True - self.operator = operator - self.group_contents = True - self.type = type_api.BOOLEANTYPE - return self - - @classmethod - def and_(cls, *clauses): - """Produce a conjunction of expressions joined by ``AND``. 
- - E.g.:: - - from sqlalchemy import and_ - - stmt = select([users_table]).where( - and_( - users_table.c.name == 'wendy', - users_table.c.enrolled == True - ) - ) - - The :func:`.and_` conjunction is also available using the - Python ``&`` operator (though note that compound expressions - need to be parenthesized in order to function with Python - operator precedence behavior):: - - stmt = select([users_table]).where( - (users_table.c.name == 'wendy') & - (users_table.c.enrolled == True) - ) - - The :func:`.and_` operation is also implicit in some cases; - the :meth:`.Select.where` method for example can be invoked multiple - times against a statement, which will have the effect of each - clause being combined using :func:`.and_`:: - - stmt = select([users_table]).\\ - where(users_table.c.name == 'wendy').\\ - where(users_table.c.enrolled == True) - - .. seealso:: - - :func:`.or_` - - """ - return cls._construct(operators.and_, True_, False_, *clauses) - - @classmethod - def or_(cls, *clauses): - """Produce a conjunction of expressions joined by ``OR``. - - E.g.:: - - from sqlalchemy import or_ - - stmt = select([users_table]).where( - or_( - users_table.c.name == 'wendy', - users_table.c.name == 'jack' - ) - ) - - The :func:`.or_` conjunction is also available using the - Python ``|`` operator (though note that compound expressions - need to be parenthesized in order to function with Python - operator precedence behavior):: - - stmt = select([users_table]).where( - (users_table.c.name == 'wendy') | - (users_table.c.name == 'jack') - ) - - .. seealso:: - - :func:`.and_` - - """ - return cls._construct(operators.or_, False_, True_, *clauses) - - @property - def _select_iterable(self): - return (self, ) - - def self_group(self, against=None): - if not self.clauses: - return self - else: - return super(BooleanClauseList, self).self_group(against=against) - - def _negate(self): - return ClauseList._negate(self) - - -and_ = BooleanClauseList.and_ -or_ = BooleanClauseList.or_ - - -class Tuple(ClauseList, ColumnElement): - """Represent a SQL tuple.""" - - def __init__(self, *clauses, **kw): - """Return a :class:`.Tuple`. - - Main usage is to produce a composite IN construct:: - - from sqlalchemy import tuple_ - - tuple_(table.c.col1, table.c.col2).in_( - [(1, 2), (5, 12), (10, 19)] - ) - - .. warning:: - - The composite IN construct is not supported by all backends, - and is currently known to work on Postgresql and MySQL, - but not SQLite. Unsupported backends will raise - a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such - an expression is invoked. - - """ - - clauses = [_literal_as_binds(c) for c in clauses] - self._type_tuple = [arg.type for arg in clauses] - self.type = kw.pop('type_', self._type_tuple[0] - if self._type_tuple else type_api.NULLTYPE) - - super(Tuple, self).__init__(*clauses, **kw) - - @property - def _select_iterable(self): - return (self, ) - - def _bind_param(self, operator, obj): - return Tuple(*[ - BindParameter(None, o, _compared_to_operator=operator, - _compared_to_type=type_, unique=True) - for o, type_ in zip(obj, self._type_tuple) - ]).self_group() - - -class Case(ColumnElement): - """Represent a ``CASE`` expression. - - :class:`.Case` is produced using the :func:`.case` factory function, - as in:: - - from sqlalchemy import case - - stmt = select([users_table]).\\ - where( - case( - [ - (users_table.c.name == 'wendy', 'W'), - (users_table.c.name == 'jack', 'J') - ], - else_='E' - ) - ) - - Details on :class:`.Case` usage is at :func:`.case`. - - .. 
seealso:: - - :func:`.case` - - """ - - __visit_name__ = 'case' - - def __init__(self, whens, value=None, else_=None): - """Produce a ``CASE`` expression. - - The ``CASE`` construct in SQL is a conditional object that - acts somewhat analogously to an "if/then" construct in other - languages. It returns an instance of :class:`.Case`. - - :func:`.case` in its usual form is passed a list of "when" - constructs, that is, a list of conditions and results as tuples:: - - from sqlalchemy import case - - stmt = select([users_table]).\\ - where( - case( - [ - (users_table.c.name == 'wendy', 'W'), - (users_table.c.name == 'jack', 'J') - ], - else_='E' - ) - ) - - The above statement will produce SQL resembling:: - - SELECT id, name FROM user - WHERE CASE - WHEN (name = :name_1) THEN :param_1 - WHEN (name = :name_2) THEN :param_2 - ELSE :param_3 - END - - When simple equality expressions of several values against a single - parent column are needed, :func:`.case` also has a "shorthand" format - used via the - :paramref:`.case.value` parameter, which is passed a column - expression to be compared. In this form, the :paramref:`.case.whens` - parameter is passed as a dictionary containing expressions to be - compared against keyed to result expressions. The statement below is - equivalent to the preceding statement:: - - stmt = select([users_table]).\\ - where( - case( - {"wendy": "W", "jack": "J"}, - value=users_table.c.name, - else_='E' - ) - ) - - The values which are accepted as result values in - :paramref:`.case.whens` as well as with :paramref:`.case.else_` are - coerced from Python literals into :func:`.bindparam` constructs. - SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted - as well. To coerce a literal string expression into a constant - expression rendered inline, use the :func:`.literal_column` construct, - as in:: - - from sqlalchemy import case, literal_column - - case( - [ - ( - orderline.c.qty > 100, - literal_column("'greaterthan100'") - ), - ( - orderline.c.qty > 10, - literal_column("'greaterthan10'") - ) - ], - else_=literal_column("'lessthan10'") - ) - - The above will render the given constants without using bound - parameters for the result values (but still for the comparison - values), as in:: - - CASE - WHEN (orderline.qty > :qty_1) THEN 'greaterthan100' - WHEN (orderline.qty > :qty_2) THEN 'greaterthan10' - ELSE 'lessthan10' - END - - :param whens: The criteria to be compared against, - :paramref:`.case.whens` accepts two different forms, based on - whether or not :paramref:`.case.value` is used. - - In the first form, it accepts a list of 2-tuples; each 2-tuple - consists of ``(, )``, where the SQL - expression is a boolean expression and "value" is a resulting value, - e.g.:: - - case([ - (users_table.c.name == 'wendy', 'W'), - (users_table.c.name == 'jack', 'J') - ]) - - In the second form, it accepts a Python dictionary of comparison - values mapped to a resulting value; this form requires - :paramref:`.case.value` to be present, and values will be compared - using the ``==`` operator, e.g.:: - - case( - {"wendy": "W", "jack": "J"}, - value=users_table.c.name - ) - - :param value: An optional SQL expression which will be used as a - fixed "comparison point" for candidate values within a dictionary - passed to :paramref:`.case.whens`. - - :param else\_: An optional SQL expression which will be the evaluated - result of the ``CASE`` construct if all expressions within - :paramref:`.case.whens` evaluate to false. 
When omitted, most - databases will produce a result of NULL if none of the "when" - expressions evaluate to true. - - - """ - - try: - whens = util.dictlike_iteritems(whens) - except TypeError: - pass - - if value is not None: - whenlist = [ - (_literal_as_binds(c).self_group(), - _literal_as_binds(r)) for (c, r) in whens - ] - else: - whenlist = [ - (_no_literals(c).self_group(), - _literal_as_binds(r)) for (c, r) in whens - ] - - if whenlist: - type_ = list(whenlist[-1])[-1].type - else: - type_ = None - - if value is None: - self.value = None - else: - self.value = _literal_as_binds(value) - - self.type = type_ - self.whens = whenlist - if else_ is not None: - self.else_ = _literal_as_binds(else_) - else: - self.else_ = None - - def _copy_internals(self, clone=_clone, **kw): - if self.value is not None: - self.value = clone(self.value, **kw) - self.whens = [(clone(x, **kw), clone(y, **kw)) - for x, y in self.whens] - if self.else_ is not None: - self.else_ = clone(self.else_, **kw) - - def get_children(self, **kwargs): - if self.value is not None: - yield self.value - for x, y in self.whens: - yield x - yield y - if self.else_ is not None: - yield self.else_ - - @property - def _from_objects(self): - return list(itertools.chain(*[x._from_objects for x in - self.get_children()])) - - -def literal_column(text, type_=None): - """Produce a :class:`.ColumnClause` object that has the - :paramref:`.column.is_literal` flag set to True. - - :func:`.literal_column` is similar to :func:`.column`, except that - it is more often used as a "standalone" column expression that renders - exactly as stated; while :func:`.column` stores a string name that - will be assumed to be part of a table and may be quoted as such, - :func:`.literal_column` can be that, or any other arbitrary column-oriented - expression. - - :param text: the text of the expression; can be any SQL expression. - Quoting rules will not be applied. To specify a column-name expression - which should be subject to quoting rules, use the :func:`column` - function. - - :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` - object which will - provide result-set translation and additional expression semantics for - this column. If left as None the type will be NullType. - - .. seealso:: - - :func:`.column` - - :func:`.text` - - :ref:`sqlexpression_literal_column` - - """ - return ColumnClause(text, type_=type_, is_literal=True) - - -class Cast(ColumnElement): - """Represent a ``CAST`` expression. - - :class:`.Cast` is produced using the :func:`.cast` factory function, - as in:: - - from sqlalchemy import cast, Numeric - - stmt = select([ - cast(product_table.c.unit_price, Numeric(10, 4)) - ]) - - Details on :class:`.Cast` usage is at :func:`.cast`. - - .. seealso:: - - :func:`.cast` - - """ - - __visit_name__ = 'cast' - - def __init__(self, expression, type_): - """Produce a ``CAST`` expression. - - :func:`.cast` returns an instance of :class:`.Cast`. - - E.g.:: - - from sqlalchemy import cast, Numeric - - stmt = select([ - cast(product_table.c.unit_price, Numeric(10, 4)) - ]) - - The above statement will produce SQL resembling:: - - SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product - - The :func:`.cast` function performs two distinct functions when - used. The first is that it renders the ``CAST`` expression within - the resulting SQL string. The second is that it associates the given - type (e.g. 
:class:`.TypeEngine` class or instance) with the column - expression on the Python side, which means the expression will take - on the expression operator behavior associated with that type, - as well as the bound-value handling and result-row-handling behavior - of the type. - - .. versionchanged:: 0.9.0 :func:`.cast` now applies the given type - to the expression such that it takes effect on the bound-value, - e.g. the Python-to-database direction, in addition to the - result handling, e.g. database-to-Python, direction. - - An alternative to :func:`.cast` is the :func:`.type_coerce` function. - This function performs the second task of associating an expression - with a specific type, but does not render the ``CAST`` expression - in SQL. - - :param expression: A SQL expression, such as a :class:`.ColumnElement` - expression or a Python string which will be coerced into a bound - literal value. - - :param type_: A :class:`.TypeEngine` class or instance indicating - the type to which the ``CAST`` should apply. - - .. seealso:: - - :func:`.type_coerce` - Python-side type coercion without emitting - CAST. - - """ - self.type = type_api.to_instance(type_) - self.clause = _literal_as_binds(expression, type_=self.type) - self.typeclause = TypeClause(self.type) - - def _copy_internals(self, clone=_clone, **kw): - self.clause = clone(self.clause, **kw) - self.typeclause = clone(self.typeclause, **kw) - - def get_children(self, **kwargs): - return self.clause, self.typeclause - - @property - def _from_objects(self): - return self.clause._from_objects - - -class Extract(ColumnElement): - """Represent a SQL EXTRACT clause, ``extract(field FROM expr)``.""" - - __visit_name__ = 'extract' - - def __init__(self, field, expr, **kwargs): - """Return a :class:`.Extract` construct. - - This is typically available as :func:`.extract` - as well as ``func.extract`` from the - :data:`.func` namespace. - - """ - self.type = type_api.INTEGERTYPE - self.field = field - self.expr = _literal_as_binds(expr, None) - - def _copy_internals(self, clone=_clone, **kw): - self.expr = clone(self.expr, **kw) - - def get_children(self, **kwargs): - return self.expr, - - @property - def _from_objects(self): - return self.expr._from_objects - - -class _label_reference(ColumnElement): - """Wrap a column expression as it appears in a 'reference' context. - - This expression is any that inclues an _order_by_label_element, - which is a Label, or a DESC / ASC construct wrapping a Label. - - The production of _label_reference() should occur when an expression - is added to this context; this includes the ORDER BY or GROUP BY of a - SELECT statement, as well as a few other places, such as the ORDER BY - within an OVER clause. - - """ - __visit_name__ = 'label_reference' - - def __init__(self, element): - self.element = element - - def _copy_internals(self, clone=_clone, **kw): - self.element = clone(self.element, **kw) - - @property - def _from_objects(self): - return () - - -class _textual_label_reference(ColumnElement): - __visit_name__ = 'textual_label_reference' - - def __init__(self, element): - self.element = element - - @util.memoized_property - def _text_clause(self): - return TextClause._create_text(self.element) - - -class UnaryExpression(ColumnElement): - """Define a 'unary' expression. - - A unary expression has a single column expression - and an operator. The operator can be placed on the left - (where it is called the 'operator') or right (where it is called the - 'modifier') of the column expression. 
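A short sketch of :class:`.Extract` in use (assuming an ad-hoc ``t`` table with a ``created_at`` column, rendered on the default dialect)::

    from sqlalchemy import extract, select, table, column

    t = table("t", column("created_at"))

    print(select([extract("year", t.c.created_at)]))
    # SELECT EXTRACT(year FROM t.created_at) AS anon_1 FROM t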
- - :class:`.UnaryExpression` is the basis for several unary operators - including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`, - :func:`.nullsfirst` and :func:`.nullslast`. - - """ - __visit_name__ = 'unary' - - def __init__(self, element, operator=None, modifier=None, - type_=None, negate=None, wraps_column_expression=False): - self.operator = operator - self.modifier = modifier - self.element = element.self_group( - against=self.operator or self.modifier) - self.type = type_api.to_instance(type_) - self.negate = negate - self.wraps_column_expression = wraps_column_expression - - @classmethod - def _create_nullsfirst(cls, column): - """Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression. - - :func:`.nullsfirst` is intended to modify the expression produced - by :func:`.asc` or :func:`.desc`, and indicates how NULL values - should be handled when they are encountered during ordering:: - - - from sqlalchemy import desc, nullsfirst - - stmt = select([users_table]).\\ - order_by(nullsfirst(desc(users_table.c.name))) - - The SQL expression from the above would resemble:: - - SELECT id, name FROM user ORDER BY name DESC NULLS FIRST - - Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically - invoked from the column expression itself using - :meth:`.ColumnElement.nullsfirst`, rather than as its standalone - function version, as in:: - - stmt = (select([users_table]). - order_by(users_table.c.name.desc().nullsfirst()) - ) - - .. seealso:: - - :func:`.asc` - - :func:`.desc` - - :func:`.nullslast` - - :meth:`.Select.order_by` - - """ - return UnaryExpression( - _literal_as_label_reference(column), - modifier=operators.nullsfirst_op, - wraps_column_expression=False) - - @classmethod - def _create_nullslast(cls, column): - """Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression. - - :func:`.nullslast` is intended to modify the expression produced - by :func:`.asc` or :func:`.desc`, and indicates how NULL values - should be handled when they are encountered during ordering:: - - - from sqlalchemy import desc, nullslast - - stmt = select([users_table]).\\ - order_by(nullslast(desc(users_table.c.name))) - - The SQL expression from the above would resemble:: - - SELECT id, name FROM user ORDER BY name DESC NULLS LAST - - Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically - invoked from the column expression itself using - :meth:`.ColumnElement.nullslast`, rather than as its standalone - function version, as in:: - - stmt = select([users_table]).\\ - order_by(users_table.c.name.desc().nullslast()) - - .. seealso:: - - :func:`.asc` - - :func:`.desc` - - :func:`.nullsfirst` - - :meth:`.Select.order_by` - - """ - return UnaryExpression( - _literal_as_label_reference(column), - modifier=operators.nullslast_op, - wraps_column_expression=False) - - @classmethod - def _create_desc(cls, column): - """Produce a descending ``ORDER BY`` clause element. - - e.g.:: - - from sqlalchemy import desc - - stmt = select([users_table]).order_by(desc(users_table.c.name)) - - will produce SQL as:: - - SELECT id, name FROM user ORDER BY name DESC - - The :func:`.desc` function is a standalone version of the - :meth:`.ColumnElement.desc` method available on all SQL expressions, - e.g.:: - - - stmt = select([users_table]).order_by(users_table.c.name.desc()) - - :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression) - with which to apply the :func:`.desc` operation. - - .. 
seealso:: - - :func:`.asc` - - :func:`.nullsfirst` - - :func:`.nullslast` - - :meth:`.Select.order_by` - - """ - return UnaryExpression( - _literal_as_label_reference(column), - modifier=operators.desc_op, - wraps_column_expression=False) - - @classmethod - def _create_asc(cls, column): - """Produce an ascending ``ORDER BY`` clause element. - - e.g.:: - - from sqlalchemy import asc - stmt = select([users_table]).order_by(asc(users_table.c.name)) - - will produce SQL as:: - - SELECT id, name FROM user ORDER BY name ASC - - The :func:`.asc` function is a standalone version of the - :meth:`.ColumnElement.asc` method available on all SQL expressions, - e.g.:: - - - stmt = select([users_table]).order_by(users_table.c.name.asc()) - - :param column: A :class:`.ColumnElement` (e.g. scalar SQL expression) - with which to apply the :func:`.asc` operation. - - .. seealso:: - - :func:`.desc` - - :func:`.nullsfirst` - - :func:`.nullslast` - - :meth:`.Select.order_by` - - """ - return UnaryExpression( - _literal_as_label_reference(column), - modifier=operators.asc_op, - wraps_column_expression=False) - - @classmethod - def _create_distinct(cls, expr): - """Produce an column-expression-level unary ``DISTINCT`` clause. - - This applies the ``DISTINCT`` keyword to an individual column - expression, and is typically contained within an aggregate function, - as in:: - - from sqlalchemy import distinct, func - stmt = select([func.count(distinct(users_table.c.name))]) - - The above would produce an expression resembling:: - - SELECT COUNT(DISTINCT name) FROM user - - The :func:`.distinct` function is also available as a column-level - method, e.g. :meth:`.ColumnElement.distinct`, as in:: - - stmt = select([func.count(users_table.c.name.distinct())]) - - The :func:`.distinct` operator is different from the - :meth:`.Select.distinct` method of :class:`.Select`, - which produces a ``SELECT`` statement - with ``DISTINCT`` applied to the result set as a whole, - e.g. a ``SELECT DISTINCT`` expression. See that method for further - information. - - .. 
seealso:: - - :meth:`.ColumnElement.distinct` - - :meth:`.Select.distinct` - - :data:`.func` - - """ - expr = _literal_as_binds(expr) - return UnaryExpression( - expr, operator=operators.distinct_op, - type_=expr.type, wraps_column_expression=False) - - @property - def _order_by_label_element(self): - if self.modifier in (operators.desc_op, operators.asc_op): - return self.element._order_by_label_element - else: - return None - - @property - def _from_objects(self): - return self.element._from_objects - - def _copy_internals(self, clone=_clone, **kw): - self.element = clone(self.element, **kw) - - def get_children(self, **kwargs): - return self.element, - - def compare(self, other, **kw): - """Compare this :class:`UnaryExpression` against the given - :class:`.ClauseElement`.""" - - return ( - isinstance(other, UnaryExpression) and - self.operator == other.operator and - self.modifier == other.modifier and - self.element.compare(other.element, **kw) - ) - - def _negate(self): - if self.negate is not None: - return UnaryExpression( - self.element, - operator=self.negate, - negate=self.operator, - modifier=self.modifier, - type_=self.type, - wraps_column_expression=self.wraps_column_expression) - else: - return ClauseElement._negate(self) - - def self_group(self, against=None): - if self.operator and operators.is_precedent(self.operator, against): - return Grouping(self) - else: - return self - - -class AsBoolean(UnaryExpression): - - def __init__(self, element, operator, negate): - self.element = element - self.type = type_api.BOOLEANTYPE - self.operator = operator - self.negate = negate - self.modifier = None - self.wraps_column_expression = True - - def self_group(self, against=None): - return self - - def _negate(self): - return self.element._negate() - - -class BinaryExpression(ColumnElement): - """Represent an expression that is ``LEFT RIGHT``. 
- - A :class:`.BinaryExpression` is generated automatically - whenever two column expressions are used in a Python binary expression:: - - >>> from sqlalchemy.sql import column - >>> column('a') + column('b') - - >>> print column('a') + column('b') - a + b - - """ - - __visit_name__ = 'binary' - - def __init__(self, left, right, operator, type_=None, - negate=None, modifiers=None): - # allow compatibility with libraries that - # refer to BinaryExpression directly and pass strings - if isinstance(operator, util.string_types): - operator = operators.custom_op(operator) - self._orig = (left, right) - self.left = left.self_group(against=operator) - self.right = right.self_group(against=operator) - self.operator = operator - self.type = type_api.to_instance(type_) - self.negate = negate - - if modifiers is None: - self.modifiers = {} - else: - self.modifiers = modifiers - - def __bool__(self): - if self.operator in (operator.eq, operator.ne): - return self.operator(hash(self._orig[0]), hash(self._orig[1])) - else: - raise TypeError("Boolean value of this clause is not defined") - - __nonzero__ = __bool__ - - @property - def is_comparison(self): - return operators.is_comparison(self.operator) - - @property - def _from_objects(self): - return self.left._from_objects + self.right._from_objects - - def _copy_internals(self, clone=_clone, **kw): - self.left = clone(self.left, **kw) - self.right = clone(self.right, **kw) - - def get_children(self, **kwargs): - return self.left, self.right - - def compare(self, other, **kw): - """Compare this :class:`BinaryExpression` against the - given :class:`BinaryExpression`.""" - - return ( - isinstance(other, BinaryExpression) and - self.operator == other.operator and - ( - self.left.compare(other.left, **kw) and - self.right.compare(other.right, **kw) or - ( - operators.is_commutative(self.operator) and - self.left.compare(other.right, **kw) and - self.right.compare(other.left, **kw) - ) - ) - ) - - def self_group(self, against=None): - if operators.is_precedent(self.operator, against): - return Grouping(self) - else: - return self - - def _negate(self): - if self.negate is not None: - return BinaryExpression( - self.left, - self.right, - self.negate, - negate=self.operator, - type_=self.type, - modifiers=self.modifiers) - else: - return super(BinaryExpression, self)._negate() - - -class Grouping(ColumnElement): - """Represent a grouping within a column expression""" - - __visit_name__ = 'grouping' - - def __init__(self, element): - self.element = element - self.type = getattr(element, 'type', type_api.NULLTYPE) - - def self_group(self, against=None): - return self - - @property - def _key_label(self): - return self._label - - @property - def _label(self): - return getattr(self.element, '_label', None) or self.anon_label - - def _copy_internals(self, clone=_clone, **kw): - self.element = clone(self.element, **kw) - - def get_children(self, **kwargs): - return self.element, - - @property - def _from_objects(self): - return self.element._from_objects - - def __getattr__(self, attr): - return getattr(self.element, attr) - - def __getstate__(self): - return {'element': self.element, 'type': self.type} - - def __setstate__(self, state): - self.element = state['element'] - self.type = state['type'] - - def compare(self, other, **kw): - return isinstance(other, Grouping) and \ - self.element.compare(other.element) - - -class Over(ColumnElement): - """Represent an OVER clause. 
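A minimal sketch of how :class:`.BinaryExpression`, :class:`.Grouping` and the ``negate`` slot interact, using ad-hoc columns on the default dialect::

    from sqlalchemy import column

    a, b = column("a"), column("b")

    print(a + b)         # a + b
    print((a + b) * 2)   # (a + b) * :param_1  -- Grouping added for precedence
    print(~(a > b))      # a <= b              -- _negate() swaps the operator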
- - This is a special operator against a so-called - "window" function, as well as any aggregate function, - which produces results relative to the result set - itself. It's supported only by certain database - backends. - - """ - __visit_name__ = 'over' - - order_by = None - partition_by = None - - def __init__(self, func, partition_by=None, order_by=None): - """Produce an :class:`.Over` object against a function. - - Used against aggregate or so-called "window" functions, - for database backends that support window functions. - - E.g.:: - - from sqlalchemy import over - over(func.row_number(), order_by='x') - - Would produce "ROW_NUMBER() OVER(ORDER BY x)". - - :param func: a :class:`.FunctionElement` construct, typically - generated by :data:`~.expression.func`. - :param partition_by: a column element or string, or a list - of such, that will be used as the PARTITION BY clause - of the OVER construct. - :param order_by: a column element or string, or a list - of such, that will be used as the ORDER BY clause - of the OVER construct. - - This function is also available from the :data:`~.expression.func` - construct itself via the :meth:`.FunctionElement.over` method. - - .. versionadded:: 0.7 - - """ - self.func = func - if order_by is not None: - self.order_by = ClauseList( - *util.to_list(order_by), - _literal_as_text=_literal_as_label_reference) - if partition_by is not None: - self.partition_by = ClauseList( - *util.to_list(partition_by), - _literal_as_text=_literal_as_label_reference) - - @util.memoized_property - def type(self): - return self.func.type - - def get_children(self, **kwargs): - return [c for c in - (self.func, self.partition_by, self.order_by) - if c is not None] - - def _copy_internals(self, clone=_clone, **kw): - self.func = clone(self.func, **kw) - if self.partition_by is not None: - self.partition_by = clone(self.partition_by, **kw) - if self.order_by is not None: - self.order_by = clone(self.order_by, **kw) - - @property - def _from_objects(self): - return list(itertools.chain( - *[c._from_objects for c in - (self.func, self.partition_by, self.order_by) - if c is not None] - )) - - -class FunctionFilter(ColumnElement): - """Represent a function FILTER clause. - - This is a special operator against aggregate and window functions, - which controls which rows are passed to it. - It's supported only by certain database backends. - - Invocation of :class:`.FunctionFilter` is via - :meth:`.FunctionElement.filter`:: - - func.count(1).filter(True) - - .. versionadded:: 1.0.0 - - .. seealso:: - - :meth:`.FunctionElement.filter` - - """ - __visit_name__ = 'funcfilter' - - criterion = None - - def __init__(self, func, *criterion): - """Produce a :class:`.FunctionFilter` object against a function. - - Used against aggregate and window functions, - for database backends that support the "FILTER" clause. - - E.g.:: - - from sqlalchemy import funcfilter - funcfilter(func.count(1), MyClass.name == 'some name') - - Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')". - - This function is also available from the :data:`~.expression.func` - construct itself via the :meth:`.FunctionElement.filter` method. - - .. versionadded:: 1.0.0 - - .. seealso:: - - :meth:`.FunctionElement.filter` - - - """ - self.func = func - self.filter(*criterion) - - def filter(self, *criterion): - """Produce an additional FILTER against the function. - - This method adds additional criteria to the initial criteria - set up by :meth:`.FunctionElement.filter`. 
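A quick sketch of the OVER clause; as described above, plain strings are accepted for the PARTITION BY and ORDER BY arguments (column names here are illustrative)::

    from sqlalchemy import over, func

    print(over(func.row_number(), partition_by="dept", order_by="salary"))
    # row_number() OVER (PARTITION BY dept ORDER BY salary)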
- - Multiple criteria are joined together at SQL render time - via ``AND``. - - - """ - - for criterion in list(criterion): - criterion = _expression_literal_as_text(criterion) - - if self.criterion is not None: - self.criterion = self.criterion & criterion - else: - self.criterion = criterion - - return self - - def over(self, partition_by=None, order_by=None): - """Produce an OVER clause against this filtered function. - - Used against aggregate or so-called "window" functions, - for database backends that support window functions. - - The expression:: - - func.rank().filter(MyClass.y > 5).over(order_by='x') - - is shorthand for:: - - from sqlalchemy import over, funcfilter - over(funcfilter(func.rank(), MyClass.y > 5), order_by='x') - - See :func:`~.expression.over` for a full description. - - """ - return Over(self, partition_by=partition_by, order_by=order_by) - - @util.memoized_property - def type(self): - return self.func.type - - def get_children(self, **kwargs): - return [c for c in - (self.func, self.criterion) - if c is not None] - - def _copy_internals(self, clone=_clone, **kw): - self.func = clone(self.func, **kw) - if self.criterion is not None: - self.criterion = clone(self.criterion, **kw) - - @property - def _from_objects(self): - return list(itertools.chain( - *[c._from_objects for c in (self.func, self.criterion) - if c is not None] - )) - - -class Label(ColumnElement): - """Represents a column label (AS). - - Represent a label, as typically applied to any column-level - element using the ``AS`` sql keyword. - - """ - - __visit_name__ = 'label' - - def __init__(self, name, element, type_=None): - """Return a :class:`Label` object for the - given :class:`.ColumnElement`. - - A label changes the name of an element in the columns clause of a - ``SELECT`` statement, typically via the ``AS`` SQL keyword. - - This functionality is more conveniently available via the - :meth:`.ColumnElement.label` method on :class:`.ColumnElement`. - - :param name: label name - - :param obj: a :class:`.ColumnElement`. 
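A minimal sketch combining FILTER with OVER as described above, assuming ad-hoc ``id`` / ``active`` columns and the default dialect::

    from sqlalchemy import func, column

    cnt = func.count(column("id")).filter(column("active"))
    print(cnt)
    # count(id) FILTER (WHERE active)

    print(cnt.over(order_by="id"))
    # count(id) FILTER (WHERE active) OVER (ORDER BY id)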
- - """ - - if isinstance(element, Label): - self._resolve_label = element._label - - while isinstance(element, Label): - element = element.element - - if name: - self.name = name - self._resolve_label = self.name - else: - self.name = _anonymous_label( - '%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon')) - ) - - self.key = self._label = self._key_label = self.name - self._element = element - self._type = type_ - self._proxies = [element] - - def __reduce__(self): - return self.__class__, (self.name, self._element, self._type) - - @util.memoized_property - def _allow_label_resolve(self): - return self.element._allow_label_resolve - - @property - def _order_by_label_element(self): - return self - - @util.memoized_property - def type(self): - return type_api.to_instance( - self._type or getattr(self._element, 'type', None) - ) - - @util.memoized_property - def element(self): - return self._element.self_group(against=operators.as_) - - def self_group(self, against=None): - sub_element = self._element.self_group(against=against) - if sub_element is not self._element: - return Label(self.name, - sub_element, - type_=self._type) - else: - return self - - @property - def primary_key(self): - return self.element.primary_key - - @property - def foreign_keys(self): - return self.element.foreign_keys - - def get_children(self, **kwargs): - return self.element, - - def _copy_internals(self, clone=_clone, anonymize_labels=False, **kw): - self._element = clone(self._element, **kw) - self.__dict__.pop('element', None) - self.__dict__.pop('_allow_label_resolve', None) - if anonymize_labels: - self.name = self._resolve_label = _anonymous_label( - '%%(%d %s)s' % ( - id(self), getattr(self.element, 'name', 'anon')) - ) - self.key = self._label = self._key_label = self.name - - @property - def _from_objects(self): - return self.element._from_objects - - def _make_proxy(self, selectable, name=None, **kw): - e = self.element._make_proxy(selectable, - name=name if name else self.name) - e._proxies.append(self) - if self._type is not None: - e.type = self._type - return e - - -class ColumnClause(Immutable, ColumnElement): - """Represents a column expression from any textual string. - - The :class:`.ColumnClause`, a lightweight analogue to the - :class:`.Column` class, is typically invoked using the - :func:`.column` function, as in:: - - from sqlalchemy import column - - id, name = column("id"), column("name") - stmt = select([id, name]).select_from("user") - - The above statement would produce SQL like:: - - SELECT id, name FROM user - - :class:`.ColumnClause` is the immediate superclass of the schema-specific - :class:`.Column` object. While the :class:`.Column` class has all the - same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause` - class is usable by itself in those cases where behavioral requirements - are limited to simple SQL expression generation. The object has none of - the associations with schema-level metadata or with execution-time - behavior that :class:`.Column` does, so in that sense is a "lightweight" - version of :class:`.Column`. - - Full details on :class:`.ColumnClause` usage is at :func:`.column`. - - .. seealso:: - - :func:`.column` - - :class:`.Column` - - """ - __visit_name__ = 'column' - - onupdate = default = server_default = server_onupdate = None - - _memoized_property = util.group_expirable_memoized_property() - - def __init__(self, text, type_=None, is_literal=False, _selectable=None): - """Produce a :class:`.ColumnClause` object. 
- - The :class:`.ColumnClause` is a lightweight analogue to the - :class:`.Column` class. The :func:`.column` function can - be invoked with just a name alone, as in:: - - from sqlalchemy import column - - id, name = column("id"), column("name") - stmt = select([id, name]).select_from("user") - - The above statement would produce SQL like:: - - SELECT id, name FROM user - - Once constructed, :func:`.column` may be used like any other SQL - expression element such as within :func:`.select` constructs:: - - from sqlalchemy.sql import column - - id, name = column("id"), column("name") - stmt = select([id, name]).select_from("user") - - The text handled by :func:`.column` is assumed to be handled - like the name of a database column; if the string contains mixed case, - special characters, or matches a known reserved word on the target - backend, the column expression will render using the quoting - behavior determined by the backend. To produce a textual SQL - expression that is rendered exactly without any quoting, - use :func:`.literal_column` instead, or pass ``True`` as the - value of :paramref:`.column.is_literal`. Additionally, full SQL - statements are best handled using the :func:`.text` construct. - - :func:`.column` can be used in a table-like - fashion by combining it with the :func:`.table` function - (which is the lightweight analogue to :class:`.Table`) to produce - a working table construct with minimal boilerplate:: - - from sqlalchemy import table, column, select - - user = table("user", - column("id"), - column("name"), - column("description"), - ) - - stmt = select([user.c.description]).where(user.c.name == 'wendy') - - A :func:`.column` / :func:`.table` construct like that illustrated - above can be created in an - ad-hoc fashion and is not associated with any - :class:`.schema.MetaData`, DDL, or events, unlike its - :class:`.Table` counterpart. - - .. versionchanged:: 1.0.0 :func:`.expression.column` can now - be imported from the plain ``sqlalchemy`` namespace like any - other SQL element. - - :param text: the text of the element. - - :param type: :class:`.types.TypeEngine` object which can associate - this :class:`.ColumnClause` with a type. - - :param is_literal: if True, the :class:`.ColumnClause` is assumed to - be an exact expression that will be delivered to the output with no - quoting rules applied regardless of case sensitive settings. the - :func:`.literal_column()` function essentially invokes - :func:`.column` while passing ``is_literal=True``. - - .. 
seealso:: - - :class:`.Column` - - :func:`.literal_column` - - :func:`.table` - - :func:`.text` - - :ref:`sqlexpression_literal_column` - - """ - - self.key = self.name = text - self.table = _selectable - self.type = type_api.to_instance(type_) - self.is_literal = is_literal - - def _compare_name_for_result(self, other): - if self.is_literal or \ - self.table is None or self.table._textual or \ - not hasattr(other, 'proxy_set') or ( - isinstance(other, ColumnClause) and - (other.is_literal or - other.table is None or - other.table._textual) - ): - return (hasattr(other, 'name') and self.name == other.name) or \ - (hasattr(other, '_label') and self._label == other._label) - else: - return other.proxy_set.intersection(self.proxy_set) - - def _get_table(self): - return self.__dict__['table'] - - def _set_table(self, table): - self._memoized_property.expire_instance(self) - self.__dict__['table'] = table - table = property(_get_table, _set_table) - - @_memoized_property - def _from_objects(self): - t = self.table - if t is not None: - return [t] - else: - return [] - - @util.memoized_property - def description(self): - if util.py3k: - return self.name - else: - return self.name.encode('ascii', 'backslashreplace') - - @_memoized_property - def _key_label(self): - if self.key != self.name: - return self._gen_label(self.key) - else: - return self._label - - @_memoized_property - def _label(self): - return self._gen_label(self.name) - - @_memoized_property - def _render_label_in_columns_clause(self): - return self.table is not None - - def _gen_label(self, name): - t = self.table - - if self.is_literal: - return None - - elif t is not None and t.named_with_column: - if getattr(t, 'schema', None): - label = t.schema.replace('.', '_') + "_" + \ - t.name + "_" + name - else: - label = t.name + "_" + name - - # propagate name quoting rules for labels. 
- if getattr(name, "quote", None) is not None: - if isinstance(label, quoted_name): - label.quote = name.quote - else: - label = quoted_name(label, name.quote) - elif getattr(t.name, "quote", None) is not None: - # can't get this situation to occur, so let's - # assert false on it for now - assert not isinstance(label, quoted_name) - label = quoted_name(label, t.name.quote) - - # ensure the label name doesn't conflict with that - # of an existing column - if label in t.c: - _label = label - counter = 1 - while _label in t.c: - _label = label + "_" + str(counter) - counter += 1 - label = _label - - return _as_truncated(label) - - else: - return name - - def _bind_param(self, operator, obj): - return BindParameter(self.key, obj, - _compared_to_operator=operator, - _compared_to_type=self.type, - unique=True) - - def _make_proxy(self, selectable, name=None, attach=True, - name_is_truncatable=False, **kw): - # propagate the "is_literal" flag only if we are keeping our name, - # otherwise its considered to be a label - is_literal = self.is_literal and (name is None or name == self.name) - c = self._constructor( - _as_truncated(name or self.name) if - name_is_truncatable else - (name or self.name), - type_=self.type, - _selectable=selectable, - is_literal=is_literal - ) - if name is None: - c.key = self.key - c._proxies = [self] - if selectable._is_clone_of is not None: - c._is_clone_of = \ - selectable._is_clone_of.columns.get(c.key) - - if attach: - selectable._columns[c.key] = c - return c - - -class _IdentifiedClause(Executable, ClauseElement): - - __visit_name__ = 'identified' - _execution_options = \ - Executable._execution_options.union({'autocommit': False}) - - def __init__(self, ident): - self.ident = ident - - -class SavepointClause(_IdentifiedClause): - __visit_name__ = 'savepoint' - - -class RollbackToSavepointClause(_IdentifiedClause): - __visit_name__ = 'rollback_to_savepoint' - - -class ReleaseSavepointClause(_IdentifiedClause): - __visit_name__ = 'release_savepoint' - - -class quoted_name(util.MemoizedSlots, util.text_type): - """Represent a SQL identifier combined with quoting preferences. - - :class:`.quoted_name` is a Python unicode/str subclass which - represents a particular identifier name along with a - ``quote`` flag. This ``quote`` flag, when set to - ``True`` or ``False``, overrides automatic quoting behavior - for this identifier in order to either unconditionally quote - or to not quote the name. If left at its default of ``None``, - quoting behavior is applied to the identifier on a per-backend basis - based on an examination of the token itself. - - A :class:`.quoted_name` object with ``quote=True`` is also - prevented from being modified in the case of a so-called - "name normalize" option. Certain database backends, such as - Oracle, Firebird, and DB2 "normalize" case-insensitive names - as uppercase. The SQLAlchemy dialects for these backends - convert from SQLAlchemy's lower-case-means-insensitive convention - to the upper-case-means-insensitive conventions of those backends. - The ``quote=True`` flag here will prevent this conversion from occurring - to support an identifier that's quoted as all lower case against - such a backend. - - The :class:`.quoted_name` object is normally created automatically - when specifying the name for key schema constructs such as - :class:`.Table`, :class:`.Column`, and others. The class can also be - passed explicitly as the name to any function that receives a name which - can be quoted. 
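A quick sketch of the quoting distinction between :func:`.column` and :func:`.literal_column` on the default dialect, where mixed case should trigger quoting::

    from sqlalchemy import select, column, literal_column

    print(select([column("SomeName")]))          # SELECT "SomeName"
    print(select([literal_column("SomeName")]))  # SELECT SomeName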
Such as to use the :meth:`.Engine.has_table` method with - an unconditionally quoted name:: - - from sqlaclchemy import create_engine - from sqlalchemy.sql.elements import quoted_name - - engine = create_engine("oracle+cx_oracle://some_dsn") - engine.has_table(quoted_name("some_table", True)) - - The above logic will run the "has table" logic against the Oracle backend, - passing the name exactly as ``"some_table"`` without converting to - upper case. - - .. versionadded:: 0.9.0 - - """ - - __slots__ = 'quote', 'lower', 'upper' - - def __new__(cls, value, quote): - if value is None: - return None - # experimental - don't bother with quoted_name - # if quote flag is None. doesn't seem to make any dent - # in performance however - # elif not sprcls and quote is None: - # return value - elif isinstance(value, cls) and ( - quote is None or value.quote == quote - ): - return value - self = super(quoted_name, cls).__new__(cls, value) - self.quote = quote - return self - - def __reduce__(self): - return quoted_name, (util.text_type(self), self.quote) - - def _memoized_method_lower(self): - if self.quote: - return self - else: - return util.text_type(self).lower() - - def _memoized_method_upper(self): - if self.quote: - return self - else: - return util.text_type(self).upper() - - def __repr__(self): - backslashed = self.encode('ascii', 'backslashreplace') - if not util.py2k: - backslashed = backslashed.decode('ascii') - return "'%s'" % backslashed - - -class _truncated_label(quoted_name): - """A unicode subclass used to identify symbolic " - "names that may require truncation.""" - - __slots__ = () - - def __new__(cls, value, quote=None): - quote = getattr(value, "quote", quote) - # return super(_truncated_label, cls).__new__(cls, value, quote, True) - return super(_truncated_label, cls).__new__(cls, value, quote) - - def __reduce__(self): - return self.__class__, (util.text_type(self), self.quote) - - def apply_map(self, map_): - return self - - -class conv(_truncated_label): - """Mark a string indicating that a name has already been converted - by a naming convention. - - This is a string subclass that indicates a name that should not be - subject to any further naming conventions. - - E.g. when we create a :class:`.Constraint` using a naming convention - as follows:: - - m = MetaData(naming_convention={ - "ck": "ck_%(table_name)s_%(constraint_name)s" - }) - t = Table('t', m, Column('x', Integer), - CheckConstraint('x > 5', name='x5')) - - The name of the above constraint will be rendered as ``"ck_t_x5"``. - That is, the existing name ``x5`` is used in the naming convention as the - ``constraint_name`` token. - - In some situations, such as in migration scripts, we may be rendering - the above :class:`.CheckConstraint` with a name that's already been - converted. In order to make sure the name isn't double-modified, the - new name is applied using the :func:`.schema.conv` marker. We can - use this explicitly as follows:: - - - m = MetaData(naming_convention={ - "ck": "ck_%(table_name)s_%(constraint_name)s" - }) - t = Table('t', m, Column('x', Integer), - CheckConstraint('x > 5', name=conv('ck_t_x5'))) - - Where above, the :func:`.schema.conv` marker indicates that the constraint - name here is final, and the name will render as ``"ck_t_x5"`` and not - ``"ck_t_ck_t_x5"`` - - .. versionadded:: 0.9.4 - - .. 
seealso:: - - :ref:`constraint_naming_conventions` - - """ - __slots__ = () - - -class _defer_name(_truncated_label): - """mark a name as 'deferred' for the purposes of automated name - generation. - - """ - __slots__ = () - - def __new__(cls, value): - if value is None: - return _NONE_NAME - elif isinstance(value, conv): - return value - else: - return super(_defer_name, cls).__new__(cls, value) - - def __reduce__(self): - return self.__class__, (util.text_type(self), ) - - -class _defer_none_name(_defer_name): - """indicate a 'deferred' name that was ultimately the value None.""" - __slots__ = () - -_NONE_NAME = _defer_none_name("_unnamed_") - -# for backwards compatibility in case -# someone is re-implementing the -# _truncated_identifier() sequence in a custom -# compiler -_generated_label = _truncated_label - - -class _anonymous_label(_truncated_label): - """A unicode subclass used to identify anonymously - generated names.""" - - __slots__ = () - - def __add__(self, other): - return _anonymous_label( - quoted_name( - util.text_type.__add__(self, util.text_type(other)), - self.quote) - ) - - def __radd__(self, other): - return _anonymous_label( - quoted_name( - util.text_type.__add__(util.text_type(other), self), - self.quote) - ) - - def apply_map(self, map_): - if self.quote is not None: - # preserve quoting only if necessary - return quoted_name(self % map_, self.quote) - else: - # else skip the constructor call - return self % map_ - - -def _as_truncated(value): - """coerce the given value to :class:`._truncated_label`. - - Existing :class:`._truncated_label` and - :class:`._anonymous_label` objects are passed - unchanged. - """ - - if isinstance(value, _truncated_label): - return value - else: - return _truncated_label(value) - - -def _string_or_unprintable(element): - if isinstance(element, util.string_types): - return element - else: - try: - return str(element) - except Exception: - return "unprintable element %r" % element - - -def _expand_cloned(elements): - """expand the given set of ClauseElements to be the set of all 'cloned' - predecessors. - - """ - return itertools.chain(*[x._cloned_set for x in elements]) - - -def _select_iterables(elements): - """expand tables into individual columns in the - given list of column expressions. - - """ - return itertools.chain(*[c._select_iterable for c in elements]) - - -def _cloned_intersection(a, b): - """return the intersection of sets a and b, counting - any overlap between 'cloned' predecessors. - - The returned set is in terms of the entities present within 'a'. - - """ - all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) - return set(elem for elem in a - if all_overlap.intersection(elem._cloned_set)) - - -def _cloned_difference(a, b): - all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) - return set(elem for elem in a - if not all_overlap.intersection(elem._cloned_set)) - - -def _labeled(element): - if not hasattr(element, 'name'): - return element.label(None) - else: - return element - - -def _is_column(col): - """True if ``col`` is an instance of :class:`.ColumnElement`.""" - - return isinstance(col, ColumnElement) - - -def _find_columns(clause): - """locate Column objects within the given expression.""" - - cols = util.column_set() - traverse(clause, {}, {'column': cols.add}) - return cols - - -# there is some inconsistency here between the usage of -# inspect() vs. checking for Visitable and __clause_element__. 
-# Ideally all functions here would derive from inspect(), -# however the inspect() versions add significant callcount -# overhead for critical functions like _interpret_as_column_or_from(). -# Generally, the column-based functions are more performance critical -# and are fine just checking for __clause_element__(). It is only -# _interpret_as_from() where we'd like to be able to receive ORM entities -# that have no defined namespace, hence inspect() is needed there. - - -def _column_as_key(element): - if isinstance(element, util.string_types): - return element - if hasattr(element, '__clause_element__'): - element = element.__clause_element__() - try: - return element.key - except AttributeError: - return None - - -def _clause_element_as_expr(element): - if hasattr(element, '__clause_element__'): - return element.__clause_element__() - else: - return element - - -def _literal_as_label_reference(element): - if isinstance(element, util.string_types): - return _textual_label_reference(element) - - elif hasattr(element, '__clause_element__'): - element = element.__clause_element__() - - return _literal_as_text(element) - - -def _literal_and_labels_as_label_reference(element): - if isinstance(element, util.string_types): - return _textual_label_reference(element) - - elif hasattr(element, '__clause_element__'): - element = element.__clause_element__() - - if isinstance(element, ColumnElement) and \ - element._order_by_label_element is not None: - return _label_reference(element) - else: - return _literal_as_text(element) - - -def _expression_literal_as_text(element): - return _literal_as_text(element, warn=True) - - -def _literal_as_text(element, warn=False): - if isinstance(element, Visitable): - return element - elif hasattr(element, '__clause_element__'): - return element.__clause_element__() - elif isinstance(element, util.string_types): - if warn: - util.warn_limited( - "Textual SQL expression %(expr)r should be " - "explicitly declared as text(%(expr)r)", - {"expr": util.ellipses_string(element)}) - - return TextClause(util.text_type(element)) - elif isinstance(element, (util.NoneType, bool)): - return _const_expr(element) - else: - raise exc.ArgumentError( - "SQL expression object or string expected, got object of type %r " - "instead" % type(element) - ) - - -def _no_literals(element): - if hasattr(element, '__clause_element__'): - return element.__clause_element__() - elif not isinstance(element, Visitable): - raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' " - "function to indicate a SQL expression " - "literal, or 'literal()' to indicate a " - "bound value." 
% element) - else: - return element - - -def _is_literal(element): - return not isinstance(element, Visitable) and \ - not hasattr(element, '__clause_element__') - - -def _only_column_elements_or_none(element, name): - if element is None: - return None - else: - return _only_column_elements(element, name) - - -def _only_column_elements(element, name): - if hasattr(element, '__clause_element__'): - element = element.__clause_element__() - if not isinstance(element, ColumnElement): - raise exc.ArgumentError( - "Column-based expression object expected for argument " - "'%s'; got: '%s', type %s" % (name, element, type(element))) - return element - - -def _literal_as_binds(element, name=None, type_=None): - if hasattr(element, '__clause_element__'): - return element.__clause_element__() - elif not isinstance(element, Visitable): - if element is None: - return Null() - else: - return BindParameter(name, element, type_=type_, unique=True) - else: - return element - -_guess_straight_column = re.compile(r'^\w\S*$', re.I) - - -def _interpret_as_column_or_from(element): - if isinstance(element, Visitable): - return element - elif hasattr(element, '__clause_element__'): - return element.__clause_element__() - - insp = inspection.inspect(element, raiseerr=False) - if insp is None: - if isinstance(element, (util.NoneType, bool)): - return _const_expr(element) - elif hasattr(insp, "selectable"): - return insp.selectable - - # be forgiving as this is an extremely common - # and known expression - if element == "*": - guess_is_literal = True - elif isinstance(element, (numbers.Number)): - return ColumnClause(str(element), is_literal=True) - else: - element = str(element) - # give into temptation, as this fact we are guessing about - # is not one we've previously ever needed our users tell us; - # but let them know we are not happy about it - guess_is_literal = not _guess_straight_column.match(element) - util.warn_limited( - "Textual column expression %(column)r should be " - "explicitly declared with text(%(column)r), " - "or use %(literal_column)s(%(column)r) " - "for more specificity", - { - "column": util.ellipses_string(element), - "literal_column": "literal_column" - if guess_is_literal else "column" - }) - return ColumnClause( - element, - is_literal=guess_is_literal) - - -def _const_expr(element): - if isinstance(element, (Null, False_, True_)): - return element - elif element is None: - return Null() - elif element is False: - return False_() - elif element is True: - return True_() - else: - raise exc.ArgumentError( - "Expected None, False, or True" - ) - - -def _type_from_args(args): - for a in args: - if not a.type._isnull: - return a.type - else: - return type_api.NULLTYPE - - -def _corresponding_column_or_error(fromclause, column, - require_embedded=False): - c = fromclause.corresponding_column(column, - require_embedded=require_embedded) - if c is None: - raise exc.InvalidRequestError( - "Given column '%s', attached to table '%s', " - "failed to locate a corresponding column from table '%s'" - % - (column, - getattr(column, 'table', None), - fromclause.description) - ) - return c - - -class AnnotatedColumnElement(Annotated): - def __init__(self, element, values): - Annotated.__init__(self, element, values) - ColumnElement.comparator._reset(self) - for attr in ('name', 'key', 'table'): - if self.__dict__.get(attr, False) is None: - self.__dict__.pop(attr) - - def _with_annotations(self, values): - clone = super(AnnotatedColumnElement, self)._with_annotations(values) - 
ColumnElement.comparator._reset(clone) - return clone - - @util.memoized_property - def name(self): - """pull 'name' from parent, if not present""" - return self._Annotated__element.name - - @util.memoized_property - def table(self): - """pull 'table' from parent, if not present""" - return self._Annotated__element.table - - @util.memoized_property - def key(self): - """pull 'key' from parent, if not present""" - return self._Annotated__element.key - - @util.memoized_property - def info(self): - return self._Annotated__element.info - - @util.memoized_property - def anon_label(self): - return self._Annotated__element.anon_label diff --git a/python/sqlalchemy/sql/expression.py b/python/sqlalchemy/sql/expression.py deleted file mode 100644 index 74b827d7..00000000 --- a/python/sqlalchemy/sql/expression.py +++ /dev/null @@ -1,137 +0,0 @@ -# sql/expression.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines the public namespace for SQL expression constructs. - -Prior to version 0.9, this module contained all of "elements", "dml", -"default_comparator" and "selectable". The module was broken up -and most "factory" functions were moved to be grouped with their associated -class. - -""" - -__all__ = [ - 'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement', - 'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select', - 'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between', - 'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct', - 'except_', 'except_all', 'exists', 'extract', 'func', 'modifier', - 'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label', - 'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast', - 'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery', - 'table', 'text', - 'tuple_', 'type_coerce', 'union', 'union_all', 'update'] - - -from .visitors import Visitable -from .functions import func, modifier, FunctionElement, Function -from ..util.langhelpers import public_factory -from .elements import ClauseElement, ColumnElement,\ - BindParameter, UnaryExpression, BooleanClauseList, \ - Label, Cast, Case, ColumnClause, TextClause, Over, Null, \ - True_, False_, BinaryExpression, Tuple, TypeClause, Extract, \ - Grouping, not_, \ - collate, literal_column, between,\ - literal, outparam, type_coerce, ClauseList, FunctionFilter - -from .elements import SavepointClause, RollbackToSavepointClause, \ - ReleaseSavepointClause - -from .base import ColumnCollection, Generative, Executable, \ - PARSE_AUTOCOMMIT - -from .selectable import Alias, Join, Select, Selectable, TableClause, \ - CompoundSelect, CTE, FromClause, FromGrouping, SelectBase, \ - alias, GenerativeSelect, \ - subquery, HasPrefixes, HasSuffixes, Exists, ScalarSelect, TextAsFrom - - -from .dml import Insert, Update, Delete, UpdateBase, ValuesBase - -# factory functions - these pull class-bound constructors and classmethods -# from SQL elements and selectables into public functions. This allows -# the functions to be available in the sqlalchemy.sql.* namespace and -# to be auto-cross-documenting from the function to the class itself. 
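The pattern that comment describes is easiest to see in use: the module-level name and the class-bound constructor it was generated from are the same callable. A minimal sketch against the 1.0-era API vendored here (not part of the patch itself):

    from sqlalchemy.sql.expression import and_, column
    from sqlalchemy.sql.elements import BooleanClauseList

    a, b = column('a'), column('b')

    # the public factory function...
    expr1 = and_(a == 1, b == 2)
    # ...delegates to the classmethod it was pulled from
    expr2 = BooleanClauseList.and_(a == 1, b == 2)

    print(expr1)                     # a = :a_1 AND b = :b_1
    print(str(expr1) == str(expr2))  # True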
- -and_ = public_factory(BooleanClauseList.and_, ".expression.and_") -or_ = public_factory(BooleanClauseList.or_, ".expression.or_") -bindparam = public_factory(BindParameter, ".expression.bindparam") -select = public_factory(Select, ".expression.select") -text = public_factory(TextClause._create_text, ".expression.text") -table = public_factory(TableClause, ".expression.table") -column = public_factory(ColumnClause, ".expression.column") -over = public_factory(Over, ".expression.over") -label = public_factory(Label, ".expression.label") -case = public_factory(Case, ".expression.case") -cast = public_factory(Cast, ".expression.cast") -extract = public_factory(Extract, ".expression.extract") -tuple_ = public_factory(Tuple, ".expression.tuple_") -except_ = public_factory(CompoundSelect._create_except, ".expression.except_") -except_all = public_factory( - CompoundSelect._create_except_all, ".expression.except_all") -intersect = public_factory( - CompoundSelect._create_intersect, ".expression.intersect") -intersect_all = public_factory( - CompoundSelect._create_intersect_all, ".expression.intersect_all") -union = public_factory(CompoundSelect._create_union, ".expression.union") -union_all = public_factory( - CompoundSelect._create_union_all, ".expression.union_all") -exists = public_factory(Exists, ".expression.exists") -nullsfirst = public_factory( - UnaryExpression._create_nullsfirst, ".expression.nullsfirst") -nullslast = public_factory( - UnaryExpression._create_nullslast, ".expression.nullslast") -asc = public_factory(UnaryExpression._create_asc, ".expression.asc") -desc = public_factory(UnaryExpression._create_desc, ".expression.desc") -distinct = public_factory( - UnaryExpression._create_distinct, ".expression.distinct") -true = public_factory(True_._instance, ".expression.true") -false = public_factory(False_._instance, ".expression.false") -null = public_factory(Null._instance, ".expression.null") -join = public_factory(Join._create_join, ".expression.join") -outerjoin = public_factory(Join._create_outerjoin, ".expression.outerjoin") -insert = public_factory(Insert, ".expression.insert") -update = public_factory(Update, ".expression.update") -delete = public_factory(Delete, ".expression.delete") -funcfilter = public_factory( - FunctionFilter, ".expression.funcfilter") - - -# internal functions still being called from tests and the ORM, -# these might be better off in some other namespace -from .base import _from_objects -from .elements import _literal_as_text, _clause_element_as_expr,\ - _is_column, _labeled, _only_column_elements, _string_or_unprintable, \ - _truncated_label, _clone, _cloned_difference, _cloned_intersection,\ - _column_as_key, _literal_as_binds, _select_iterables, \ - _corresponding_column_or_error, _literal_as_label_reference, \ - _expression_literal_as_text -from .selectable import _interpret_as_from - - -# old names for compatibility -_Executable = Executable -_BindParamClause = BindParameter -_Label = Label -_SelectBase = SelectBase -_BinaryExpression = BinaryExpression -_Cast = Cast -_Null = Null -_False = False_ -_True = True_ -_TextClause = TextClause -_UnaryExpression = UnaryExpression -_Case = Case -_Tuple = Tuple -_Over = Over -_Generative = Generative -_TypeClause = TypeClause -_Extract = Extract -_Exists = Exists -_Grouping = Grouping -_FromGrouping = FromGrouping -_ScalarSelect = ScalarSelect diff --git a/python/sqlalchemy/sql/functions.py b/python/sqlalchemy/sql/functions.py deleted file mode 100644 index 538a2c54..00000000 --- 
a/python/sqlalchemy/sql/functions.py +++ /dev/null @@ -1,618 +0,0 @@ -# sql/functions.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""SQL function API, factories, and built-in functions. - -""" -from . import sqltypes, schema -from .base import Executable, ColumnCollection -from .elements import ClauseList, Cast, Extract, _literal_as_binds, \ - literal_column, _type_from_args, ColumnElement, _clone,\ - Over, BindParameter, FunctionFilter -from .selectable import FromClause, Select, Alias - -from . import operators -from .visitors import VisitableType -from .. import util -from . import annotation - -_registry = util.defaultdict(dict) - - -def register_function(identifier, fn, package="_default"): - """Associate a callable with a particular func. name. - - This is normally called by _GenericMeta, but is also - available by itself so that a non-Function construct - can be associated with the :data:`.func` accessor (i.e. - CAST, EXTRACT). - - """ - reg = _registry[package] - reg[identifier] = fn - - -class FunctionElement(Executable, ColumnElement, FromClause): - """Base for SQL function-oriented constructs. - - .. seealso:: - - :class:`.Function` - named SQL function. - - :data:`.func` - namespace which produces registered or ad-hoc - :class:`.Function` instances. - - :class:`.GenericFunction` - allows creation of registered function - types. - - """ - - packagenames = () - - def __init__(self, *clauses, **kwargs): - """Construct a :class:`.FunctionElement`. - """ - args = [_literal_as_binds(c, self.name) for c in clauses] - self.clause_expr = ClauseList( - operator=operators.comma_op, - group_contents=True, *args).\ - self_group() - - def _execute_on_connection(self, connection, multiparams, params): - return connection._execute_function(self, multiparams, params) - - @property - def columns(self): - """The set of columns exported by this :class:`.FunctionElement`. - - Function objects currently have no result column names built in; - this method returns a single-element column collection with - an anonymously named column. - - An interim approach to providing named columns for a function - as a FROM clause is to build a :func:`.select` with the - desired columns:: - - from sqlalchemy.sql import column - - stmt = select([column('x'), column('y')]).\ - select_from(func.myfunction()) - - - """ - return ColumnCollection(self.label(None)) - - @util.memoized_property - def clauses(self): - """Return the underlying :class:`.ClauseList` which contains - the arguments for this :class:`.FunctionElement`. - - """ - return self.clause_expr.element - - def over(self, partition_by=None, order_by=None): - """Produce an OVER clause against this function. - - Used against aggregate or so-called "window" functions, - for database backends that support window functions. - - The expression:: - - func.row_number().over(order_by='x') - - is shorthand for:: - - from sqlalchemy import over - over(func.row_number(), order_by='x') - - See :func:`~.expression.over` for a full description. - - .. versionadded:: 0.7 - - """ - return Over(self, partition_by=partition_by, order_by=order_by) - - def filter(self, *criterion): - """Produce a FILTER clause against this function. - - Used against aggregate and window functions, - for database backends that support the "FILTER" clause. 
- - The expression:: - - func.count(1).filter(True) - - is shorthand for:: - - from sqlalchemy import funcfilter - funcfilter(func.count(1), True) - - .. versionadded:: 1.0.0 - - .. seealso:: - - :class:`.FunctionFilter` - - :func:`.funcfilter` - - - """ - if not criterion: - return self - return FunctionFilter(self, *criterion) - - @property - def _from_objects(self): - return self.clauses._from_objects - - def get_children(self, **kwargs): - return self.clause_expr, - - def _copy_internals(self, clone=_clone, **kw): - self.clause_expr = clone(self.clause_expr, **kw) - self._reset_exported() - FunctionElement.clauses._reset(self) - - def alias(self, name=None, flat=False): - """Produce a :class:`.Alias` construct against this - :class:`.FunctionElement`. - - This construct wraps the function in a named alias which - is suitable for the FROM clause. - - e.g.:: - - from sqlalchemy.sql import column - - stmt = select([column('data')]).select_from( - func.unnest(Table.data).alias('data_view') - ) - - Would produce: - - .. sourcecode:: sql - - SELECT data - FROM unnest(sometable.data) AS data_view - - .. versionadded:: 0.9.8 The :meth:`.FunctionElement.alias` method - is now supported. Previously, this method's behavior was - undefined and did not behave consistently across versions. - - """ - - return Alias(self, name) - - def select(self): - """Produce a :func:`~.expression.select` construct - against this :class:`.FunctionElement`. - - This is shorthand for:: - - s = select([function_element]) - - """ - s = Select([self]) - if self._execution_options: - s = s.execution_options(**self._execution_options) - return s - - def scalar(self): - """Execute this :class:`.FunctionElement` against an embedded - 'bind' and return a scalar value. - - This first calls :meth:`~.FunctionElement.select` to - produce a SELECT construct. - - Note that :class:`.FunctionElement` can be passed to - the :meth:`.Connectable.scalar` method of :class:`.Connection` - or :class:`.Engine`. - - """ - return self.select().execute().scalar() - - def execute(self): - """Execute this :class:`.FunctionElement` against an embedded - 'bind'. - - This first calls :meth:`~.FunctionElement.select` to - produce a SELECT construct. - - Note that :class:`.FunctionElement` can be passed to - the :meth:`.Connectable.execute` method of :class:`.Connection` - or :class:`.Engine`. 
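Both convenience methods above first wrap the function in a SELECT, as the docstrings note. A minimal runnable sketch, assuming an in-memory SQLite engine (the engine and connection names are illustrative):

    from sqlalchemy import create_engine, func

    engine = create_engine('sqlite://')
    conn = engine.connect()

    # equivalent to conn.execute(func.lower('ABC').select()).scalar()
    print(conn.execute(func.lower('ABC')).scalar())  # abc

    conn.close()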
- - """ - return self.select().execute() - - def _bind_param(self, operator, obj): - return BindParameter(None, obj, _compared_to_operator=operator, - _compared_to_type=self.type, unique=True) - - -class _FunctionGenerator(object): - """Generate :class:`.Function` objects based on getattr calls.""" - - def __init__(self, **opts): - self.__names = [] - self.opts = opts - - def __getattr__(self, name): - # passthru __ attributes; fixes pydoc - if name.startswith('__'): - try: - return self.__dict__[name] - except KeyError: - raise AttributeError(name) - - elif name.endswith('_'): - name = name[0:-1] - f = _FunctionGenerator(**self.opts) - f.__names = list(self.__names) + [name] - return f - - def __call__(self, *c, **kwargs): - o = self.opts.copy() - o.update(kwargs) - - tokens = len(self.__names) - - if tokens == 2: - package, fname = self.__names - elif tokens == 1: - package, fname = "_default", self.__names[0] - else: - package = None - - if package is not None: - func = _registry[package].get(fname) - if func is not None: - return func(*c, **o) - - return Function(self.__names[-1], - packagenames=self.__names[0:-1], *c, **o) - - -func = _FunctionGenerator() -"""Generate SQL function expressions. - - :data:`.func` is a special object instance which generates SQL - functions based on name-based attributes, e.g.:: - - >>> print func.count(1) - count(:param_1) - - The element is a column-oriented SQL element like any other, and is - used in that way:: - - >>> print select([func.count(table.c.id)]) - SELECT count(sometable.id) FROM sometable - - Any name can be given to :data:`.func`. If the function name is unknown to - SQLAlchemy, it will be rendered exactly as is. For common SQL functions - which SQLAlchemy is aware of, the name may be interpreted as a *generic - function* which will be compiled appropriately to the target database:: - - >>> print func.current_timestamp() - CURRENT_TIMESTAMP - - To call functions which are present in dot-separated packages, - specify them in the same manner:: - - >>> print func.stats.yield_curve(5, 10) - stats.yield_curve(:yield_curve_1, :yield_curve_2) - - SQLAlchemy can be made aware of the return type of functions to enable - type-specific lexical and result-based behavior. For example, to ensure - that a string-based function returns a Unicode value and is similarly - treated as a string in expressions, specify - :class:`~sqlalchemy.types.Unicode` as the type: - - >>> print func.my_string(u'hi', type_=Unicode) + ' ' + \ - ... func.my_string(u'there', type_=Unicode) - my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3) - - The object returned by a :data:`.func` call is usually an instance of - :class:`.Function`. - This object meets the "column" interface, including comparison and labeling - functions. The object can also be passed the :meth:`~.Connectable.execute` - method of a :class:`.Connection` or :class:`.Engine`, where it will be - wrapped inside of a SELECT statement first:: - - print connection.execute(func.current_timestamp()).scalar() - - In a few exception cases, the :data:`.func` accessor - will redirect a name to a built-in expression such as :func:`.cast` - or :func:`.extract`, as these names have well-known meaning - but are not exactly the same as "functions" from a SQLAlchemy - perspective. - - .. versionadded:: 0.8 :data:`.func` can return non-function expression - constructs for common quasi-functional names like :func:`.cast` - and :func:`.extract`. 
- - Functions which are interpreted as "generic" functions know how to - calculate their return type automatically. For a listing of known generic - functions, see :ref:`generic_functions`. - - .. note:: - - The :data:`.func` construct has only limited support for calling - standalone "stored procedures", especially those with special - parameterization concerns. - - See the section :ref:`stored_procedures` for details on how to use - the DBAPI-level ``callproc()`` method for fully traditional stored - procedures. - -""" - -modifier = _FunctionGenerator(group=False) - - -class Function(FunctionElement): - """Describe a named SQL function. - - See the superclass :class:`.FunctionElement` for a description - of public methods. - - .. seealso:: - - :data:`.func` - namespace which produces registered or ad-hoc - :class:`.Function` instances. - - :class:`.GenericFunction` - allows creation of registered function - types. - - """ - - __visit_name__ = 'function' - - def __init__(self, name, *clauses, **kw): - """Construct a :class:`.Function`. - - The :data:`.func` construct is normally used to construct - new :class:`.Function` instances. - - """ - self.packagenames = kw.pop('packagenames', None) or [] - self.name = name - self._bind = kw.get('bind', None) - self.type = sqltypes.to_instance(kw.get('type_', None)) - - FunctionElement.__init__(self, *clauses, **kw) - - def _bind_param(self, operator, obj): - return BindParameter(self.name, obj, - _compared_to_operator=operator, - _compared_to_type=self.type, - unique=True) - - -class _GenericMeta(VisitableType): - def __init__(cls, clsname, bases, clsdict): - if annotation.Annotated not in cls.__mro__: - cls.name = name = clsdict.get('name', clsname) - cls.identifier = identifier = clsdict.get('identifier', name) - package = clsdict.pop('package', '_default') - # legacy - if '__return_type__' in clsdict: - cls.type = clsdict['__return_type__'] - register_function(identifier, cls, package) - super(_GenericMeta, cls).__init__(clsname, bases, clsdict) - - -class GenericFunction(util.with_metaclass(_GenericMeta, Function)): - """Define a 'generic' function. - - A generic function is a pre-established :class:`.Function` - class that is instantiated automatically when called - by name from the :data:`.func` attribute. Note that - calling any name from :data:`.func` has the effect that - a new :class:`.Function` instance is created automatically, - given that name. The primary use case for defining - a :class:`.GenericFunction` class is so that a function - of a particular name may be given a fixed return type. - It can also include custom argument parsing schemes as well - as additional methods. - - Subclasses of :class:`.GenericFunction` are automatically - registered under the name of the class. For - example, a user-defined function ``as_utc()`` would - be available immediately:: - - from sqlalchemy.sql.functions import GenericFunction - from sqlalchemy.types import DateTime - - class as_utc(GenericFunction): - type = DateTime - - print select([func.as_utc()]) - - User-defined generic functions can be organized into - packages by specifying the "package" attribute when defining - :class:`.GenericFunction`. Third party libraries - containing many functions may want to use this in order - to avoid name conflicts with other systems. 
For example, - if our ``as_utc()`` function were part of a package - "time":: - - class as_utc(GenericFunction): - type = DateTime - package = "time" - - The above function would be available from :data:`.func` - using the package name ``time``:: - - print select([func.time.as_utc()]) - - A final option is to allow the function to be accessed - from one name in :data:`.func` but to render as a different name. - The ``identifier`` attribute will override the name used to - access the function as loaded from :data:`.func`, but will retain - the usage of ``name`` as the rendered name:: - - class GeoBuffer(GenericFunction): - type = Geometry - package = "geo" - name = "ST_Buffer" - identifier = "buffer" - - The above function will render as follows:: - - >>> print func.geo.buffer() - ST_Buffer() - - .. versionadded:: 0.8 :class:`.GenericFunction` now supports - automatic registration of new functions as well as package - and custom naming support. - - .. versionchanged:: 0.8 The attribute name ``type`` is used - to specify the function's return type at the class level. - Previously, the name ``__return_type__`` was used. This - name is still recognized for backwards-compatibility. - - """ - - coerce_arguments = True - - def __init__(self, *args, **kwargs): - parsed_args = kwargs.pop('_parsed_args', None) - if parsed_args is None: - parsed_args = [_literal_as_binds(c) for c in args] - self.packagenames = [] - self._bind = kwargs.get('bind', None) - self.clause_expr = ClauseList( - operator=operators.comma_op, - group_contents=True, *parsed_args).self_group() - self.type = sqltypes.to_instance( - kwargs.pop("type_", None) or getattr(self, 'type', None)) - -register_function("cast", Cast) -register_function("extract", Extract) - - -class next_value(GenericFunction): - """Represent the 'next value', given a :class:`.Sequence` - as its single argument. - - Compiles into the appropriate function on each backend, - or will raise NotImplementedError if used on a backend - that does not provide support for sequences. - - """ - type = sqltypes.Integer() - name = "next_value" - - def __init__(self, seq, **kw): - assert isinstance(seq, schema.Sequence), \ - "next_value() accepts a Sequence object as input." - self._bind = kw.get('bind', None) - self.sequence = seq - - @property - def _from_objects(self): - return [] - - -class AnsiFunction(GenericFunction): - def __init__(self, **kwargs): - GenericFunction.__init__(self, **kwargs) - - -class ReturnTypeFromArgs(GenericFunction): - """Define a function whose return type is the same as its arguments.""" - - def __init__(self, *args, **kwargs): - args = [_literal_as_binds(c) for c in args] - kwargs.setdefault('type_', _type_from_args(args)) - kwargs['_parsed_args'] = args - GenericFunction.__init__(self, *args, **kwargs) - - -class coalesce(ReturnTypeFromArgs): - pass - - -class max(ReturnTypeFromArgs): - pass - - -class min(ReturnTypeFromArgs): - pass - - -class sum(ReturnTypeFromArgs): - pass - - -class now(GenericFunction): - type = sqltypes.DateTime - - -class concat(GenericFunction): - type = sqltypes.String - - -class char_length(GenericFunction): - type = sqltypes.Integer - - def __init__(self, arg, **kwargs): - GenericFunction.__init__(self, arg, **kwargs) - - -class random(GenericFunction): - pass - - -class count(GenericFunction): - """The ANSI COUNT aggregate function. With no arguments, - emits COUNT \*. 
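A quick check of the generic aggregates defined above; a sketch with outputs in comments (1.0-era API assumed):

    from sqlalchemy import func, column, Integer

    print(func.count())              # count(*)
    print(func.count(column('id')))  # count(id)

    # ReturnTypeFromArgs subclasses (coalesce, max, min, sum) take their
    # return type from their arguments:
    print(func.max(column('n', Integer)).type)  # INTEGER

Registration of new generic functions works the same way: subclassing GenericFunction under a name such as as_utc makes func.as_utc() (or func.time.as_utc() with package = "time") resolve to that class, exactly as the docstring above demonstrates.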
- - """ - type = sqltypes.Integer - - def __init__(self, expression=None, **kwargs): - if expression is None: - expression = literal_column('*') - GenericFunction.__init__(self, expression, **kwargs) - - -class current_date(AnsiFunction): - type = sqltypes.Date - - -class current_time(AnsiFunction): - type = sqltypes.Time - - -class current_timestamp(AnsiFunction): - type = sqltypes.DateTime - - -class current_user(AnsiFunction): - type = sqltypes.String - - -class localtime(AnsiFunction): - type = sqltypes.DateTime - - -class localtimestamp(AnsiFunction): - type = sqltypes.DateTime - - -class session_user(AnsiFunction): - type = sqltypes.String - - -class sysdate(AnsiFunction): - type = sqltypes.DateTime - - -class user(AnsiFunction): - type = sqltypes.String diff --git a/python/sqlalchemy/sql/naming.py b/python/sqlalchemy/sql/naming.py deleted file mode 100644 index bc13835e..00000000 --- a/python/sqlalchemy/sql/naming.py +++ /dev/null @@ -1,146 +0,0 @@ -# sqlalchemy/naming.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Establish constraint and index naming conventions. - - -""" - -from .schema import Constraint, ForeignKeyConstraint, PrimaryKeyConstraint, \ - UniqueConstraint, CheckConstraint, Index, Table, Column -from .. import event, events -from .. import exc -from .elements import _truncated_label, _defer_name, _defer_none_name, conv -import re - - -class ConventionDict(object): - - def __init__(self, const, table, convention): - self.const = const - self._is_fk = isinstance(const, ForeignKeyConstraint) - self.table = table - self.convention = convention - self._const_name = const.name - - def _key_table_name(self): - return self.table.name - - def _column_X(self, idx): - if self._is_fk: - fk = self.const.elements[idx] - return fk.parent - else: - return list(self.const.columns)[idx] - - def _key_constraint_name(self): - if isinstance(self._const_name, (type(None), _defer_none_name)): - raise exc.InvalidRequestError( - "Naming convention including " - "%(constraint_name)s token requires that " - "constraint is explicitly named." 
- ) - if not isinstance(self._const_name, conv): - self.const.name = None - return self._const_name - - def _key_column_X_name(self, idx): - return self._column_X(idx).name - - def _key_column_X_label(self, idx): - return self._column_X(idx)._label - - def _key_referred_table_name(self): - fk = self.const.elements[0] - refs = fk.target_fullname.split(".") - if len(refs) == 3: - refschema, reftable, refcol = refs - else: - reftable, refcol = refs - return reftable - - def _key_referred_column_X_name(self, idx): - fk = self.const.elements[idx] - refs = fk.target_fullname.split(".") - if len(refs) == 3: - refschema, reftable, refcol = refs - else: - reftable, refcol = refs - return refcol - - def __getitem__(self, key): - if key in self.convention: - return self.convention[key](self.const, self.table) - elif hasattr(self, '_key_%s' % key): - return getattr(self, '_key_%s' % key)() - else: - col_template = re.match(r".*_?column_(\d+)_.+", key) - if col_template: - idx = col_template.group(1) - attr = "_key_" + key.replace(idx, "X") - idx = int(idx) - if hasattr(self, attr): - return getattr(self, attr)(idx) - raise KeyError(key) - -_prefix_dict = { - Index: "ix", - PrimaryKeyConstraint: "pk", - CheckConstraint: "ck", - UniqueConstraint: "uq", - ForeignKeyConstraint: "fk" -} - - -def _get_convention(dict_, key): - - for super_ in key.__mro__: - if super_ in _prefix_dict and _prefix_dict[super_] in dict_: - return dict_[_prefix_dict[super_]] - elif super_ in dict_: - return dict_[super_] - else: - return None - - -def _constraint_name_for_table(const, table): - metadata = table.metadata - convention = _get_convention(metadata.naming_convention, type(const)) - - if isinstance(const.name, conv): - return const.name - elif convention is not None and \ - not isinstance(const.name, conv) and \ - ( - const.name is None or - "constraint_name" in convention or - isinstance(const.name, _defer_name)): - return conv( - convention % ConventionDict(const, table, - metadata.naming_convention) - ) - elif isinstance(convention, _defer_none_name): - return None - - -@event.listens_for(Constraint, "after_parent_attach") -@event.listens_for(Index, "after_parent_attach") -def _constraint_name(const, table): - if isinstance(table, Column): - # for column-attached constraint, set another event - # to link the column attached to the table as this constraint - # associated with the table. - event.listen(table, "after_parent_attach", - lambda col, table: _constraint_name(const, table) - ) - elif isinstance(table, Table): - if isinstance(const.name, (conv, _defer_name)): - return - - newname = _constraint_name_for_table(const, table) - if newname is not None: - const.name = newname diff --git a/python/sqlalchemy/sql/operators.py b/python/sqlalchemy/sql/operators.py deleted file mode 100644 index 17a9d308..00000000 --- a/python/sqlalchemy/sql/operators.py +++ /dev/null @@ -1,902 +0,0 @@ -# sql/operators.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines operators used in SQL expressions.""" - -from .. 
import util - - -from operator import ( - and_, or_, inv, add, mul, sub, mod, truediv, lt, le, ne, gt, ge, eq, neg, - getitem, lshift, rshift -) - -if util.py2k: - from operator import div -else: - div = truediv - - -class Operators(object): - """Base of comparison and logical operators. - - Implements base methods - :meth:`~sqlalchemy.sql.operators.Operators.operate` and - :meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as - :meth:`~sqlalchemy.sql.operators.Operators.__and__`, - :meth:`~sqlalchemy.sql.operators.Operators.__or__`, - :meth:`~sqlalchemy.sql.operators.Operators.__invert__`. - - Usually is used via its most common subclass - :class:`.ColumnOperators`. - - """ - __slots__ = () - - def __and__(self, other): - """Implement the ``&`` operator. - - When used with SQL expressions, results in an - AND operation, equivalent to - :func:`~.expression.and_`, that is:: - - a & b - - is equivalent to:: - - from sqlalchemy import and_ - and_(a, b) - - Care should be taken when using ``&`` regarding - operator precedence; the ``&`` operator has the highest precedence. - The operands should be enclosed in parenthesis if they contain - further sub expressions:: - - (a == 2) & (b == 4) - - """ - return self.operate(and_, other) - - def __or__(self, other): - """Implement the ``|`` operator. - - When used with SQL expressions, results in an - OR operation, equivalent to - :func:`~.expression.or_`, that is:: - - a | b - - is equivalent to:: - - from sqlalchemy import or_ - or_(a, b) - - Care should be taken when using ``|`` regarding - operator precedence; the ``|`` operator has the highest precedence. - The operands should be enclosed in parenthesis if they contain - further sub expressions:: - - (a == 2) | (b == 4) - - """ - return self.operate(or_, other) - - def __invert__(self): - """Implement the ``~`` operator. - - When used with SQL expressions, results in a - NOT operation, equivalent to - :func:`~.expression.not_`, that is:: - - ~a - - is equivalent to:: - - from sqlalchemy import not_ - not_(a) - - """ - return self.operate(inv) - - def op(self, opstring, precedence=0, is_comparison=False): - """produce a generic operator function. - - e.g.:: - - somecolumn.op("*")(5) - - produces:: - - somecolumn * 5 - - This function can also be used to make bitwise operators explicit. For - example:: - - somecolumn.op('&')(0xff) - - is a bitwise AND of the value in ``somecolumn``. - - :param operator: a string which will be output as the infix operator - between this element and the expression passed to the - generated function. - - :param precedence: precedence to apply to the operator, when - parenthesizing expressions. A lower number will cause the expression - to be parenthesized when applied against another operator with - higher precedence. The default value of ``0`` is lower than all - operators except for the comma (``,``) and ``AS`` operators. - A value of 100 will be higher or equal to all operators, and -100 - will be lower than or equal to all operators. - - .. versionadded:: 0.8 - added the 'precedence' argument. - - :param is_comparison: if True, the operator will be considered as a - "comparison" operator, that is which evaluates to a boolean - true/false value, like ``==``, ``>``, etc. This flag should be set - so that ORM relationships can establish that the operator is a - comparison operator when used in a custom join condition. - - .. versionadded:: 0.9.2 - added the - :paramref:`.Operators.op.is_comparison` flag. - - .. 
seealso:: - - :ref:`types_operators` - - :ref:`relationship_custom_operator` - - """ - operator = custom_op(opstring, precedence, is_comparison) - - def against(other): - return operator(self, other) - return against - - def operate(self, op, *other, **kwargs): - """Operate on an argument. - - This is the lowest level of operation, raises - :class:`NotImplementedError` by default. - - Overriding this on a subclass can allow common - behavior to be applied to all operations. - For example, overriding :class:`.ColumnOperators` - to apply ``func.lower()`` to the left and right - side:: - - class MyComparator(ColumnOperators): - def operate(self, op, other): - return op(func.lower(self), func.lower(other)) - - :param op: Operator callable. - :param \*other: the 'other' side of the operation. Will - be a single scalar for most operations. - :param \**kwargs: modifiers. These may be passed by special - operators such as :meth:`ColumnOperators.contains`. - - - """ - raise NotImplementedError(str(op)) - - def reverse_operate(self, op, other, **kwargs): - """Reverse operate on an argument. - - Usage is the same as :meth:`operate`. - - """ - raise NotImplementedError(str(op)) - - -class custom_op(object): - """Represent a 'custom' operator. - - :class:`.custom_op` is normally instantitated when the - :meth:`.ColumnOperators.op` method is used to create a - custom operator callable. The class can also be used directly - when programmatically constructing expressions. E.g. - to represent the "factorial" operation:: - - from sqlalchemy.sql import UnaryExpression - from sqlalchemy.sql import operators - from sqlalchemy import Numeric - - unary = UnaryExpression(table.c.somecolumn, - modifier=operators.custom_op("!"), - type_=Numeric) - - """ - __name__ = 'custom_op' - - def __init__(self, opstring, precedence=0, is_comparison=False): - self.opstring = opstring - self.precedence = precedence - self.is_comparison = is_comparison - - def __eq__(self, other): - return isinstance(other, custom_op) and \ - other.opstring == self.opstring - - def __hash__(self): - return id(self) - - def __call__(self, left, right, **kw): - return left.operate(self, right, **kw) - - -class ColumnOperators(Operators): - """Defines boolean, comparison, and other operators for - :class:`.ColumnElement` expressions. - - By default, all methods call down to - :meth:`.operate` or :meth:`.reverse_operate`, - passing in the appropriate operator function from the - Python builtin ``operator`` module or - a SQLAlchemy-specific operator function from - :mod:`sqlalchemy.expression.operators`. For example - the ``__eq__`` function:: - - def __eq__(self, other): - return self.operate(operators.eq, other) - - Where ``operators.eq`` is essentially:: - - def eq(a, b): - return a == b - - The core column expression unit :class:`.ColumnElement` - overrides :meth:`.Operators.operate` and others - to return further :class:`.ColumnElement` constructs, - so that the ``==`` operation above is replaced by a clause - construct. - - See also: - - :ref:`types_operators` - - :attr:`.TypeEngine.comparator_factory` - - :class:`.ColumnOperators` - - :class:`.PropComparator` - - """ - - __slots__ = () - - timetuple = None - """Hack, allows datetime objects to be compared on the LHS.""" - - def __lt__(self, other): - """Implement the ``<`` operator. - - In a column context, produces the clause ``a < b``. - - """ - return self.operate(lt, other) - - def __le__(self, other): - """Implement the ``<=`` operator. 
- - In a column context, produces the clause ``a <= b``. - - """ - return self.operate(le, other) - - __hash__ = Operators.__hash__ - - def __eq__(self, other): - """Implement the ``==`` operator. - - In a column context, produces the clause ``a = b``. - If the target is ``None``, produces ``a IS NULL``. - - """ - return self.operate(eq, other) - - def __ne__(self, other): - """Implement the ``!=`` operator. - - In a column context, produces the clause ``a != b``. - If the target is ``None``, produces ``a IS NOT NULL``. - - """ - return self.operate(ne, other) - - def __gt__(self, other): - """Implement the ``>`` operator. - - In a column context, produces the clause ``a > b``. - - """ - return self.operate(gt, other) - - def __ge__(self, other): - """Implement the ``>=`` operator. - - In a column context, produces the clause ``a >= b``. - - """ - return self.operate(ge, other) - - def __neg__(self): - """Implement the ``-`` operator. - - In a column context, produces the clause ``-a``. - - """ - return self.operate(neg) - - def __getitem__(self, index): - """Implement the [] operator. - - This can be used by some database-specific types - such as Postgresql ARRAY and HSTORE. - - """ - return self.operate(getitem, index) - - def __lshift__(self, other): - """implement the << operator. - - Not used by SQLAlchemy core, this is provided - for custom operator systems which want to use - << as an extension point. - """ - return self.operate(lshift, other) - - def __rshift__(self, other): - """implement the >> operator. - - Not used by SQLAlchemy core, this is provided - for custom operator systems which want to use - >> as an extension point. - """ - return self.operate(rshift, other) - - def concat(self, other): - """Implement the 'concat' operator. - - In a column context, produces the clause ``a || b``, - or uses the ``concat()`` operator on MySQL. - - """ - return self.operate(concat_op, other) - - def like(self, other, escape=None): - """Implement the ``like`` operator. - - In a column context, produces the clause ``a LIKE other``. - - E.g.:: - - select([sometable]).where(sometable.c.column.like("%foobar%")) - - :param other: expression to be compared - :param escape: optional escape character, renders the ``ESCAPE`` - keyword, e.g.:: - - somecolumn.like("foo/%bar", escape="/") - - .. seealso:: - - :meth:`.ColumnOperators.ilike` - - """ - return self.operate(like_op, other, escape=escape) - - def ilike(self, other, escape=None): - """Implement the ``ilike`` operator. - - In a column context, produces the clause ``a ILIKE other``. - - E.g.:: - - select([sometable]).where(sometable.c.column.ilike("%foobar%")) - - :param other: expression to be compared - :param escape: optional escape character, renders the ``ESCAPE`` - keyword, e.g.:: - - somecolumn.ilike("foo/%bar", escape="/") - - .. seealso:: - - :meth:`.ColumnOperators.like` - - """ - return self.operate(ilike_op, other, escape=escape) - - def in_(self, other): - """Implement the ``in`` operator. - - In a column context, produces the clause ``a IN other``. - "other" may be a tuple/list of column expressions, - or a :func:`~.expression.select` construct. - - """ - return self.operate(in_op, other) - - def notin_(self, other): - """implement the ``NOT IN`` operator. - - This is equivalent to using negation with - :meth:`.ColumnOperators.in_`, i.e. ``~x.in_(y)``. - - .. versionadded:: 0.8 - - .. 
seealso:: - - :meth:`.ColumnOperators.in_` - - """ - return self.operate(notin_op, other) - - def notlike(self, other, escape=None): - """implement the ``NOT LIKE`` operator. - - This is equivalent to using negation with - :meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``. - - .. versionadded:: 0.8 - - .. seealso:: - - :meth:`.ColumnOperators.like` - - """ - return self.operate(notlike_op, other, escape=escape) - - def notilike(self, other, escape=None): - """implement the ``NOT ILIKE`` operator. - - This is equivalent to using negation with - :meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``. - - .. versionadded:: 0.8 - - .. seealso:: - - :meth:`.ColumnOperators.ilike` - - """ - return self.operate(notilike_op, other, escape=escape) - - def is_(self, other): - """Implement the ``IS`` operator. - - Normally, ``IS`` is generated automatically when comparing to a - value of ``None``, which resolves to ``NULL``. However, explicit - usage of ``IS`` may be desirable if comparing to boolean values - on certain platforms. - - .. versionadded:: 0.7.9 - - .. seealso:: :meth:`.ColumnOperators.isnot` - - """ - return self.operate(is_, other) - - def isnot(self, other): - """Implement the ``IS NOT`` operator. - - Normally, ``IS NOT`` is generated automatically when comparing to a - value of ``None``, which resolves to ``NULL``. However, explicit - usage of ``IS NOT`` may be desirable if comparing to boolean values - on certain platforms. - - .. versionadded:: 0.7.9 - - .. seealso:: :meth:`.ColumnOperators.is_` - - """ - return self.operate(isnot, other) - - def startswith(self, other, **kwargs): - """Implement the ``startwith`` operator. - - In a column context, produces the clause ``LIKE '%'`` - - """ - return self.operate(startswith_op, other, **kwargs) - - def endswith(self, other, **kwargs): - """Implement the 'endswith' operator. - - In a column context, produces the clause ``LIKE '%'`` - - """ - return self.operate(endswith_op, other, **kwargs) - - def contains(self, other, **kwargs): - """Implement the 'contains' operator. - - In a column context, produces the clause ``LIKE '%%'`` - - """ - return self.operate(contains_op, other, **kwargs) - - def match(self, other, **kwargs): - """Implements a database-specific 'match' operator. - - :meth:`~.ColumnOperators.match` attempts to resolve to - a MATCH-like function or operator provided by the backend. - Examples include: - - * Postgresql - renders ``x @@ to_tsquery(y)`` - * MySQL - renders ``MATCH (x) AGAINST (y IN BOOLEAN MODE)`` - * Oracle - renders ``CONTAINS(x, y)`` - * other backends may provide special implementations. - * Backends without any special implementation will emit - the operator as "MATCH". This is compatible with SQlite, for - example. 
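The per-backend renderings listed above can be verified by compiling one expression against different dialects; a sketch (the docs table is hypothetical):

    from sqlalchemy import Table, Column, String, MetaData
    from sqlalchemy.dialects import postgresql, mysql

    t = Table('docs', MetaData(), Column('body', String))
    expr = t.c.body.match('sqlalchemy')

    print(expr.compile(dialect=postgresql.dialect()))
    # docs.body @@ to_tsquery(%(body_1)s)
    print(expr.compile(dialect=mysql.dialect()))
    # MATCH (docs.body) AGAINST (%s IN BOOLEAN MODE)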
- - """ - return self.operate(match_op, other, **kwargs) - - def desc(self): - """Produce a :func:`~.expression.desc` clause against the - parent object.""" - return self.operate(desc_op) - - def asc(self): - """Produce a :func:`~.expression.asc` clause against the - parent object.""" - return self.operate(asc_op) - - def nullsfirst(self): - """Produce a :func:`~.expression.nullsfirst` clause against the - parent object.""" - return self.operate(nullsfirst_op) - - def nullslast(self): - """Produce a :func:`~.expression.nullslast` clause against the - parent object.""" - return self.operate(nullslast_op) - - def collate(self, collation): - """Produce a :func:`~.expression.collate` clause against - the parent object, given the collation string.""" - return self.operate(collate, collation) - - def __radd__(self, other): - """Implement the ``+`` operator in reverse. - - See :meth:`.ColumnOperators.__add__`. - - """ - return self.reverse_operate(add, other) - - def __rsub__(self, other): - """Implement the ``-`` operator in reverse. - - See :meth:`.ColumnOperators.__sub__`. - - """ - return self.reverse_operate(sub, other) - - def __rmul__(self, other): - """Implement the ``*`` operator in reverse. - - See :meth:`.ColumnOperators.__mul__`. - - """ - return self.reverse_operate(mul, other) - - def __rdiv__(self, other): - """Implement the ``/`` operator in reverse. - - See :meth:`.ColumnOperators.__div__`. - - """ - return self.reverse_operate(div, other) - - def __rmod__(self, other): - """Implement the ``%`` operator in reverse. - - See :meth:`.ColumnOperators.__mod__`. - - """ - return self.reverse_operate(mod, other) - - def between(self, cleft, cright, symmetric=False): - """Produce a :func:`~.expression.between` clause against - the parent object, given the lower and upper range. - - """ - return self.operate(between_op, cleft, cright, symmetric=symmetric) - - def distinct(self): - """Produce a :func:`~.expression.distinct` clause against the - parent object. - - """ - return self.operate(distinct_op) - - def __add__(self, other): - """Implement the ``+`` operator. - - In a column context, produces the clause ``a + b`` - if the parent object has non-string affinity. - If the parent object has a string affinity, - produces the concatenation operator, ``a || b`` - - see :meth:`.ColumnOperators.concat`. - - """ - return self.operate(add, other) - - def __sub__(self, other): - """Implement the ``-`` operator. - - In a column context, produces the clause ``a - b``. - - """ - return self.operate(sub, other) - - def __mul__(self, other): - """Implement the ``*`` operator. - - In a column context, produces the clause ``a * b``. - - """ - return self.operate(mul, other) - - def __div__(self, other): - """Implement the ``/`` operator. - - In a column context, produces the clause ``a / b``. - - """ - return self.operate(div, other) - - def __mod__(self, other): - """Implement the ``%`` operator. - - In a column context, produces the clause ``a % b``. - - """ - return self.operate(mod, other) - - def __truediv__(self, other): - """Implement the ``//`` operator. - - In a column context, produces the clause ``a / b``. - - """ - return self.operate(truediv, other) - - def __rtruediv__(self, other): - """Implement the ``//`` operator in reverse. - - See :meth:`.ColumnOperators.__truediv__`. 
- - """ - return self.reverse_operate(truediv, other) - - -def from_(): - raise NotImplementedError() - - -def as_(): - raise NotImplementedError() - - -def exists(): - raise NotImplementedError() - - -def istrue(a): - raise NotImplementedError() - - -def isfalse(a): - raise NotImplementedError() - - -def is_(a, b): - return a.is_(b) - - -def isnot(a, b): - return a.isnot(b) - - -def collate(a, b): - return a.collate(b) - - -def op(a, opstring, b): - return a.op(opstring)(b) - - -def like_op(a, b, escape=None): - return a.like(b, escape=escape) - - -def notlike_op(a, b, escape=None): - return a.notlike(b, escape=escape) - - -def ilike_op(a, b, escape=None): - return a.ilike(b, escape=escape) - - -def notilike_op(a, b, escape=None): - return a.notilike(b, escape=escape) - - -def between_op(a, b, c, symmetric=False): - return a.between(b, c, symmetric=symmetric) - - -def notbetween_op(a, b, c, symmetric=False): - return a.notbetween(b, c, symmetric=symmetric) - - -def in_op(a, b): - return a.in_(b) - - -def notin_op(a, b): - return a.notin_(b) - - -def distinct_op(a): - return a.distinct() - - -def startswith_op(a, b, escape=None): - return a.startswith(b, escape=escape) - - -def notstartswith_op(a, b, escape=None): - return ~a.startswith(b, escape=escape) - - -def endswith_op(a, b, escape=None): - return a.endswith(b, escape=escape) - - -def notendswith_op(a, b, escape=None): - return ~a.endswith(b, escape=escape) - - -def contains_op(a, b, escape=None): - return a.contains(b, escape=escape) - - -def notcontains_op(a, b, escape=None): - return ~a.contains(b, escape=escape) - - -def match_op(a, b, **kw): - return a.match(b, **kw) - - -def notmatch_op(a, b, **kw): - return a.notmatch(b, **kw) - - -def comma_op(a, b): - raise NotImplementedError() - - -def concat_op(a, b): - return a.concat(b) - - -def desc_op(a): - return a.desc() - - -def asc_op(a): - return a.asc() - - -def nullsfirst_op(a): - return a.nullsfirst() - - -def nullslast_op(a): - return a.nullslast() - - -_commutative = set([eq, ne, add, mul]) - -_comparison = set([eq, ne, lt, gt, ge, le, between_op, like_op]) - - -def is_comparison(op): - return op in _comparison or \ - isinstance(op, custom_op) and op.is_comparison - - -def is_commutative(op): - return op in _commutative - - -def is_ordering_modifier(op): - return op in (asc_op, desc_op, - nullsfirst_op, nullslast_op) - -_associative = _commutative.union([concat_op, and_, or_]) - -_natural_self_precedent = _associative.union([getitem]) -"""Operators where if we have (a op b) op c, we don't want to -parenthesize (a op b). 
- -""" - -_asbool = util.symbol('_asbool', canonical=-10) -_smallest = util.symbol('_smallest', canonical=-100) -_largest = util.symbol('_largest', canonical=100) - -_PRECEDENCE = { - from_: 15, - getitem: 15, - mul: 8, - truediv: 8, - div: 8, - mod: 8, - neg: 8, - add: 7, - sub: 7, - - concat_op: 6, - match_op: 6, - notmatch_op: 6, - - ilike_op: 6, - notilike_op: 6, - like_op: 6, - notlike_op: 6, - in_op: 6, - notin_op: 6, - - is_: 6, - isnot: 6, - - eq: 5, - ne: 5, - gt: 5, - lt: 5, - ge: 5, - le: 5, - - between_op: 5, - notbetween_op: 5, - distinct_op: 5, - inv: 5, - istrue: 5, - isfalse: 5, - and_: 3, - or_: 2, - comma_op: -1, - - desc_op: 3, - asc_op: 3, - collate: 4, - - as_: -1, - exists: 0, - _asbool: -10, - _smallest: _smallest, - _largest: _largest -} - - -def is_precedent(operator, against): - if operator is against and operator in _natural_self_precedent: - return False - else: - return (_PRECEDENCE.get(operator, - getattr(operator, 'precedence', _smallest)) <= - _PRECEDENCE.get(against, - getattr(against, 'precedence', _largest))) diff --git a/python/sqlalchemy/sql/schema.py b/python/sqlalchemy/sql/schema.py deleted file mode 100644 index 13720858..00000000 --- a/python/sqlalchemy/sql/schema.py +++ /dev/null @@ -1,3779 +0,0 @@ -# sql/schema.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""The schema module provides the building blocks for database metadata. - -Each element within this module describes a database entity which can be -created and dropped, or is otherwise part of such an entity. Examples include -tables, columns, sequences, and indexes. - -All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as -defined in this module they are intended to be agnostic of any vendor-specific -constructs. - -A collection of entities are grouped into a unit called -:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of -schema elements, and can also be associated with an actual database connection -such that operations involving the contained elements can contact the database -as needed. - -Two of the elements here also build upon their "syntactic" counterparts, which -are defined in :class:`~sqlalchemy.sql.expression.`, specifically -:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`. -Since these objects are part of the SQL expression language, they are usable -as components in SQL expressions. - -""" -from __future__ import absolute_import - -import inspect -from .. import exc, util, event, inspection -from .base import SchemaEventTarget, DialectKWArgs -from . import visitors -from . import type_api -from .base import _bind_or_error, ColumnCollection -from .elements import ClauseElement, ColumnClause, _truncated_label, \ - _as_truncated, TextClause, _literal_as_text,\ - ColumnElement, _find_columns, quoted_name -from .selectable import TableClause -import collections -import sqlalchemy -from . import ddl -import types - -RETAIN_SCHEMA = util.symbol('retain_schema') - - -def _get_table_key(name, schema): - if schema is None: - return name - else: - return schema + "." 
+ name - - -@inspection._self_inspects -class SchemaItem(SchemaEventTarget, visitors.Visitable): - """Base class for items that define a database schema.""" - - __visit_name__ = 'schema_item' - - def _execute_on_connection(self, connection, multiparams, params): - return connection._execute_default(self, multiparams, params) - - def _init_items(self, *args): - """Initialize the list of child items for this SchemaItem.""" - - for item in args: - if item is not None: - item._set_parent_with_dispatch(self) - - def get_children(self, **kwargs): - """used to allow SchemaVisitor access""" - return [] - - def __repr__(self): - return util.generic_repr(self, omit_kwarg=['info']) - - @property - @util.deprecated('0.9', 'Use ``.name.quote``') - def quote(self): - """Return the value of the ``quote`` flag passed - to this schema object, for those schema items which - have a ``name`` field. - - """ - - return self.name.quote - - @util.memoized_property - def info(self): - """Info dictionary associated with the object, allowing user-defined - data to be associated with this :class:`.SchemaItem`. - - The dictionary is automatically generated when first accessed. - It can also be specified in the constructor of some objects, - such as :class:`.Table` and :class:`.Column`. - - """ - return {} - - def _schema_item_copy(self, schema_item): - if 'info' in self.__dict__: - schema_item.info = self.info.copy() - schema_item.dispatch._update(self.dispatch) - return schema_item - - -class Table(DialectKWArgs, SchemaItem, TableClause): - """Represent a table in a database. - - e.g.:: - - mytable = Table("mytable", metadata, - Column('mytable_id', Integer, primary_key=True), - Column('value', String(50)) - ) - - The :class:`.Table` object constructs a unique instance of itself based - on its name and optional schema name within the given - :class:`.MetaData` object. Calling the :class:`.Table` - constructor with the same name and same :class:`.MetaData` argument - a second time will return the *same* :class:`.Table` object - in this way - the :class:`.Table` constructor acts as a registry function. - - .. seealso:: - - :ref:`metadata_describing` - Introduction to database metadata - - Constructor arguments are as follows: - - :param name: The name of this table as represented in the database. - - The table name, along with the value of the ``schema`` parameter, - forms a key which uniquely identifies this :class:`.Table` within - the owning :class:`.MetaData` collection. - Additional calls to :class:`.Table` with the same name, metadata, - and schema name will return the same :class:`.Table` object. - - Names which contain no upper case characters - will be treated as case insensitive names, and will not be quoted - unless they are a reserved word or contain special characters. - A name with any number of upper case characters is considered - to be case sensitive, and will be sent as quoted. - - To enable unconditional quoting for the table name, specify the flag - ``quote=True`` to the constructor, or use the :class:`.quoted_name` - construct to specify the name. - - :param metadata: a :class:`.MetaData` object which will contain this - table. The metadata is used as a point of association of this table - with other tables which are referenced via foreign key. It also - may be used to associate this table with a particular - :class:`.Connectable`. - - :param \*args: Additional positional arguments are used primarily - to add the list of :class:`.Column` objects contained within this - table. 
Similar to the style of a CREATE TABLE statement, other - :class:`.SchemaItem` constructs may be added here, including - :class:`.PrimaryKeyConstraint`, and :class:`.ForeignKeyConstraint`. - - :param autoload: Defaults to False, unless :paramref:`.Table.autoload_with` - is set in which case it defaults to True; :class:`.Column` objects - for this table should be reflected from the database, possibly - augmenting or replacing existing :class:`.Column` objects that were - expicitly specified. - - .. versionchanged:: 1.0.0 setting the :paramref:`.Table.autoload_with` - parameter implies that :paramref:`.Table.autoload` will default - to True. - - .. seealso:: - - :ref:`metadata_reflection_toplevel` - - :param autoload_replace: Defaults to ``True``; when using - :paramref:`.Table.autoload` - in conjunction with :paramref:`.Table.extend_existing`, indicates - that :class:`.Column` objects present in the already-existing - :class:`.Table` object should be replaced with columns of the same - name retrieved from the autoload process. When ``False``, columns - already present under existing names will be omitted from the - reflection process. - - Note that this setting does not impact :class:`.Column` objects - specified programmatically within the call to :class:`.Table` that - also is autoloading; those :class:`.Column` objects will always - replace existing columns of the same name when - :paramref:`.Table.extend_existing` is ``True``. - - .. versionadded:: 0.7.5 - - .. seealso:: - - :paramref:`.Table.autoload` - - :paramref:`.Table.extend_existing` - - :param autoload_with: An :class:`.Engine` or :class:`.Connection` object - with which this :class:`.Table` object will be reflected; when - set to a non-None value, it implies that :paramref:`.Table.autoload` - is ``True``. If left unset, but :paramref:`.Table.autoload` is - explicitly set to ``True``, an autoload operation will attempt to - proceed by locating an :class:`.Engine` or :class:`.Connection` bound - to the underlying :class:`.MetaData` object. - - .. seealso:: - - :paramref:`.Table.autoload` - - :param extend_existing: When ``True``, indicates that if this - :class:`.Table` is already present in the given :class:`.MetaData`, - apply further arguments within the constructor to the existing - :class:`.Table`. - - If :paramref:`.Table.extend_existing` or - :paramref:`.Table.keep_existing` are not set, and the given name - of the new :class:`.Table` refers to a :class:`.Table` that is - already present in the target :class:`.MetaData` collection, and - this :class:`.Table` specifies additional columns or other constructs - or flags that modify the table's state, an - error is raised. The purpose of these two mutually-exclusive flags - is to specify what action should be taken when a :class:`.Table` - is specified that matches an existing :class:`.Table`, yet specifies - additional constructs. - - :paramref:`.Table.extend_existing` will also work in conjunction - with :paramref:`.Table.autoload` to run a new reflection - operation against the database, even if a :class:`.Table` - of the same name is already present in the target - :class:`.MetaData`; newly reflected :class:`.Column` objects - and other options will be added into the state of the - :class:`.Table`, potentially overwriting existing columns - and options of the same name. - - .. versionchanged:: 0.7.4 :paramref:`.Table.extend_existing` will - invoke a new reflection operation when combined with - :paramref:`.Table.autoload` set to True. 
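The registry behavior plus extend_existing described above can be sketched in a few lines (in-memory metadata only; not part of the patch):

    from sqlalchemy import Table, Column, Integer, MetaData

    metadata = MetaData()
    t1 = Table('mytable', metadata, Column('x', Integer))

    # same name + same MetaData acts as a registry: the SAME object comes back
    t2 = Table('mytable', metadata)
    print(t1 is t2)  # True

    # extend_existing=True lets a second call add columns to the existing Table
    Table('mytable', metadata, Column('y', Integer), extend_existing=True)
    print(list(t1.c.keys()))  # ['x', 'y']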
- - As is always the case with :paramref:`.Table.autoload`, - :class:`.Column` objects can be specified in the same :class:`.Table` - constructor, which will take precedence. Below, the existing - table ``mytable`` will be augmented with :class:`.Column` objects - both reflected from the database, as well as the given :class:`.Column` - named "y":: - - Table("mytable", metadata, - Column('y', Integer), - extend_existing=True, - autoload=True, - autoload_with=engine - ) - - .. seealso:: - - :paramref:`.Table.autoload` - - :paramref:`.Table.autoload_replace` - - :paramref:`.Table.keep_existing` - - - :param implicit_returning: True by default - indicates that - RETURNING can be used by default to fetch newly inserted primary key - values, for backends which support this. Note that - create_engine() also provides an implicit_returning flag. - - :param include_columns: A list of strings indicating a subset of - columns to be loaded via the ``autoload`` operation; table columns that - aren't present in this list will not be represented on the resulting - ``Table`` object. Defaults to ``None`` which indicates all columns - should be reflected. - - :param info: Optional data dictionary which will be populated into the - :attr:`.SchemaItem.info` attribute of this object. - - :param keep_existing: When ``True``, indicates that if this Table - is already present in the given :class:`.MetaData`, ignore - further arguments within the constructor to the existing - :class:`.Table`, and return the :class:`.Table` object as - originally created. This is to allow a function that wishes - to define a new :class:`.Table` on first call, but on - subsequent calls will return the same :class:`.Table`, - without any of the declarations (particularly constraints) - being applied a second time. - - If :paramref:`.Table.extend_existing` or - :paramref:`.Table.keep_existing` are not set, and the given name - of the new :class:`.Table` refers to a :class:`.Table` that is - already present in the target :class:`.MetaData` collection, and - this :class:`.Table` specifies additional columns or other constructs - or flags that modify the table's state, an - error is raised. The purpose of these two mutually-exclusive flags - is to specify what action should be taken when a :class:`.Table` - is specified that matches an existing :class:`.Table`, yet specifies - additional constructs. - - .. seealso:: - - :paramref:`.Table.extend_existing` - - :param listeners: A list of tuples of the form ``(<eventname>, <fn>)`` - which will be passed to :func:`.event.listen` upon construction. - This alternate hook to :func:`.event.listen` allows the establishment - of a listener function specific to this :class:`.Table` before - the "autoload" process begins. Particularly useful for - the :meth:`.DDLEvents.column_reflect` event:: - - def listen_for_reflect(table, column_info): - "handle the column reflection event" - # ... - - t = Table( - 'sometable', - autoload=True, - listeners=[ - ('column_reflect', listen_for_reflect) - ]) - - :param mustexist: When ``True``, indicates that this Table must already - be present in the given :class:`.MetaData` collection, else - an exception is raised. - - :param prefixes: - A list of strings to insert after CREATE in the CREATE TABLE - statement. They will be separated by spaces. - - :param quote: Force quoting of this table's name on or off, corresponding - to ``True`` or ``False``.
When left at its default of ``None``, - the table identifier will be quoted according to whether the name is - case sensitive (identifiers with at least one upper case character are - treated as case sensitive), or if it's a reserved word. This flag - is only needed to force quoting of a reserved word which is not known - by the SQLAlchemy dialect. - - :param quote_schema: same as 'quote' but applies to the schema identifier. - - :param schema: The schema name for this table, which is required if - the table resides in a schema other than the default selected schema - for the engine's database connection. Defaults to ``None``. - - The quoting rules for the schema name are the same as those for the - ``name`` parameter, in that quoting is applied for reserved words or - case-sensitive names; to enable unconditional quoting for the - schema name, specify the flag - ``quote_schema=True`` to the constructor, or use the - :class:`.quoted_name` construct to specify the name. - - :param useexisting: Deprecated. Use :paramref:`.Table.extend_existing`. - - :param \**kw: Additional keyword arguments not mentioned above are - dialect specific, and passed in the form ``<dialectname>_<argname>``. - See the documentation regarding an individual dialect at - :ref:`dialect_toplevel` for detail on documented arguments. - - """ - - __visit_name__ = 'table' - - def __new__(cls, *args, **kw): - if not args: - # python3k pickle seems to call this - return object.__new__(cls) - - try: - name, metadata, args = args[0], args[1], args[2:] - except IndexError: - raise TypeError("Table() takes at least two arguments") - - schema = kw.get('schema', None) - if schema is None: - schema = metadata.schema - keep_existing = kw.pop('keep_existing', False) - extend_existing = kw.pop('extend_existing', False) - if 'useexisting' in kw: - msg = "useexisting is deprecated. Use extend_existing." - util.warn_deprecated(msg) - if extend_existing: - msg = "useexisting is synonymous with extend_existing." - raise exc.ArgumentError(msg) - extend_existing = kw.pop('useexisting', False) - - if keep_existing and extend_existing: - msg = "keep_existing and extend_existing are mutually exclusive." - raise exc.ArgumentError(msg) - - mustexist = kw.pop('mustexist', False) - key = _get_table_key(name, schema) - if key in metadata.tables: - if not keep_existing and not extend_existing and bool(args): - raise exc.InvalidRequestError( - "Table '%s' is already defined for this MetaData " - "instance. Specify 'extend_existing=True' " - "to redefine " - "options and columns on an " - "existing Table object." % key) - table = metadata.tables[key] - if extend_existing: - table._init_existing(*args, **kw) - return table - else: - if mustexist: - raise exc.InvalidRequestError( - "Table '%s' not defined" % (key)) - table = object.__new__(cls) - table.dispatch.before_parent_attach(table, metadata) - metadata._add_table(name, schema, table) - try: - table._init(name, metadata, *args, **kw) - table.dispatch.after_parent_attach(table, metadata) - return table - except: - with util.safe_reraise(): - metadata._remove_table(name, schema) - - @property - @util.deprecated('0.9', 'Use ``table.schema.quote``') - def quote_schema(self): - """Return the value of the ``quote_schema`` flag passed - to this :class:`.Table`. - """ - - return self.schema.quote - - def __init__(self, *args, **kw): - """Constructor for :class:`~.schema.Table`. - - This method is a no-op. See the top-level - documentation for :class:`~.schema.Table` - for constructor arguments.
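[Editor's note: for illustration of the registry behavior handled in ``__new__`` above, a minimal sketch; the ``MetaData`` and the names ``t``, ``t1``, ``t2`` are assumed here, not taken from the patch::

    m = MetaData()
    t1 = Table('t', m, Column('id', Integer, primary_key=True))
    t2 = Table('t', m)   # same name + same MetaData: the existing object is returned
    assert t1 is t2
]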
- - """ - # __init__ is overridden to prevent __new__ from - # calling the superclass constructor. - - def _init(self, name, metadata, *args, **kwargs): - super(Table, self).__init__( - quoted_name(name, kwargs.pop('quote', None))) - self.metadata = metadata - - self.schema = kwargs.pop('schema', None) - if self.schema is None: - self.schema = metadata.schema - else: - quote_schema = kwargs.pop('quote_schema', None) - self.schema = quoted_name(self.schema, quote_schema) - - self.indexes = set() - self.constraints = set() - self._columns = ColumnCollection() - PrimaryKeyConstraint()._set_parent_with_dispatch(self) - self.foreign_keys = set() - self._extra_dependencies = set() - if self.schema is not None: - self.fullname = "%s.%s" % (self.schema, self.name) - else: - self.fullname = self.name - - autoload_with = kwargs.pop('autoload_with', None) - autoload = kwargs.pop('autoload', autoload_with is not None) - # this argument is only used with _init_existing() - kwargs.pop('autoload_replace', True) - include_columns = kwargs.pop('include_columns', None) - - self.implicit_returning = kwargs.pop('implicit_returning', True) - - if 'info' in kwargs: - self.info = kwargs.pop('info') - if 'listeners' in kwargs: - listeners = kwargs.pop('listeners') - for evt, fn in listeners: - event.listen(self, evt, fn) - - self._prefixes = kwargs.pop('prefixes', []) - - self._extra_kwargs(**kwargs) - - # load column definitions from the database if 'autoload' is defined - # we do it after the table is in the singleton dictionary to support - # circular foreign keys - if autoload: - self._autoload(metadata, autoload_with, include_columns) - - # initialize all the column, etc. objects. done after reflection to - # allow user-overrides - self._init_items(*args) - - def _autoload(self, metadata, autoload_with, include_columns, - exclude_columns=()): - - if autoload_with: - autoload_with.run_callable( - autoload_with.dialect.reflecttable, - self, include_columns, exclude_columns - ) - else: - bind = _bind_or_error( - metadata, - msg="No engine is bound to this Table's MetaData. " - "Pass an engine to the Table via " - "autoload_with=, " - "or associate the MetaData with an engine via " - "metadata.bind=") - bind.run_callable( - bind.dialect.reflecttable, - self, include_columns, exclude_columns - ) - - @property - def _sorted_constraints(self): - """Return the set of constraints as a list, sorted by creation - order. - - """ - return sorted(self.constraints, key=lambda c: c._creation_order) - - @property - def foreign_key_constraints(self): - """:class:`.ForeignKeyConstraint` objects referred to by this - :class:`.Table`. - - This list is produced from the collection of :class:`.ForeignKey` - objects currently associated. - - .. 
versionadded:: 1.0.0 - - """ - return set(fkc.constraint for fkc in self.foreign_keys) - - def _init_existing(self, *args, **kwargs): - autoload_with = kwargs.pop('autoload_with', None) - autoload = kwargs.pop('autoload', autoload_with is not None) - autoload_replace = kwargs.pop('autoload_replace', True) - schema = kwargs.pop('schema', None) - if schema and schema != self.schema: - raise exc.ArgumentError( - "Can't change schema of existing table from '%s' to '%s'", - (self.schema, schema)) - - include_columns = kwargs.pop('include_columns', None) - - if include_columns is not None: - for c in self.c: - if c.name not in include_columns: - self._columns.remove(c) - - for key in ('quote', 'quote_schema'): - if key in kwargs: - raise exc.ArgumentError( - "Can't redefine 'quote' or 'quote_schema' arguments") - - if 'info' in kwargs: - self.info = kwargs.pop('info') - - if autoload: - if not autoload_replace: - exclude_columns = [c.name for c in self.c] - else: - exclude_columns = () - self._autoload( - self.metadata, autoload_with, - include_columns, exclude_columns) - - self._extra_kwargs(**kwargs) - self._init_items(*args) - - def _extra_kwargs(self, **kwargs): - self._validate_dialect_kwargs(kwargs) - - def _init_collections(self): - pass - - @util.memoized_property - def _autoincrement_column(self): - for col in self.primary_key: - if (col.autoincrement and col.type._type_affinity is not None and - issubclass(col.type._type_affinity, - type_api.INTEGERTYPE._type_affinity) and - (not col.foreign_keys or - col.autoincrement == 'ignore_fk') and - isinstance(col.default, (type(None), Sequence)) and - (col.server_default is None or - col.server_default.reflected)): - return col - - @property - def key(self): - """Return the 'key' for this :class:`.Table`. - - This value is used as the dictionary key within the - :attr:`.MetaData.tables` collection. It is typically the same - as that of :attr:`.Table.name` for a table with no - :attr:`.Table.schema` set; otherwise it is typically of the form - ``schemaname.tablename``. - - """ - return _get_table_key(self.name, self.schema) - - def __repr__(self): - return "Table(%s)" % ', '.join( - [repr(self.name)] + [repr(self.metadata)] + - [repr(x) for x in self.columns] + - ["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']]) - - def __str__(self): - return _get_table_key(self.description, self.schema) - - @property - def bind(self): - """Return the connectable associated with this Table.""" - - return self.metadata and self.metadata.bind or None - - def add_is_dependent_on(self, table): - """Add a 'dependency' for this Table. - - This is another Table object which must be created - first before this one can, or dropped after this one. - - Usually, dependencies between tables are determined via - ForeignKey objects. However, for other situations that - create dependencies outside of foreign keys (rules, inheriting), - this method can manually establish such a link. - - """ - self._extra_dependencies.add(table) - - def append_column(self, column): - """Append a :class:`~.schema.Column` to this :class:`~.schema.Table`. - - The "key" of the newly added :class:`~.schema.Column`, i.e. the - value of its ``.key`` attribute, will then be available - in the ``.c`` collection of this :class:`~.schema.Table`, and the - column definition will be included in any CREATE TABLE, SELECT, - UPDATE, etc. statements generated from this :class:`~.schema.Table` - construct. 
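[Editor's note: a usage sketch for ``append_column`` above; the table and column names are assumed for illustration only::

    t = Table('mytable', MetaData(), Column('id', Integer, primary_key=True))
    t.append_column(Column('data', String(50)))
    assert 'data' in t.c   # now part of the .c collection and of future CREATE TABLE
]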
- - Note that this does **not** change the definition of the table - as it exists within any underlying database, assuming that - table has already been created in the database. Relational - databases support the addition of columns to existing tables - using the SQL ALTER command, which would need to be - emitted for an already-existing table that doesn't contain - the newly added column. - - """ - - column._set_parent_with_dispatch(self) - - def append_constraint(self, constraint): - """Append a :class:`~.schema.Constraint` to this - :class:`~.schema.Table`. - - This has the effect of the constraint being included in any - future CREATE TABLE statement, assuming specific DDL creation - events have not been associated with the given - :class:`~.schema.Constraint` object. - - Note that this does **not** produce the constraint within the - relational database automatically, for a table that already exists - in the database. To add a constraint to an - existing relational database table, the SQL ALTER command must - be used. SQLAlchemy also provides the - :class:`.AddConstraint` construct which can produce this SQL when - invoked as an executable clause. - - """ - - constraint._set_parent_with_dispatch(self) - - def append_ddl_listener(self, event_name, listener): - """Append a DDL event listener to this ``Table``. - - .. deprecated:: 0.7 - See :class:`.DDLEvents`. - - """ - - def adapt_listener(target, connection, **kw): - listener(event_name, target, connection) - - event.listen(self, "" + event_name.replace('-', '_'), adapt_listener) - - def _set_parent(self, metadata): - metadata._add_table(self.name, self.schema, self) - self.metadata = metadata - - def get_children(self, column_collections=True, - schema_visitor=False, **kw): - if not schema_visitor: - return TableClause.get_children( - self, column_collections=column_collections, **kw) - else: - if column_collections: - return list(self.columns) - else: - return [] - - def exists(self, bind=None): - """Return True if this table exists.""" - - if bind is None: - bind = _bind_or_error(self) - - return bind.run_callable(bind.dialect.has_table, - self.name, schema=self.schema) - - def create(self, bind=None, checkfirst=False): - """Issue a ``CREATE`` statement for this - :class:`.Table`, using the given :class:`.Connectable` - for connectivity. - - .. seealso:: - - :meth:`.MetaData.create_all`. - - """ - - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaGenerator, - self, - checkfirst=checkfirst) - - def drop(self, bind=None, checkfirst=False): - """Issue a ``DROP`` statement for this - :class:`.Table`, using the given :class:`.Connectable` - for connectivity. - - .. seealso:: - - :meth:`.MetaData.drop_all`. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaDropper, - self, - checkfirst=checkfirst) - - def tometadata(self, metadata, schema=RETAIN_SCHEMA, - referred_schema_fn=None, name=None): - """Return a copy of this :class:`.Table` associated with a different - :class:`.MetaData`. - - E.g.:: - - m1 = MetaData() - - user = Table('user', m1, Column('id', Integer, primary_key=True)) - - m2 = MetaData() - user_copy = user.tometadata(m2) - - :param metadata: Target :class:`.MetaData` object, into which the - new :class:`.Table` object will be created. - - :param schema: optional string name indicating the target schema. - Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates - that no change to the schema name should be made in the new - :class:`.Table`.
If set to a string name, the new :class:`.Table` - will have this new name as the ``.schema``. If set to ``None``, the - schema will be set to that of the schema set on the target - :class:`.MetaData`, which is typically ``None`` as well, unless - set explicitly:: - - m2 = MetaData(schema='newschema') - - # user_copy_one will have "newschema" as the schema name - user_copy_one = user.tometadata(m2, schema=None) - - m3 = MetaData() # schema defaults to None - - # user_copy_two will have None as the schema name - user_copy_two = user.tometadata(m3, schema=None) - - :param referred_schema_fn: optional callable which can be supplied - in order to provide for the schema name that should be assigned - to the referenced table of a :class:`.ForeignKeyConstraint`. - The callable accepts this parent :class:`.Table`, the - target schema that we are changing to, the - :class:`.ForeignKeyConstraint` object, and the existing - "target schema" of that constraint. The function should return the - string schema name that should be applied. - E.g.:: - - def referred_schema_fn(table, to_schema, - constraint, referred_schema): - if referred_schema == 'base_tables': - return referred_schema - else: - return to_schema - - new_table = table.tometadata(m2, schema="alt_schema", - referred_schema_fn=referred_schema_fn) - - .. versionadded:: 0.9.2 - - :param name: optional string name indicating the target table name. - If not specified or None, the table name is retained. This allows - a :class:`.Table` to be copied to the same :class:`.MetaData` target - with a new name. - - .. versionadded:: 1.0.0 - - """ - if name is None: - name = self.name - if schema is RETAIN_SCHEMA: - schema = self.schema - elif schema is None: - schema = metadata.schema - key = _get_table_key(name, schema) - if key in metadata.tables: - util.warn("Table '%s' already exists within the given " - "MetaData - not copying." % self.description) - return metadata.tables[key] - - args = [] - for c in self.columns: - args.append(c.copy(schema=schema)) - table = Table( - name, metadata, schema=schema, - *args, **self.kwargs - ) - for c in self.constraints: - if isinstance(c, ForeignKeyConstraint): - referred_schema = c._referred_schema - if referred_schema_fn: - fk_constraint_schema = referred_schema_fn( - self, schema, c, referred_schema) - else: - fk_constraint_schema = ( - schema if referred_schema == self.schema else None) - table.append_constraint( - c.copy(schema=fk_constraint_schema, target_table=table)) - - elif not c._type_bound: - table.append_constraint( - c.copy(schema=schema, target_table=table)) - for index in self.indexes: - # skip indexes that would be generated - # by the 'index' flag on Column - if len(index.columns) == 1 and \ - list(index.columns)[0].index: - continue - Index(index.name, - unique=index.unique, - *[table.c[col] for col in index.columns.keys()], - **index.kwargs) - return self._schema_item_copy(table) - - -class Column(SchemaItem, ColumnClause): - """Represents a column in a database table.""" - - __visit_name__ = 'column' - - def __init__(self, *args, **kwargs): - """ - Construct a new ``Column`` object. - - :param name: The name of this column as represented in the database. - This argument may be the first positional argument, or specified - via keyword. - - Names which contain no upper case characters - will be treated as case insensitive names, and will not be quoted - unless they are a reserved word. Names with any number of upper - case characters will be quoted and sent exactly. 
Note that this - behavior applies even for databases which standardize upper - case names as case insensitive such as Oracle. - - The name field may be omitted at construction time and applied - later, at any time before the Column is associated with a - :class:`.Table`. This is to support convenient - usage within the :mod:`~sqlalchemy.ext.declarative` extension. - - :param type\_: The column's type, indicated using an instance which - subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments - are required for the type, the class of the type can be sent - as well, e.g.:: - - # use a type with arguments - Column('data', String(50)) - - # use no arguments - Column('level', Integer) - - The ``type`` argument may be the second positional argument - or specified by keyword. - - If the ``type`` is ``None`` or is omitted, it will first default to - the special type :class:`.NullType`. If and when this - :class:`.Column` is made to refer to another column using - :class:`.ForeignKey` and/or :class:`.ForeignKeyConstraint`, the type - of the remote-referenced column will be copied to this column as - well, at the moment that the foreign key is resolved against that - remote :class:`.Column` object. - - .. versionchanged:: 0.9.0 - Support for propagation of type to a :class:`.Column` from its - :class:`.ForeignKey` object has been improved and should be - more reliable and timely. - - :param \*args: Additional positional arguments include various - :class:`.SchemaItem` derived constructs which will be applied - as options to the column. These include instances of - :class:`.Constraint`, :class:`.ForeignKey`, :class:`.ColumnDefault`, - and :class:`.Sequence`. In some cases an equivalent keyword - argument is available such as ``server_default``, ``default`` - and ``unique``. - - :param autoincrement: This flag may be set to ``False`` to - indicate an integer primary key column that should not be - considered to be the "autoincrement" column, that is - the integer primary key column which generates values - implicitly upon INSERT and whose value is usually returned - via the DBAPI cursor.lastrowid attribute. It defaults - to ``True`` to satisfy the common use case of a table - with a single integer primary key column. If the table - has a composite primary key consisting of more than one - integer column, set this flag to True only on the - column that should be considered "autoincrement". - - The setting *only* has an effect for columns which are: - - * Integer derived (i.e. INT, SMALLINT, BIGINT). - - * Part of the primary key - - * Not referring to another column via :class:`.ForeignKey`, unless - the value is specified as ``'ignore_fk'``:: - - # turn on autoincrement for this column despite - # the ForeignKey() - Column('id', ForeignKey('other.id'), - primary_key=True, autoincrement='ignore_fk') - - It is typically not desirable to have "autoincrement" enabled - on such a column as its value intends to mirror that of a - primary key column elsewhere. - - * have no server side or client side defaults (with the exception - of Postgresql SERIAL). - - The setting has these two effects on columns that meet the - above criteria: - - * DDL issued for the column will include database-specific - keywords intended to signify this column as an - "autoincrement" column, such as AUTO_INCREMENT on MySQL, - SERIAL on Postgresql, and IDENTITY on MS-SQL. It does - *not* issue AUTOINCREMENT for SQLite since this is a - special SQLite flag that is not required for autoincrementing - behavior. - - ..
seealso:: - - :ref:`sqlite_autoincrement` - - * The column will be considered to be available as - cursor.lastrowid or equivalent, for those dialects which - "post fetch" newly inserted identifiers after a row has - been inserted (SQLite, MySQL, MS-SQL). It does not have - any effect in this regard for databases that use sequences - to generate primary key identifiers (i.e. Firebird, Postgresql, - Oracle). - - .. versionchanged:: 0.7.4 - ``autoincrement`` accepts a special value ``'ignore_fk'`` - to indicate autoincrementing status regardless of foreign - key references. This applies to certain composite foreign key - setups, such as the one demonstrated in the ORM documentation - at :ref:`post_update`. - - :param default: A scalar, Python callable, or - :class:`.ColumnElement` expression representing the - *default value* for this column, which will be invoked upon insert - if this column is otherwise not specified in the VALUES clause of - the insert. This is a shortcut to using :class:`.ColumnDefault` as - a positional argument; see that class for full detail on the - structure of the argument. - - Contrast this argument to ``server_default`` which creates a - default generator on the database side. - - :param doc: optional String that can be used by the ORM or similar - to document attributes. This attribute does not render SQL - comments (a future attribute 'comment' will achieve that). - - :param key: An optional string identifier which will identify this - ``Column`` object on the :class:`.Table`. When a key is provided, - this is the only identifier referencing the ``Column`` within the - application, including ORM attribute mapping; the ``name`` field - is used only when rendering SQL. - - :param index: When ``True``, indicates that the column is indexed. - This is a shortcut for using a :class:`.Index` construct on the - table. To specify indexes with explicit names or indexes that - contain multiple columns, use the :class:`.Index` construct - instead. - - :param info: Optional data dictionary which will be populated into the - :attr:`.SchemaItem.info` attribute of this object. - - :param nullable: If set to the default of ``True``, indicates the - column will be rendered as allowing NULL, else it's rendered as - NOT NULL. This parameter is only used when issuing CREATE TABLE - statements. - - :param onupdate: A scalar, Python callable, or - :class:`~sqlalchemy.sql.expression.ClauseElement` representing a - default value to be applied to the column within UPDATE - statements, which will be invoked upon update if this column is not - present in the SET clause of the update. This is a shortcut to - using :class:`.ColumnDefault` as a positional argument with - ``for_update=True``. - - :param primary_key: If ``True``, marks this column as a primary key - column. Multiple columns can have this flag set to specify - composite primary keys. As an alternative, the primary key of a - :class:`.Table` can be specified via an explicit - :class:`.PrimaryKeyConstraint` object. - - :param server_default: A :class:`.FetchedValue` instance, str, Unicode - or :func:`~sqlalchemy.sql.expression.text` construct representing - the DDL DEFAULT value for the column.
- - String types will be emitted as-is, surrounded by single quotes:: - - Column('x', Text, server_default="val") - - x TEXT DEFAULT 'val' - - A :func:`~sqlalchemy.sql.expression.text` expression will be - rendered as-is, without quotes:: - - Column('y', DateTime, server_default=text('NOW()')) - - y DATETIME DEFAULT NOW() - - Strings and text() will be converted into a - :class:`.DefaultClause` object upon initialization. - - Use :class:`.FetchedValue` to indicate that an already-existing - column will generate a default value on the database side which - will be available to SQLAlchemy for post-fetch after inserts. This - construct does not specify any DDL and the implementation is left - to the database, such as via a trigger. - - :param server_onupdate: A :class:`.FetchedValue` instance - representing a database-side default generation function. This - indicates to SQLAlchemy that a newly generated value will be - available after updates. This construct does not specify any DDL - and the implementation is left to the database, such as via a - trigger. - - :param quote: Force quoting of this column's name on or off, - corresponding to ``True`` or ``False``. When left at its default - of ``None``, the column identifier will be quoted according to - whether the name is case sensitive (identifiers with at least one - upper case character are treated as case sensitive), or if it's a - reserved word. This flag is only needed to force quoting of a - reserved word which is not known by the SQLAlchemy dialect. - - :param unique: When ``True``, indicates that this column contains a - unique constraint, or if ``index`` is ``True`` as well, indicates - that the :class:`.Index` should be created with the unique flag. - To specify multiple columns in the constraint/index or to specify - an explicit name, use the :class:`.UniqueConstraint` or - :class:`.Index` constructs explicitly. - - :param system: When ``True``, indicates this is a "system" column, - that is a column which is automatically made available by the - database, and should not be included in the columns list for a - ``CREATE TABLE`` statement. - - For more elaborate scenarios where columns should be - conditionally rendered differently on different backends, - consider custom compilation rules for :class:`.CreateColumn`. - - .. versionadded:: 0.8.3 Added the ``system=True`` parameter to - :class:`.Column`. 
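[Editor's note: a hypothetical sketch combining several of the parameters documented above; the column names are illustrative only::

    Column('email', String(120), nullable=False, unique=True, index=True,
           doc='login address for the account')

Per the ``unique`` description above, ``unique=True`` together with ``index=True`` causes the generated :class:`.Index` to carry the unique flag.]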
- - """ - - name = kwargs.pop('name', None) - type_ = kwargs.pop('type_', None) - args = list(args) - if args: - if isinstance(args[0], util.string_types): - if name is not None: - raise exc.ArgumentError( - "May not pass name positionally and as a keyword.") - name = args.pop(0) - if args: - coltype = args[0] - - if hasattr(coltype, "_sqla_type"): - if type_ is not None: - raise exc.ArgumentError( - "May not pass type_ positionally and as a keyword.") - type_ = args.pop(0) - - if name is not None: - name = quoted_name(name, kwargs.pop('quote', None)) - elif "quote" in kwargs: - raise exc.ArgumentError("Explicit 'name' is required when " - "sending 'quote' argument") - - super(Column, self).__init__(name, type_) - self.key = kwargs.pop('key', name) - self.primary_key = kwargs.pop('primary_key', False) - self.nullable = kwargs.pop('nullable', not self.primary_key) - self.default = kwargs.pop('default', None) - self.server_default = kwargs.pop('server_default', None) - self.server_onupdate = kwargs.pop('server_onupdate', None) - - # these default to None because .index and .unique is *not* - # an informational flag about Column - there can still be an - # Index or UniqueConstraint referring to this Column. - self.index = kwargs.pop('index', None) - self.unique = kwargs.pop('unique', None) - - self.system = kwargs.pop('system', False) - self.doc = kwargs.pop('doc', None) - self.onupdate = kwargs.pop('onupdate', None) - self.autoincrement = kwargs.pop('autoincrement', True) - self.constraints = set() - self.foreign_keys = set() - - # check if this Column is proxying another column - if '_proxies' in kwargs: - self._proxies = kwargs.pop('_proxies') - # otherwise, add DDL-related events - elif isinstance(self.type, SchemaEventTarget): - self.type._set_parent_with_dispatch(self) - - if self.default is not None: - if isinstance(self.default, (ColumnDefault, Sequence)): - args.append(self.default) - else: - if getattr(self.type, '_warn_on_bytestring', False): - if isinstance(self.default, util.binary_type): - util.warn( - "Unicode column '%s' has non-unicode " - "default value %r specified." % ( - self.key, - self.default - )) - args.append(ColumnDefault(self.default)) - - if self.server_default is not None: - if isinstance(self.server_default, FetchedValue): - args.append(self.server_default._as_for_update(False)) - else: - args.append(DefaultClause(self.server_default)) - - if self.onupdate is not None: - if isinstance(self.onupdate, (ColumnDefault, Sequence)): - args.append(self.onupdate) - else: - args.append(ColumnDefault(self.onupdate, for_update=True)) - - if self.server_onupdate is not None: - if isinstance(self.server_onupdate, FetchedValue): - args.append(self.server_onupdate._as_for_update(True)) - else: - args.append(DefaultClause(self.server_onupdate, - for_update=True)) - self._init_items(*args) - - util.set_creation_order(self) - - if 'info' in kwargs: - self.info = kwargs.pop('info') - - if kwargs: - raise exc.ArgumentError( - "Unknown arguments passed to Column: " + repr(list(kwargs))) - -# @property -# def quote(self): -# return getattr(self.name, "quote", None) - - def __str__(self): - if self.name is None: - return "(no name)" - elif self.table is not None: - if self.table.named_with_column: - return (self.table.description + "." 
+ self.description) - else: - return self.description - else: - return self.description - - def references(self, column): - """Return True if this Column references the given column via foreign - key.""" - - for fk in self.foreign_keys: - if fk.column.proxy_set.intersection(column.proxy_set): - return True - else: - return False - - def append_foreign_key(self, fk): - fk._set_parent_with_dispatch(self) - - def __repr__(self): - kwarg = [] - if self.key != self.name: - kwarg.append('key') - if self.primary_key: - kwarg.append('primary_key') - if not self.nullable: - kwarg.append('nullable') - if self.onupdate: - kwarg.append('onupdate') - if self.default: - kwarg.append('default') - if self.server_default: - kwarg.append('server_default') - return "Column(%s)" % ', '.join( - [repr(self.name)] + [repr(self.type)] + - [repr(x) for x in self.foreign_keys if x is not None] + - [repr(x) for x in self.constraints] + - [(self.table is not None and "table=<%s>" % - self.table.description or "table=None")] + - ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg]) - - def _set_parent(self, table): - if not self.name: - raise exc.ArgumentError( - "Column must be constructed with a non-blank name or " - "assign a non-blank .name before adding to a Table.") - if self.key is None: - self.key = self.name - - existing = getattr(self, 'table', None) - if existing is not None and existing is not table: - raise exc.ArgumentError( - "Column object '%s' already assigned to Table '%s'" % ( - self.key, - existing.description - )) - - if self.key in table._columns: - col = table._columns.get(self.key) - if col is not self: - for fk in col.foreign_keys: - table.foreign_keys.remove(fk) - if fk.constraint in table.constraints: - # this might have been removed - # already, if it's a composite constraint - # and more than one col being replaced - table.constraints.remove(fk.constraint) - - table._columns.replace(self) - - if self.primary_key: - table.primary_key._replace(self) - Table._autoincrement_column._reset(table) - elif self.key in table.primary_key: - raise exc.ArgumentError( - "Trying to redefine primary-key column '%s' as a " - "non-primary-key column on table '%s'" % ( - self.key, table.fullname)) - self.table = table - - if self.index: - if isinstance(self.index, util.string_types): - raise exc.ArgumentError( - "The 'index' keyword argument on Column is boolean only. " - "To create indexes with a specific name, create an " - "explicit Index object external to the Table.") - Index(None, self, unique=bool(self.unique)) - elif self.unique: - if isinstance(self.unique, util.string_types): - raise exc.ArgumentError( - "The 'unique' keyword argument on Column is boolean " - "only. 
To create unique constraints or indexes with a " - "specific name, append an explicit UniqueConstraint to " - "the Table's list of elements, or create an explicit " - "Index object external to the Table.") - table.append_constraint(UniqueConstraint(self.key)) - - self._setup_on_memoized_fks(lambda fk: fk._set_remote_table(table)) - - def _setup_on_memoized_fks(self, fn): - fk_keys = [ - ((self.table.key, self.key), False), - ((self.table.key, self.name), True), - ] - for fk_key, link_to_name in fk_keys: - if fk_key in self.table.metadata._fk_memos: - for fk in self.table.metadata._fk_memos[fk_key]: - if fk.link_to_name is link_to_name: - fn(fk) - - def _on_table_attach(self, fn): - if self.table is not None: - fn(self, self.table) - else: - event.listen(self, 'after_parent_attach', fn) - - def copy(self, **kw): - """Create a copy of this ``Column``, uninitialized. - - This is used in ``Table.tometadata``. - - """ - - # Constraint objects plus non-constraint-bound ForeignKey objects - args = \ - [c.copy(**kw) for c in self.constraints if not c._type_bound] + \ - [c.copy(**kw) for c in self.foreign_keys if not c.constraint] - - type_ = self.type - if isinstance(type_, SchemaEventTarget): - type_ = type_.copy(**kw) - - c = self._constructor( - name=self.name, - type_=type_, - key=self.key, - primary_key=self.primary_key, - nullable=self.nullable, - unique=self.unique, - system=self.system, - # quote=self.quote, - index=self.index, - autoincrement=self.autoincrement, - default=self.default, - server_default=self.server_default, - onupdate=self.onupdate, - server_onupdate=self.server_onupdate, - doc=self.doc, - *args - ) - return self._schema_item_copy(c) - - def _make_proxy(self, selectable, name=None, key=None, - name_is_truncatable=False, **kw): - """Create a *proxy* for this column. - - This is a copy of this ``Column`` referenced by a different parent - (such as an alias or select statement). The column should - be used only in select scenarios, as its full DDL/default - information is not transferred. - - """ - fk = [ForeignKey(f.column, _constraint=f.constraint) - for f in self.foreign_keys] - if name is None and self.name is None: - raise exc.InvalidRequestError( - "Cannot initialize a sub-selectable" - " with this Column object until its 'name' has " - "been assigned.") - try: - c = self._constructor( - _as_truncated(name or self.name) if - name_is_truncatable else (name or self.name), - self.type, - key=key if key else name if name else self.key, - primary_key=self.primary_key, - nullable=self.nullable, - _proxies=[self], *fk) - except TypeError: - util.raise_from_cause( - TypeError( - "Could not create a copy of this %r object. " - "Ensure the class includes a _constructor() " - "attribute or method which accepts the " - "standard Column constructor arguments, or " - "references the Column class itself." % self.__class__) - ) - - c.table = selectable - selectable._columns.add(c) - if selectable._is_clone_of is not None: - c._is_clone_of = selectable._is_clone_of.columns[c.key] - if self.primary_key: - selectable.primary_key.add(c) - c.dispatch.after_parent_attach(c, selectable) - return c - - def get_children(self, schema_visitor=False, **kwargs): - if schema_visitor: - return [x for x in (self.default, self.onupdate) - if x is not None] + \ - list(self.foreign_keys) + list(self.constraints) - else: - return ColumnClause.get_children(self, **kwargs) - - -class ForeignKey(DialectKWArgs, SchemaItem): - """Defines a dependency between two columns.
- - ``ForeignKey`` is specified as an argument to a :class:`.Column` object, - e.g.:: - - t = Table("remote_table", metadata, - Column("remote_id", ForeignKey("main_table.id")) - ) - - Note that ``ForeignKey`` is only a marker object that defines - a dependency between two columns. The actual constraint - is in all cases represented by the :class:`.ForeignKeyConstraint` - object. This object will be generated automatically when - a ``ForeignKey`` is associated with a :class:`.Column` which - in turn is associated with a :class:`.Table`. Conversely, - when :class:`.ForeignKeyConstraint` is applied to a :class:`.Table`, - ``ForeignKey`` markers are automatically generated to be - present on each associated :class:`.Column`, which are also - associated with the constraint object. - - Note that you cannot define a "composite" foreign key constraint, - that is a constraint between a grouping of multiple parent/child - columns, using ``ForeignKey`` objects. To define this grouping, - the :class:`.ForeignKeyConstraint` object must be used, and applied - to the :class:`.Table`. The associated ``ForeignKey`` objects - are created automatically. - - The ``ForeignKey`` objects associated with an individual - :class:`.Column` object are available in the `foreign_keys` collection - of that column. - - Further examples of foreign key configuration are in - :ref:`metadata_foreignkeys`. - - """ - - __visit_name__ = 'foreign_key' - - def __init__(self, column, _constraint=None, use_alter=False, name=None, - onupdate=None, ondelete=None, deferrable=None, - initially=None, link_to_name=False, match=None, - info=None, - **dialect_kw): - """ - Construct a column-level FOREIGN KEY. - - The :class:`.ForeignKey` object when constructed generates a - :class:`.ForeignKeyConstraint` which is associated with the parent - :class:`.Table` object's collection of constraints. - - :param column: A single target column for the key relationship. A - :class:`.Column` object or a column name as a string: - ``tablename.columnkey`` or ``schema.tablename.columnkey``. - ``columnkey`` is the ``key`` which has been assigned to the column - (defaults to the column name itself), unless ``link_to_name`` is - ``True`` in which case the rendered name of the column is used. - - .. versionadded:: 0.7.4 - Note that if the schema name is not included, and the - underlying :class:`.MetaData` has a "schema", that value will - be used. - - :param name: Optional string. An in-database name for the key if - `constraint` is not provided. - - :param onupdate: Optional string. If set, emit ON UPDATE when - issuing DDL for this constraint. Typical values include CASCADE, - DELETE and RESTRICT. - - :param ondelete: Optional string. If set, emit ON DELETE when - issuing DDL for this constraint. Typical values include CASCADE, - DELETE and RESTRICT. - - :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT - DEFERRABLE when issuing DDL for this constraint. - - :param initially: Optional string. If set, emit INITIALLY when - issuing DDL for this constraint. - - :param link_to_name: if True, the string name given in ``column`` is - the rendered name of the referenced column, not its locally - assigned ``key``. - - :param use_alter: passed to the underlying - :class:`.ForeignKeyConstraint` to indicate the constraint should - be generated/dropped externally from the CREATE TABLE/ DROP TABLE - statement. See :paramref:`.ForeignKeyConstraint.use_alter` - for further description. - - .. 
seealso:: - - :paramref:`.ForeignKeyConstraint.use_alter` - - :ref:`use_alter` - - :param match: Optional string. If set, emit MATCH when issuing - DDL for this constraint. Typical values include SIMPLE, PARTIAL - and FULL. - - :param info: Optional data dictionary which will be populated into the - :attr:`.SchemaItem.info` attribute of this object. - - .. versionadded:: 1.0.0 - - :param \**dialect_kw: Additional keyword arguments are dialect - specific, and passed in the form ``<dialectname>_<argname>``. The - arguments are ultimately handled by a corresponding - :class:`.ForeignKeyConstraint`. See the documentation regarding - an individual dialect at :ref:`dialect_toplevel` for detail on - documented arguments. - - .. versionadded:: 0.9.2 - - """ - - self._colspec = column - if isinstance(self._colspec, util.string_types): - self._table_column = None - else: - if hasattr(self._colspec, '__clause_element__'): - self._table_column = self._colspec.__clause_element__() - else: - self._table_column = self._colspec - - if not isinstance(self._table_column, ColumnClause): - raise exc.ArgumentError( - "String, Column, or Column-bound argument " - "expected, got %r" % self._table_column) - elif not isinstance( - self._table_column.table, (util.NoneType, TableClause)): - raise exc.ArgumentError( - "ForeignKey received Column not bound " - "to a Table, got: %r" % self._table_column.table - ) - - # the linked ForeignKeyConstraint. - # ForeignKey will create this when parent Column - # is attached to a Table, *or* ForeignKeyConstraint - # object passes itself in when creating ForeignKey - # markers. - self.constraint = _constraint - self.parent = None - self.use_alter = use_alter - self.name = name - self.onupdate = onupdate - self.ondelete = ondelete - self.deferrable = deferrable - self.initially = initially - self.link_to_name = link_to_name - self.match = match - if info: - self.info = info - self._unvalidated_dialect_kw = dialect_kw - - def __repr__(self): - return "ForeignKey(%r)" % self._get_colspec() - - def copy(self, schema=None): - """Produce a copy of this :class:`.ForeignKey` object. - - The new :class:`.ForeignKey` will not be bound - to any :class:`.Column`. - - This method is usually used by the internal - copy procedures of :class:`.Column`, :class:`.Table`, - and :class:`.MetaData`. - - :param schema: The returned :class:`.ForeignKey` will - reference the original table and column name, qualified - by the given string schema name. - - """ - - fk = ForeignKey( - self._get_colspec(schema=schema), - use_alter=self.use_alter, - name=self.name, - onupdate=self.onupdate, - ondelete=self.ondelete, - deferrable=self.deferrable, - initially=self.initially, - link_to_name=self.link_to_name, - match=self.match, - **self._unvalidated_dialect_kw - ) - return self._schema_item_copy(fk) - - def _get_colspec(self, schema=None, table_name=None): - """Return a string based 'column specification' for this - :class:`.ForeignKey`. - - This is usually the equivalent of the string-based "tablename.colname" - argument first passed to the object's constructor.
- - """ - if schema: - _schema, tname, colname = self._column_tokens - if table_name is not None: - tname = table_name - return "%s.%s.%s" % (schema, tname, colname) - elif table_name: - schema, tname, colname = self._column_tokens - if schema: - return "%s.%s.%s" % (schema, table_name, colname) - else: - return "%s.%s" % (table_name, colname) - elif self._table_column is not None: - return "%s.%s" % ( - self._table_column.table.fullname, self._table_column.key) - else: - return self._colspec - - @property - def _referred_schema(self): - return self._column_tokens[0] - - def _table_key(self): - if self._table_column is not None: - if self._table_column.table is None: - return None - else: - return self._table_column.table.key - else: - schema, tname, colname = self._column_tokens - return _get_table_key(tname, schema) - - target_fullname = property(_get_colspec) - - def references(self, table): - """Return True if the given :class:`.Table` is referenced by this - :class:`.ForeignKey`.""" - - return table.corresponding_column(self.column) is not None - - def get_referent(self, table): - """Return the :class:`.Column` in the given :class:`.Table` - referenced by this :class:`.ForeignKey`. - - Returns None if this :class:`.ForeignKey` does not reference the given - :class:`.Table`. - - """ - - return table.corresponding_column(self.column) - - @util.memoized_property - def _column_tokens(self): - """parse a string-based _colspec into its component parts.""" - - m = self._get_colspec().split('.') - if m is None: - raise exc.ArgumentError( - "Invalid foreign key column specification: %s" % - self._colspec) - if (len(m) == 1): - tname = m.pop() - colname = None - else: - colname = m.pop() - tname = m.pop() - - # A FK between column 'bar' and table 'foo' can be - # specified as 'foo', 'foo.bar', 'dbo.foo.bar', - # 'otherdb.dbo.foo.bar'. Once we have the column name and - # the table name, treat everything else as the schema - # name. Some databases (e.g. Sybase) support - # inter-database foreign keys. See tickets#1341 and -- - # indirectly related -- Ticket #594. This assumes that '.' - # will never appear *within* any component of the FK. - - if (len(m) > 0): - schema = '.'.join(m) - else: - schema = None - return schema, tname, colname - - def _resolve_col_tokens(self): - if self.parent is None: - raise exc.InvalidRequestError( - "this ForeignKey object does not yet have a " - "parent Column associated with it.") - - elif self.parent.table is None: - raise exc.InvalidRequestError( - "this ForeignKey's parent column is not yet associated " - "with a Table.") - - parenttable = self.parent.table - - # assertion, can be commented out. - # basically Column._make_proxy() sends the actual - # target Column to the ForeignKey object, so the - # string resolution here is never called. 
- for c in self.parent.base_columns: - if isinstance(c, Column): - assert c.table is parenttable - break - else: - assert False - ###################### - - schema, tname, colname = self._column_tokens - - if schema is None and parenttable.metadata.schema is not None: - schema = parenttable.metadata.schema - - tablekey = _get_table_key(tname, schema) - return parenttable, tablekey, colname - - def _link_to_col_by_colstring(self, parenttable, table, colname): - if not hasattr(self.constraint, '_referred_table'): - self.constraint._referred_table = table - else: - assert self.constraint._referred_table is table - - _column = None - if colname is None: - # colname is None in the case that ForeignKey argument - # was specified as table name only, in which case we - # match the column name to the same column on the - # parent. - key = self.parent - _column = table.c.get(self.parent.key, None) - elif self.link_to_name: - key = colname - for c in table.c: - if c.name == colname: - _column = c - else: - key = colname - _column = table.c.get(colname, None) - - if _column is None: - raise exc.NoReferencedColumnError( - "Could not initialize target column " - "for ForeignKey '%s' on table '%s': " - "table '%s' has no column named '%s'" % - (self._colspec, parenttable.name, table.name, key), - table.name, key) - - self._set_target_column(_column) - - def _set_target_column(self, column): - # propagate TypeEngine to parent if it didn't have one - if self.parent.type._isnull: - self.parent.type = column.type - - # super-edgy case, if other FKs point to our column, - # they'd get the type propagated out also. - if isinstance(self.parent.table, Table): - - def set_type(fk): - if fk.parent.type._isnull: - fk.parent.type = column.type - self.parent._setup_on_memoized_fks(set_type) - - self.column = column - - @util.memoized_property - def column(self): - """Return the target :class:`.Column` referenced by this - :class:`.ForeignKey`. - - If no target column has been established, an exception - is raised. - - .. versionchanged:: 0.9.0 - Foreign key target column resolution now occurs as soon as both - the ForeignKey object and the remote Column to which it refers - are both associated with the same MetaData object. 
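[Editor's note: a minimal sketch of the resolution described just above; the names are assumed for illustration. Once both tables belong to the same :class:`.MetaData`, the string-based target resolves to a real :class:`.Column`::

    m = MetaData()
    parent = Table('parent', m, Column('id', Integer, primary_key=True))
    child = Table('child', m, Column('pid', Integer, ForeignKey('parent.id')))
    assert list(child.c.pid.foreign_keys)[0].column is parent.c.id
]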
- - """ - - if isinstance(self._colspec, util.string_types): - - parenttable, tablekey, colname = self._resolve_col_tokens() - - if tablekey not in parenttable.metadata: - raise exc.NoReferencedTableError( - "Foreign key associated with column '%s' could not find " - "table '%s' with which to generate a " - "foreign key to target column '%s'" % - (self.parent, tablekey, colname), - tablekey) - elif parenttable.key not in parenttable.metadata: - raise exc.InvalidRequestError( - "Table %s is no longer associated with its " - "parent MetaData" % parenttable) - else: - raise exc.NoReferencedColumnError( - "Could not initialize target column for " - "ForeignKey '%s' on table '%s': " - "table '%s' has no column named '%s'" % ( - self._colspec, parenttable.name, tablekey, colname), - tablekey, colname) - elif hasattr(self._colspec, '__clause_element__'): - _column = self._colspec.__clause_element__() - return _column - else: - _column = self._colspec - return _column - - def _set_parent(self, column): - if self.parent is not None and self.parent is not column: - raise exc.InvalidRequestError( - "This ForeignKey already has a parent !") - self.parent = column - self.parent.foreign_keys.add(self) - self.parent._on_table_attach(self._set_table) - - def _set_remote_table(self, table): - parenttable, tablekey, colname = self._resolve_col_tokens() - self._link_to_col_by_colstring(parenttable, table, colname) - self.constraint._validate_dest_table(table) - - def _remove_from_metadata(self, metadata): - parenttable, table_key, colname = self._resolve_col_tokens() - fk_key = (table_key, colname) - - if self in metadata._fk_memos[fk_key]: - # TODO: no test coverage for self not in memos - metadata._fk_memos[fk_key].remove(self) - - def _set_table(self, column, table): - # standalone ForeignKey - create ForeignKeyConstraint - # on the hosting Table when attached to the Table. - if self.constraint is None and isinstance(table, Table): - self.constraint = ForeignKeyConstraint( - [], [], use_alter=self.use_alter, name=self.name, - onupdate=self.onupdate, ondelete=self.ondelete, - deferrable=self.deferrable, initially=self.initially, - match=self.match, - **self._unvalidated_dialect_kw - ) - self.constraint._append_element(column, self) - self.constraint._set_parent_with_dispatch(table) - table.foreign_keys.add(self) - - # set up remote ".column" attribute, or a note to pick it - # up when the other Table/Column shows up - if isinstance(self._colspec, util.string_types): - parenttable, table_key, colname = self._resolve_col_tokens() - fk_key = (table_key, colname) - if table_key in parenttable.metadata.tables: - table = parenttable.metadata.tables[table_key] - try: - self._link_to_col_by_colstring( - parenttable, table, colname) - except exc.NoReferencedColumnError: - # this is OK, we'll try later - pass - parenttable.metadata._fk_memos[fk_key].append(self) - elif hasattr(self._colspec, '__clause_element__'): - _column = self._colspec.__clause_element__() - self._set_target_column(_column) - else: - _column = self._colspec - self._set_target_column(_column) - - -class _NotAColumnExpr(object): - def _not_a_column_expr(self): - raise exc.InvalidRequestError( - "This %s cannot be used directly " - "as a column expression." 
% self.__class__.__name__) - - __clause_element__ = self_group = lambda self: self._not_a_column_expr() - _from_objects = property(lambda self: self._not_a_column_expr()) - - -class DefaultGenerator(_NotAColumnExpr, SchemaItem): - """Base class for column *default* values.""" - - __visit_name__ = 'default_generator' - - is_sequence = False - is_server_default = False - column = None - - def __init__(self, for_update=False): - self.for_update = for_update - - def _set_parent(self, column): - self.column = column - if self.for_update: - self.column.onupdate = self - else: - self.column.default = self - - def execute(self, bind=None, **kwargs): - if bind is None: - bind = _bind_or_error(self) - return bind._execute_default(self, **kwargs) - - @property - def bind(self): - """Return the connectable associated with this default.""" - if getattr(self, 'column', None) is not None: - return self.column.table.bind - else: - return None - - -class ColumnDefault(DefaultGenerator): - """A plain default value on a column. - - This could correspond to a constant, a callable function, - or a SQL clause. - - :class:`.ColumnDefault` is generated automatically - whenever the ``default``, ``onupdate`` arguments of - :class:`.Column` are used. A :class:`.ColumnDefault` - can be passed positionally as well. - - For example, the following:: - - Column('foo', Integer, default=50) - - Is equivalent to:: - - Column('foo', Integer, ColumnDefault(50)) - - - """ - - def __init__(self, arg, **kwargs): - """Construct a new :class:`.ColumnDefault`. - - - :param arg: argument representing the default value. - May be one of the following: - - * a plain non-callable Python value, such as a - string, integer, boolean, or other simple type. - The default value will be used as is each time. - * a SQL expression, that is one which derives from - :class:`.ColumnElement`. The SQL expression will - be rendered into the INSERT or UPDATE statement, - or in the case of a primary key column when - RETURNING is not used may be - pre-executed before an INSERT within a SELECT. - * A Python callable. The function will be invoked for each - new row subject to an INSERT or UPDATE. - The callable must accept exactly - zero or one positional arguments. The one-argument form - will receive an instance of the :class:`.ExecutionContext`, - which provides contextual information as to the current - :class:`.Connection` in use as well as the current - statement and parameters. - - """ - super(ColumnDefault, self).__init__(**kwargs) - if isinstance(arg, FetchedValue): - raise exc.ArgumentError( - "ColumnDefault may not be a server-side default type.") - if util.callable(arg): - arg = self._maybe_wrap_callable(arg) - self.arg = arg - - @util.memoized_property - def is_callable(self): - return util.callable(self.arg) - - @util.memoized_property - def is_clause_element(self): - return isinstance(self.arg, ClauseElement) - - @util.memoized_property - def is_scalar(self): - return not self.is_callable and \ - not self.is_clause_element and \ - not self.is_sequence - - def _maybe_wrap_callable(self, fn): - """Wrap callables that don't accept a context. - - This is to allow easy compatibility with default callables - that aren't specific to accepting of a context.
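[Editor's note: for instance, both callable forms described in the :class:`.ColumnDefault` docstring above are acceptable as ``default`` values; a sketch, with the ``datetime`` import and column names assumed::

    import datetime

    # zero-argument form - wrapped so that the context is ignored
    Column('created_at', DateTime, default=datetime.datetime.utcnow)

    # one-argument form - receives the ExecutionContext
    def lowered(context):
        return context.current_parameters['name'].lower()

    Column('slug', String(50), default=lowered)
]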
- - """ - try: - argspec = util.get_callable_argspec(fn, no_self=True) - except TypeError: - return lambda ctx: fn() - - defaulted = argspec[3] is not None and len(argspec[3]) or 0 - positionals = len(argspec[0]) - defaulted - - if positionals == 0: - return lambda ctx: fn() - elif positionals == 1: - return fn - else: - raise exc.ArgumentError( - "ColumnDefault Python function takes zero or one " - "positional arguments") - - def _visit_name(self): - if self.for_update: - return "column_onupdate" - else: - return "column_default" - __visit_name__ = property(_visit_name) - - def __repr__(self): - return "ColumnDefault(%r)" % self.arg - - -class Sequence(DefaultGenerator): - """Represents a named database sequence. - - The :class:`.Sequence` object represents the name and configurational - parameters of a database sequence. It also represents - a construct that can be "executed" by a SQLAlchemy :class:`.Engine` - or :class:`.Connection`, rendering the appropriate "next value" function - for the target database and returning a result. - - The :class:`.Sequence` is typically associated with a primary key column:: - - some_table = Table( - 'some_table', metadata, - Column('id', Integer, Sequence('some_table_seq'), - primary_key=True) - ) - - When CREATE TABLE is emitted for the above :class:`.Table`, if the - target platform supports sequences, a CREATE SEQUENCE statement will - be emitted as well. For platforms that don't support sequences, - the :class:`.Sequence` construct is ignored. - - .. seealso:: - - :class:`.CreateSequence` - - :class:`.DropSequence` - - """ - - __visit_name__ = 'sequence' - - is_sequence = True - - def __init__(self, name, start=None, increment=None, minvalue=None, - maxvalue=None, nominvalue=None, nomaxvalue=None, cycle=None, - schema=None, optional=False, quote=None, metadata=None, - quote_schema=None, - for_update=False): - """Construct a :class:`.Sequence` object. - - :param name: The name of the sequence. - :param start: the starting index of the sequence. This value is - used when the CREATE SEQUENCE command is emitted to the database - as the value of the "START WITH" clause. If ``None``, the - clause is omitted, which on most platforms indicates a starting - value of 1. - :param increment: the increment value of the sequence. This - value is used when the CREATE SEQUENCE command is emitted to - the database as the value of the "INCREMENT BY" clause. If ``None``, - the clause is omitted, which on most platforms indicates an - increment of 1. - :param minvalue: the minimum value of the sequence. This - value is used when the CREATE SEQUENCE command is emitted to - the database as the value of the "MINVALUE" clause. If ``None``, - the clause is omitted, which on most platforms indicates a - minvalue of 1 and -2^63-1 for ascending and descending sequences, - respectively. - - .. versionadded:: 1.0.7 - - :param maxvalue: the maximum value of the sequence. This - value is used when the CREATE SEQUENCE command is emitted to - the database as the value of the "MAXVALUE" clause. If ``None``, - the clause is omitted, which on most platforms indicates a - maxvalue of 2^63-1 and -1 for ascending and descending sequences, - respectively. - - .. versionadded:: 1.0.7 - - :param nominvalue: no minimum value of the sequence. This - value is used when the CREATE SEQUENCE command is emitted to - the database as the value of the "NO MINVALUE" clause. 
-          If ``None``,
-          the clause is omitted, which on most platforms indicates a
-          minvalue of 1 and -2^63-1 for ascending and descending sequences,
-          respectively.
-
-          .. versionadded:: 1.0.7
-
-        :param nomaxvalue: no maximum value of the sequence. This
-          value is used when the CREATE SEQUENCE command is emitted to
-          the database as the value of the "NO MAXVALUE" clause. If ``None``,
-          the clause is omitted, which on most platforms indicates a
-          maxvalue of 2^63-1 and -1 for ascending and descending sequences,
-          respectively.
-
-          .. versionadded:: 1.0.7
-
-        :param cycle: allows the sequence to wrap around when the maxvalue
-          or minvalue has been reached by an ascending or descending sequence
-          respectively. This value is used when the CREATE SEQUENCE command
-          is emitted to the database as the "CYCLE" clause. If the limit is
-          reached, the next number generated will be the minvalue or maxvalue,
-          respectively. If cycle=False (the default) any calls to nextval
-          after the sequence has reached its maximum value will return an
-          error.
-
-          .. versionadded:: 1.0.7
-
-        :param schema: Optional schema name for the sequence, if located
-          in a schema other than the default.
-        :param optional: boolean value, when ``True``, indicates that this
-          :class:`.Sequence` object only needs to be explicitly generated
-          on backends that don't provide another way to generate primary
-          key identifiers. Currently, it essentially means, "don't create
-          this sequence on the Postgresql backend, where the SERIAL keyword
-          creates a sequence for us automatically".
-        :param quote: boolean value, when ``True`` or ``False``, explicitly
-          forces quoting of the schema name on or off. When left at its
-          default of ``None``, normal quoting rules based on casing and
-          reserved words take place.
-        :param quote_schema: set the quoting preferences for the ``schema``
-          name.
-        :param metadata: optional :class:`.MetaData` object which will be
-          associated with this :class:`.Sequence`. A :class:`.Sequence`
-          that is associated with a :class:`.MetaData` gains access to the
-          ``bind`` of that :class:`.MetaData`, meaning the
-          :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods will
-          make usage of that engine automatically.
-
-          .. versionchanged:: 0.7
-            Additionally, the appropriate CREATE SEQUENCE/
-            DROP SEQUENCE DDL commands will be emitted corresponding to this
-            :class:`.Sequence` when :meth:`.MetaData.create_all` and
-            :meth:`.MetaData.drop_all` are invoked.
-
-          Note that when a :class:`.Sequence` is applied to a :class:`.Column`,
-          the :class:`.Sequence` is automatically associated with the
-          :class:`.MetaData` object of that column's parent :class:`.Table`,
-          when that association is made. The :class:`.Sequence` will then
-          be subject to automatic CREATE SEQUENCE/DROP SEQUENCE corresponding
-          to when the :class:`.Table` object itself is created or dropped,
-          rather than that of the :class:`.MetaData` object overall.
-        :param for_update: Indicates this :class:`.Sequence`, when associated
-          with a :class:`.Column`, should be invoked for UPDATE statements
-          on that column's table, rather than for INSERT statements, when
-          no value is otherwise present for that column in the statement.
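A short usage sketch for ``Sequence`` under the parameters above, assuming a backend that supports CREATE SEQUENCE (the names are hypothetical)::

    from sqlalchemy import Table, Column, Integer, MetaData, Sequence

    metadata = MetaData()

    some_table = Table(
        'some_table', metadata,
        # optional=True: skip the sequence on backends such as Postgresql
        # where SERIAL already generates primary key values
        Column('id', Integer,
               Sequence('some_table_seq', start=1, increment=1,
                        optional=True),
               primary_key=True),
    )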
- - """ - super(Sequence, self).__init__(for_update=for_update) - self.name = quoted_name(name, quote) - self.start = start - self.increment = increment - self.minvalue = minvalue - self.maxvalue = maxvalue - self.nominvalue = nominvalue - self.nomaxvalue = nomaxvalue - self.cycle = cycle - self.optional = optional - if metadata is not None and schema is None and metadata.schema: - self.schema = schema = metadata.schema - else: - self.schema = quoted_name(schema, quote_schema) - self.metadata = metadata - self._key = _get_table_key(name, schema) - if metadata: - self._set_metadata(metadata) - - @util.memoized_property - def is_callable(self): - return False - - @util.memoized_property - def is_clause_element(self): - return False - - @util.dependencies("sqlalchemy.sql.functions.func") - def next_value(self, func): - """Return a :class:`.next_value` function element - which will render the appropriate increment function - for this :class:`.Sequence` within any SQL expression. - - """ - return func.next_value(self, bind=self.bind) - - def _set_parent(self, column): - super(Sequence, self)._set_parent(column) - column._on_table_attach(self._set_table) - - def _set_table(self, column, table): - self._set_metadata(table.metadata) - - def _set_metadata(self, metadata): - self.metadata = metadata - self.metadata._sequences[self._key] = self - - @property - def bind(self): - if self.metadata: - return self.metadata.bind - else: - return None - - def create(self, bind=None, checkfirst=True): - """Creates this sequence in the database.""" - - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaGenerator, - self, - checkfirst=checkfirst) - - def drop(self, bind=None, checkfirst=True): - """Drops this sequence from the database.""" - - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaDropper, - self, - checkfirst=checkfirst) - - def _not_a_column_expr(self): - raise exc.InvalidRequestError( - "This %s cannot be used directly " - "as a column expression. Use func.next_value(sequence) " - "to produce a 'next value' function that's usable " - "as a column element." - % self.__class__.__name__) - - -@inspection._self_inspects -class FetchedValue(_NotAColumnExpr, SchemaEventTarget): - """A marker for a transparent database-side default. - - Use :class:`.FetchedValue` when the database is configured - to provide some automatic default for a column. - - E.g.:: - - Column('foo', Integer, FetchedValue()) - - Would indicate that some trigger or default generator - will create a new value for the ``foo`` column during an - INSERT. - - .. seealso:: - - :ref:`triggered_columns` - - """ - is_server_default = True - reflected = False - has_argument = False - - def __init__(self, for_update=False): - self.for_update = for_update - - def _as_for_update(self, for_update): - if for_update == self.for_update: - return self - else: - return self._clone(for_update) - - def _clone(self, for_update): - n = self.__class__.__new__(self.__class__) - n.__dict__.update(self.__dict__) - n.__dict__.pop('column', None) - n.for_update = for_update - return n - - def _set_parent(self, column): - self.column = column - if self.for_update: - self.column.server_onupdate = self - else: - self.column.server_default = self - - def __repr__(self): - return util.generic_repr(self) - - -class DefaultClause(FetchedValue): - """A DDL-specified DEFAULT column value. - - :class:`.DefaultClause` is a :class:`.FetchedValue` - that also generates a "DEFAULT" clause when - "CREATE TABLE" is emitted. 
- - :class:`.DefaultClause` is generated automatically - whenever the ``server_default``, ``server_onupdate`` arguments of - :class:`.Column` are used. A :class:`.DefaultClause` - can be passed positionally as well. - - For example, the following:: - - Column('foo', Integer, server_default="50") - - Is equivalent to:: - - Column('foo', Integer, DefaultClause("50")) - - """ - - has_argument = True - - def __init__(self, arg, for_update=False, _reflected=False): - util.assert_arg_type(arg, (util.string_types[0], - ClauseElement, - TextClause), 'arg') - super(DefaultClause, self).__init__(for_update) - self.arg = arg - self.reflected = _reflected - - def __repr__(self): - return "DefaultClause(%r, for_update=%r)" % \ - (self.arg, self.for_update) - - -class PassiveDefault(DefaultClause): - """A DDL-specified DEFAULT column value. - - .. deprecated:: 0.6 - :class:`.PassiveDefault` is deprecated. - Use :class:`.DefaultClause`. - """ - @util.deprecated("0.6", - ":class:`.PassiveDefault` is deprecated. " - "Use :class:`.DefaultClause`.", - False) - def __init__(self, *arg, **kw): - DefaultClause.__init__(self, *arg, **kw) - - -class Constraint(DialectKWArgs, SchemaItem): - """A table-level SQL constraint.""" - - __visit_name__ = 'constraint' - - def __init__(self, name=None, deferrable=None, initially=None, - _create_rule=None, info=None, _type_bound=False, - **dialect_kw): - """Create a SQL constraint. - - :param name: - Optional, the in-database name of this ``Constraint``. - - :param deferrable: - Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when - issuing DDL for this constraint. - - :param initially: - Optional string. If set, emit INITIALLY when issuing DDL - for this constraint. - - :param info: Optional data dictionary which will be populated into the - :attr:`.SchemaItem.info` attribute of this object. - - .. versionadded:: 1.0.0 - - :param _create_rule: - a callable which is passed the DDLCompiler object during - compilation. Returns True or False to signal inline generation of - this Constraint. - - The AddConstraint and DropConstraint DDL constructs provide - DDLElement's more comprehensive "conditional DDL" approach that is - passed a database connection when DDL is being issued. _create_rule - is instead called during any CREATE TABLE compilation, where there - may not be any transaction/connection in progress. However, it - allows conditional compilation of the constraint even for backends - which do not support addition of constraints through ALTER TABLE, - which currently includes SQLite. - - _create_rule is used by some types to create constraints. - Currently, its call signature is subject to change at any time. - - :param \**dialect_kw: Additional keyword arguments are dialect - specific, and passed in the form ``_``. See - the documentation regarding an individual dialect at - :ref:`dialect_toplevel` for detail on documented arguments. - - """ - - self.name = name - self.deferrable = deferrable - self.initially = initially - if info: - self.info = info - self._create_rule = _create_rule - self._type_bound = _type_bound - util.set_creation_order(self) - self._validate_dialect_kwargs(dialect_kw) - - @property - def table(self): - try: - if isinstance(self.parent, Table): - return self.parent - except AttributeError: - pass - raise exc.InvalidRequestError( - "This constraint is not bound to a table. 
Did you " - "mean to call table.append_constraint(constraint) ?") - - def _set_parent(self, parent): - self.parent = parent - parent.constraints.add(self) - - def copy(self, **kw): - raise NotImplementedError() - - -def _to_schema_column(element): - if hasattr(element, '__clause_element__'): - element = element.__clause_element__() - if not isinstance(element, Column): - raise exc.ArgumentError("schema.Column object expected") - return element - - -def _to_schema_column_or_string(element): - if hasattr(element, '__clause_element__'): - element = element.__clause_element__() - if not isinstance(element, util.string_types + (ColumnElement, )): - msg = "Element %r is not a string name or column element" - raise exc.ArgumentError(msg % element) - return element - - -class ColumnCollectionMixin(object): - - columns = None - """A :class:`.ColumnCollection` of :class:`.Column` objects. - - This collection represents the columns which are referred to by - this object. - - """ - - _allow_multiple_tables = False - - def __init__(self, *columns, **kw): - _autoattach = kw.pop('_autoattach', True) - self.columns = ColumnCollection() - self._pending_colargs = [_to_schema_column_or_string(c) - for c in columns] - if _autoattach and self._pending_colargs: - self._check_attach() - - @classmethod - def _extract_col_expression_collection(cls, expressions): - for expr in expressions: - strname = None - column = None - if not isinstance(expr, ClauseElement): - # this assumes a string - strname = expr - else: - cols = [] - visitors.traverse(expr, {}, {'column': cols.append}) - if cols: - column = cols[0] - add_element = column if column is not None else strname - yield expr, column, strname, add_element - - def _check_attach(self, evt=False): - col_objs = [ - c for c in self._pending_colargs - if isinstance(c, Column) - ] - - cols_w_table = [ - c for c in col_objs if isinstance(c.table, Table) - ] - - cols_wo_table = set(col_objs).difference(cols_w_table) - - if cols_wo_table: - # feature #3341 - place event listeners for Column objects - # such that when all those cols are attached, we autoattach. - assert not evt, "Should not reach here on event call" - - # issue #3411 - don't do the per-column auto-attach if some of the - # columns are specified as strings. - has_string_cols = set(self._pending_colargs).difference(col_objs) - if not has_string_cols: - def _col_attached(column, table): - cols_wo_table.discard(column) - if not cols_wo_table: - self._check_attach(evt=True) - self._cols_wo_table = cols_wo_table - for col in cols_wo_table: - col._on_table_attach(_col_attached) - return - - columns = cols_w_table - - tables = set([c.table for c in columns]) - if len(tables) == 1: - self._set_parent_with_dispatch(tables.pop()) - elif len(tables) > 1 and not self._allow_multiple_tables: - table = columns[0].table - others = [c for c in columns[1:] if c.table is not table] - if others: - raise exc.ArgumentError( - "Column(s) %s are not part of table '%s'." % - (", ".join("'%s'" % c for c in others), - table.description) - ) - - def _set_parent(self, table): - for col in self._pending_colargs: - if isinstance(col, util.string_types): - col = table.c[col] - self.columns.add(col) - - -class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint): - """A constraint that proxies a ColumnCollection.""" - - def __init__(self, *columns, **kw): - """ - :param \*columns: - A sequence of column names or Column objects. - - :param name: - Optional, the in-database name of this constraint. 
- - :param deferrable: - Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when - issuing DDL for this constraint. - - :param initially: - Optional string. If set, emit INITIALLY when issuing DDL - for this constraint. - - :param \**kw: other keyword arguments including dialect-specific - arguments are propagated to the :class:`.Constraint` superclass. - - """ - _autoattach = kw.pop('_autoattach', True) - Constraint.__init__(self, **kw) - ColumnCollectionMixin.__init__(self, *columns, _autoattach=_autoattach) - - def _set_parent(self, table): - Constraint._set_parent(self, table) - ColumnCollectionMixin._set_parent(self, table) - - def __contains__(self, x): - return x in self.columns - - def copy(self, **kw): - c = self.__class__(name=self.name, deferrable=self.deferrable, - initially=self.initially, *self.columns.keys()) - return self._schema_item_copy(c) - - def contains_column(self, col): - """Return True if this constraint contains the given column. - - Note that this object also contains an attribute ``.columns`` - which is a :class:`.ColumnCollection` of :class:`.Column` objects. - - """ - - return self.columns.contains_column(col) - - def __iter__(self): - # inlining of - # return iter(self.columns) - # ColumnCollection->OrderedProperties->OrderedDict - ordered_dict = self.columns._data - return (ordered_dict[key] for key in ordered_dict._list) - - def __len__(self): - return len(self.columns._data) - - -class CheckConstraint(ColumnCollectionConstraint): - """A table- or column-level CHECK constraint. - - Can be included in the definition of a Table or Column. - """ - - _allow_multiple_tables = True - - def __init__(self, sqltext, name=None, deferrable=None, - initially=None, table=None, info=None, _create_rule=None, - _autoattach=True, _type_bound=False): - """Construct a CHECK constraint. - - :param sqltext: - A string containing the constraint definition, which will be used - verbatim, or a SQL expression construct. If given as a string, - the object is converted to a :class:`.Text` object. If the textual - string includes a colon character, escape this using a backslash:: - - CheckConstraint(r"foo ~ E'a(?\:b|c)d") - - :param name: - Optional, the in-database name of the constraint. - - :param deferrable: - Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when - issuing DDL for this constraint. - - :param initially: - Optional string. If set, emit INITIALLY when issuing DDL - for this constraint. - - :param info: Optional data dictionary which will be populated into the - :attr:`.SchemaItem.info` attribute of this object. - - .. 
versionadded:: 1.0.0 - - """ - - self.sqltext = _literal_as_text(sqltext, warn=False) - - columns = [] - visitors.traverse(self.sqltext, {}, {'column': columns.append}) - - super(CheckConstraint, self).\ - __init__( - name=name, deferrable=deferrable, - initially=initially, _create_rule=_create_rule, info=info, - _type_bound=_type_bound, _autoattach=_autoattach, - *columns) - if table is not None: - self._set_parent_with_dispatch(table) - - def __visit_name__(self): - if isinstance(self.parent, Table): - return "check_constraint" - else: - return "column_check_constraint" - __visit_name__ = property(__visit_name__) - - def copy(self, target_table=None, **kw): - if target_table is not None: - def replace(col): - if self.table.c.contains_column(col): - return target_table.c[col.key] - else: - return None - sqltext = visitors.replacement_traverse(self.sqltext, {}, replace) - else: - sqltext = self.sqltext - c = CheckConstraint(sqltext, - name=self.name, - initially=self.initially, - deferrable=self.deferrable, - _create_rule=self._create_rule, - table=target_table, - _autoattach=False, - _type_bound=self._type_bound) - return self._schema_item_copy(c) - - -class ForeignKeyConstraint(ColumnCollectionConstraint): - """A table-level FOREIGN KEY constraint. - - Defines a single column or composite FOREIGN KEY ... REFERENCES - constraint. For a no-frills, single column foreign key, adding a - :class:`.ForeignKey` to the definition of a :class:`.Column` is a - shorthand equivalent for an unnamed, single column - :class:`.ForeignKeyConstraint`. - - Examples of foreign key configuration are in :ref:`metadata_foreignkeys`. - - """ - __visit_name__ = 'foreign_key_constraint' - - def __init__(self, columns, refcolumns, name=None, onupdate=None, - ondelete=None, deferrable=None, initially=None, - use_alter=False, link_to_name=False, match=None, - table=None, info=None, **dialect_kw): - """Construct a composite-capable FOREIGN KEY. - - :param columns: A sequence of local column names. The named columns - must be defined and present in the parent Table. The names should - match the ``key`` given to each column (defaults to the name) unless - ``link_to_name`` is True. - - :param refcolumns: A sequence of foreign column names or Column - objects. The columns must all be located within the same Table. - - :param name: Optional, the in-database name of the key. - - :param onupdate: Optional string. If set, emit ON UPDATE when - issuing DDL for this constraint. Typical values include CASCADE, - DELETE and RESTRICT. - - :param ondelete: Optional string. If set, emit ON DELETE when - issuing DDL for this constraint. Typical values include CASCADE, - DELETE and RESTRICT. - - :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT - DEFERRABLE when issuing DDL for this constraint. - - :param initially: Optional string. If set, emit INITIALLY when - issuing DDL for this constraint. - - :param link_to_name: if True, the string name given in ``column`` is - the rendered name of the referenced column, not its locally assigned - ``key``. - - :param use_alter: If True, do not emit the DDL for this constraint as - part of the CREATE TABLE definition. Instead, generate it via an - ALTER TABLE statement issued after the full collection of tables - have been created, and drop it via an ALTER TABLE statement before - the full collection of tables are dropped. 
- - The use of :paramref:`.ForeignKeyConstraint.use_alter` is - particularly geared towards the case where two or more tables - are established within a mutually-dependent foreign key constraint - relationship; however, the :meth:`.MetaData.create_all` and - :meth:`.MetaData.drop_all` methods will perform this resolution - automatically, so the flag is normally not needed. - - .. versionchanged:: 1.0.0 Automatic resolution of foreign key - cycles has been added, removing the need to use the - :paramref:`.ForeignKeyConstraint.use_alter` in typical use - cases. - - .. seealso:: - - :ref:`use_alter` - - :param match: Optional string. If set, emit MATCH when issuing - DDL for this constraint. Typical values include SIMPLE, PARTIAL - and FULL. - - :param info: Optional data dictionary which will be populated into the - :attr:`.SchemaItem.info` attribute of this object. - - .. versionadded:: 1.0.0 - - :param \**dialect_kw: Additional keyword arguments are dialect - specific, and passed in the form ``_``. See - the documentation regarding an individual dialect at - :ref:`dialect_toplevel` for detail on documented arguments. - - .. versionadded:: 0.9.2 - - """ - - Constraint.__init__( - self, name=name, deferrable=deferrable, initially=initially, - info=info, **dialect_kw) - self.onupdate = onupdate - self.ondelete = ondelete - self.link_to_name = link_to_name - self.use_alter = use_alter - self.match = match - - # standalone ForeignKeyConstraint - create - # associated ForeignKey objects which will be applied to hosted - # Column objects (in col.foreign_keys), either now or when attached - # to the Table for string-specified names - self.elements = [ - ForeignKey( - refcol, - _constraint=self, - name=self.name, - onupdate=self.onupdate, - ondelete=self.ondelete, - use_alter=self.use_alter, - link_to_name=self.link_to_name, - match=self.match, - deferrable=self.deferrable, - initially=self.initially, - **self.dialect_kwargs - ) for refcol in refcolumns - ] - - ColumnCollectionMixin.__init__(self, *columns) - if table is not None: - if hasattr(self, "parent"): - assert table is self.parent - self._set_parent_with_dispatch(table) - - def _append_element(self, column, fk): - self.columns.add(column) - self.elements.append(fk) - - @property - def _elements(self): - # legacy - provide a dictionary view of (column_key, fk) - return util.OrderedDict( - zip(self.column_keys, self.elements) - ) - - @property - def _referred_schema(self): - for elem in self.elements: - return elem._referred_schema - else: - return None - - @property - def referred_table(self): - """The :class:`.Table` object to which this - :class:`.ForeignKeyConstraint` references. - - This is a dynamically calculated attribute which may not be available - if the constraint and/or parent table is not yet associated with - a metadata collection that contains the referred table. - - .. versionadded:: 1.0.0 - - """ - return self.elements[0].column.table - - def _validate_dest_table(self, table): - table_keys = set([elem._table_key() - for elem in self.elements]) - if None not in table_keys and len(table_keys) > 1: - elem0, elem1 = sorted(table_keys)[0:2] - raise exc.ArgumentError( - 'ForeignKeyConstraint on %s(%s) refers to ' - 'multiple remote tables: %s and %s' % ( - table.fullname, - self._col_description, - elem0, - elem1 - )) - - @property - def column_keys(self): - """Return a list of string keys representing the local - columns in this :class:`.ForeignKeyConstraint`. 
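As a brief sketch of the composite usage this constructor enables (schema and names are hypothetical)::

    from sqlalchemy import (Table, Column, Integer, MetaData,
                            ForeignKeyConstraint)

    metadata = MetaData()

    invoice = Table(
        'invoice', metadata,
        Column('invoice_id', Integer, primary_key=True),
        Column('ref_num', Integer, primary_key=True),
    )

    invoice_item = Table(
        'invoice_item', metadata,
        Column('item_id', Integer, primary_key=True),
        Column('invoice_id', Integer),
        Column('ref_num', Integer),
        # both local columns participate in one composite FOREIGN KEY
        # referencing the two-column primary key of "invoice"
        ForeignKeyConstraint(
            ['invoice_id', 'ref_num'],
            ['invoice.invoice_id', 'invoice.ref_num'],
            ondelete='CASCADE'),
    )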
-
-        This list is either the original string arguments sent
-        to the constructor of the :class:`.ForeignKeyConstraint`,
-        or if the constraint has been initialized with :class:`.Column`
-        objects, is the string .key of each element.
-
-        .. versionadded:: 1.0.0
-
-        """
-        if hasattr(self, "parent"):
-            return self.columns.keys()
-        else:
-            return [
-                col.key if isinstance(col, ColumnElement)
-                else str(col) for col in self._pending_colargs
-            ]
-
-    @property
-    def _col_description(self):
-        return ", ".join(self.column_keys)
-
-    def _set_parent(self, table):
-        Constraint._set_parent(self, table)
-
-        try:
-            ColumnCollectionConstraint._set_parent(self, table)
-        except KeyError as ke:
-            raise exc.ArgumentError(
-                "Can't create ForeignKeyConstraint "
-                "on table '%s': no column "
-                "named '%s' is present." % (table.description, ke.args[0]))
-
-        for col, fk in zip(self.columns, self.elements):
-            if not hasattr(fk, 'parent') or \
-                    fk.parent is not col:
-                fk._set_parent_with_dispatch(col)
-
-        self._validate_dest_table(table)
-
-    def copy(self, schema=None, target_table=None, **kw):
-        fkc = ForeignKeyConstraint(
-            [x.parent.key for x in self.elements],
-            [x._get_colspec(
-                schema=schema,
-                table_name=target_table.name
-                if target_table is not None
-                and x._table_key() == x.parent.table.key
-                else None)
-             for x in self.elements],
-            name=self.name,
-            onupdate=self.onupdate,
-            ondelete=self.ondelete,
-            use_alter=self.use_alter,
-            deferrable=self.deferrable,
-            initially=self.initially,
-            link_to_name=self.link_to_name,
-            match=self.match
-        )
-        for self_fk, other_fk in zip(
-                self.elements,
-                fkc.elements):
-            self_fk._schema_item_copy(other_fk)
-        return self._schema_item_copy(fkc)
-
-
-class PrimaryKeyConstraint(ColumnCollectionConstraint):
-    """A table-level PRIMARY KEY constraint.
-
-    The :class:`.PrimaryKeyConstraint` object is present automatically
-    on any :class:`.Table` object; it is assigned a set of
-    :class:`.Column` objects corresponding to those marked with
-    the :paramref:`.Column.primary_key` flag::
-
-        >>> my_table = Table('mytable', metadata,
-        ...                  Column('id', Integer, primary_key=True),
-        ...                  Column('version_id', Integer, primary_key=True),
-        ...                  Column('data', String(50))
-        ...                  )
-        >>> my_table.primary_key
-        PrimaryKeyConstraint(
-            Column('id', Integer(), table=<mytable>,
-                   primary_key=True, nullable=False),
-            Column('version_id', Integer(), table=<mytable>,
-                   primary_key=True, nullable=False)
-        )
-
-    The primary key of a :class:`.Table` can also be specified by using
-    a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
-    the "name" of the constraint can also be specified, as well as other
-    options which may be recognized by dialects::
-
-        my_table = Table('mytable', metadata,
-                         Column('id', Integer),
-                         Column('version_id', Integer),
-                         Column('data', String(50)),
-                         PrimaryKeyConstraint('id', 'version_id',
-                                              name='mytable_pk')
-                         )
-
-    The two styles of column-specification should generally not be mixed.
-    A warning is emitted if the columns present in the
-    :class:`.PrimaryKeyConstraint`
-    don't match the columns that were marked as ``primary_key=True``, if both
-    are present; in this case, the columns are taken strictly from the
-    :class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
-    marked as ``primary_key=True`` are ignored. This behavior is intended to
-    be backwards compatible with previous behavior.
-
-    ..
versionchanged:: 0.9.2 Using a mixture of columns within a - :class:`.PrimaryKeyConstraint` in addition to columns marked as - ``primary_key=True`` now emits a warning if the lists don't match. - The ultimate behavior of ignoring those columns marked with the flag - only is currently maintained for backwards compatibility; this warning - may raise an exception in a future release. - - For the use case where specific options are to be specified on the - :class:`.PrimaryKeyConstraint`, but the usual style of using - ``primary_key=True`` flags is still desirable, an empty - :class:`.PrimaryKeyConstraint` may be specified, which will take on the - primary key column collection from the :class:`.Table` based on the - flags:: - - my_table = Table('mytable', metadata, - Column('id', Integer, primary_key=True), - Column('version_id', Integer, primary_key=True), - Column('data', String(50)), - PrimaryKeyConstraint(name='mytable_pk', - mssql_clustered=True) - ) - - .. versionadded:: 0.9.2 an empty :class:`.PrimaryKeyConstraint` may now - be specified for the purposes of establishing keyword arguments with - the constraint, independently of the specification of "primary key" - columns within the :class:`.Table` itself; columns marked as - ``primary_key=True`` will be gathered into the empty constraint's - column collection. - - """ - - __visit_name__ = 'primary_key_constraint' - - def _set_parent(self, table): - super(PrimaryKeyConstraint, self)._set_parent(table) - - if table.primary_key is not self: - table.constraints.discard(table.primary_key) - table.primary_key = self - table.constraints.add(self) - - table_pks = [c for c in table.c if c.primary_key] - if self.columns and table_pks and \ - set(table_pks) != set(self.columns.values()): - util.warn( - "Table '%s' specifies columns %s as primary_key=True, " - "not matching locally specified columns %s; setting the " - "current primary key columns to %s. This warning " - "may become an exception in a future release" % - ( - table.name, - ", ".join("'%s'" % c.name for c in table_pks), - ", ".join("'%s'" % c.name for c in self.columns), - ", ".join("'%s'" % c.name for c in self.columns) - ) - ) - table_pks[:] = [] - - for c in self.columns: - c.primary_key = True - c.nullable = False - self.columns.extend(table_pks) - - def _reload(self, columns): - """repopulate this :class:`.PrimaryKeyConstraint` given - a set of columns. - - Existing columns in the table that are marked as primary_key=True - are maintained. - - Also fires a new event. - - This is basically like putting a whole new - :class:`.PrimaryKeyConstraint` object on the parent - :class:`.Table` object without actually replacing the object. - - The ordering of the given list of columns is also maintained; these - columns will be appended to the list of columns after any which - are already present. - - """ - - # set the primary key flag on new columns. - # note any existing PK cols on the table also have their - # flag still set. - for col in columns: - col.primary_key = True - - self.columns.extend(columns) - - self._set_parent_with_dispatch(self.table) - - def _replace(self, col): - self.columns.replace(col) - - -class UniqueConstraint(ColumnCollectionConstraint): - """A table-level UNIQUE constraint. - - Defines a single column or composite UNIQUE constraint. For a no-frills, - single column constraint, adding ``unique=True`` to the ``Column`` - definition is a shorthand equivalent for an unnamed, single column - UniqueConstraint. 
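A minimal sketch combining an explicit, named primary key with a table-level unique constraint (names are hypothetical)::

    from sqlalchemy import (Table, Column, Integer, String, MetaData,
                            PrimaryKeyConstraint, UniqueConstraint)

    metadata = MetaData()

    user = Table(
        'user', metadata,
        Column('id', Integer),
        Column('version_id', Integer),
        Column('email', String(100)),
        # explicit constraint objects allow names and dialect options
        PrimaryKeyConstraint('id', 'version_id', name='user_pk'),
        UniqueConstraint('email', name='uq_user_email'),
    )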
- """ - - __visit_name__ = 'unique_constraint' - - -class Index(DialectKWArgs, ColumnCollectionMixin, SchemaItem): - """A table-level INDEX. - - Defines a composite (one or more column) INDEX. - - E.g.:: - - sometable = Table("sometable", metadata, - Column("name", String(50)), - Column("address", String(100)) - ) - - Index("some_index", sometable.c.name) - - For a no-frills, single column index, adding - :class:`.Column` also supports ``index=True``:: - - sometable = Table("sometable", metadata, - Column("name", String(50), index=True) - ) - - For a composite index, multiple columns can be specified:: - - Index("some_index", sometable.c.name, sometable.c.address) - - Functional indexes are supported as well, typically by using the - :data:`.func` construct in conjunction with table-bound - :class:`.Column` objects:: - - Index("some_index", func.lower(sometable.c.name)) - - .. versionadded:: 0.8 support for functional and expression-based indexes. - - An :class:`.Index` can also be manually associated with a :class:`.Table`, - either through inline declaration or using - :meth:`.Table.append_constraint`. When this approach is used, the names - of the indexed columns can be specified as strings:: - - Table("sometable", metadata, - Column("name", String(50)), - Column("address", String(100)), - Index("some_index", "name", "address") - ) - - To support functional or expression-based indexes in this form, the - :func:`.text` construct may be used:: - - from sqlalchemy import text - - Table("sometable", metadata, - Column("name", String(50)), - Column("address", String(100)), - Index("some_index", text("lower(name)")) - ) - - .. versionadded:: 0.9.5 the :func:`.text` construct may be used to - specify :class:`.Index` expressions, provided the :class:`.Index` - is explicitly associated with the :class:`.Table`. - - - .. seealso:: - - :ref:`schema_indexes` - General information on :class:`.Index`. - - :ref:`postgresql_indexes` - PostgreSQL-specific options available for - the :class:`.Index` construct. - - :ref:`mysql_indexes` - MySQL-specific options available for the - :class:`.Index` construct. - - :ref:`mssql_indexes` - MSSQL-specific options available for the - :class:`.Index` construct. - - """ - - __visit_name__ = 'index' - - def __init__(self, name, *expressions, **kw): - """Construct an index object. - - :param name: - The name of the index - - :param \*expressions: - Column expressions to include in the index. The expressions - are normally instances of :class:`.Column`, but may also - be arbitrary SQL expressions which ultimately refer to a - :class:`.Column`. - - :param unique=False: - Keyword only argument; if True, create a unique index. - - :param quote=None: - Keyword only argument; whether to apply quoting to the name of - the index. Works in the same manner as that of - :paramref:`.Column.quote`. - - :param info=None: Optional data dictionary which will be populated - into the :attr:`.SchemaItem.info` attribute of this object. - - .. versionadded:: 1.0.0 - - :param \**kw: Additional keyword arguments not mentioned above are - dialect specific, and passed in the form - ``_``. See the documentation regarding an - individual dialect at :ref:`dialect_toplevel` for detail on - documented arguments. 
- - """ - self.table = None - - columns = [] - for expr, column, strname, add_element in self.\ - _extract_col_expression_collection(expressions): - if add_element is not None: - columns.append(add_element) - - self.expressions = expressions - self.name = quoted_name(name, kw.pop("quote", None)) - self.unique = kw.pop('unique', False) - if 'info' in kw: - self.info = kw.pop('info') - self._validate_dialect_kwargs(kw) - - # will call _set_parent() if table-bound column - # objects are present - ColumnCollectionMixin.__init__(self, *columns) - - def _set_parent(self, table): - ColumnCollectionMixin._set_parent(self, table) - - if self.table is not None and table is not self.table: - raise exc.ArgumentError( - "Index '%s' is against table '%s', and " - "cannot be associated with table '%s'." % ( - self.name, - self.table.description, - table.description - ) - ) - self.table = table - table.indexes.add(self) - - self.expressions = [ - expr if isinstance(expr, ClauseElement) - else colexpr - for expr, colexpr in util.zip_longest(self.expressions, - self.columns) - ] - - @property - def bind(self): - """Return the connectable associated with this Index.""" - - return self.table.bind - - def create(self, bind=None): - """Issue a ``CREATE`` statement for this - :class:`.Index`, using the given :class:`.Connectable` - for connectivity. - - .. seealso:: - - :meth:`.MetaData.create_all`. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaGenerator, self) - return self - - def drop(self, bind=None): - """Issue a ``DROP`` statement for this - :class:`.Index`, using the given :class:`.Connectable` - for connectivity. - - .. seealso:: - - :meth:`.MetaData.drop_all`. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaDropper, self) - - def __repr__(self): - return 'Index(%s)' % ( - ", ".join( - [repr(self.name)] + - [repr(e) for e in self.expressions] + - (self.unique and ["unique=True"] or []) - )) - - -DEFAULT_NAMING_CONVENTION = util.immutabledict({ - "ix": 'ix_%(column_0_label)s' -}) - - -class MetaData(SchemaItem): - """A collection of :class:`.Table` objects and their associated schema - constructs. - - Holds a collection of :class:`.Table` objects as well as - an optional binding to an :class:`.Engine` or - :class:`.Connection`. If bound, the :class:`.Table` objects - in the collection and their columns may participate in implicit SQL - execution. - - The :class:`.Table` objects themselves are stored in the - :attr:`.MetaData.tables` dictionary. - - :class:`.MetaData` is a thread-safe object for read operations. - Construction of new tables within a single :class:`.MetaData` object, - either explicitly or via reflection, may not be completely thread-safe. - - .. seealso:: - - :ref:`metadata_describing` - Introduction to database metadata - - """ - - __visit_name__ = 'metadata' - - def __init__(self, bind=None, reflect=False, schema=None, - quote_schema=None, - naming_convention=DEFAULT_NAMING_CONVENTION, - info=None - ): - """Create a new MetaData object. - - :param bind: - An Engine or Connection to bind to. May also be a string or URL - instance, these are passed to create_engine() and this MetaData will - be bound to the resulting engine. - - :param reflect: - Optional, automatically load all tables from the bound database. - Defaults to False. ``bind`` is required when this option is set. - - .. deprecated:: 0.8 - Please use the :meth:`.MetaData.reflect` method. 
-
-        :param schema:
-           The default schema to use for the :class:`.Table`,
-           :class:`.Sequence`, and other objects associated with this
-           :class:`.MetaData`. Defaults to ``None``.
-
-        :param quote_schema:
-            Sets the ``quote_schema`` flag for those :class:`.Table`,
-            :class:`.Sequence`, and other objects which make usage of the
-            local ``schema`` name.
-
-        :param info: Optional data dictionary which will be populated into the
-            :attr:`.SchemaItem.info` attribute of this object.
-
-            .. versionadded:: 1.0.0
-
-        :param naming_convention: a dictionary referring to values which
-          will establish default naming conventions for :class:`.Constraint`
-          and :class:`.Index` objects, for those objects which are not given
-          a name explicitly.
-
-          The keys of this dictionary may be:
-
-          * a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
-            :class:`.ForeignKeyConstraint` class, the :class:`.Index` class
-
-          * a string mnemonic for one of the known constraint classes;
-            ``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
-            primary key, index, check, and unique constraint, respectively.
-
-          * the string name of a user-defined "token" that can be used
-            to define new naming tokens.
-
-          The values associated with each "constraint class" or "constraint
-          mnemonic" key are string naming templates, such as
-          ``"uq_%(table_name)s_%(column_0_name)s"``,
-          which describe how the name should be composed.  The values
-          associated with user-defined "token" keys should be callables of the
-          form ``fn(constraint, table)``, which accepts the constraint/index
-          object and :class:`.Table` as arguments, returning a string
-          result.
-
-          The built-in names are as follows, some of which may only be
-          available for certain types of constraint:
-
-            * ``%(table_name)s`` - the name of the :class:`.Table` object
-              associated with the constraint.
-
-            * ``%(referred_table_name)s`` - the name of the :class:`.Table`
-              object associated with the referencing target of a
-              :class:`.ForeignKeyConstraint`.
-
-            * ``%(column_0_name)s`` - the name of the :class:`.Column` at
-              index position "0" within the constraint.
-
-            * ``%(column_0_label)s`` - the label of the :class:`.Column` at
-              index position "0", e.g. :attr:`.Column.label`
-
-            * ``%(column_0_key)s`` - the key of the :class:`.Column` at
-              index position "0", e.g. :attr:`.Column.key`
-
-            * ``%(referred_column_0_name)s`` - the name of a :class:`.Column`
-              at index position "0" referenced by a
-              :class:`.ForeignKeyConstraint`.
-
-            * ``%(constraint_name)s`` - a special key that refers to the
-              existing name given to the constraint.  When this key is
-              present, the :class:`.Constraint` object's existing name will be
-              replaced with one that is composed from template string that
-              uses this token. When this token is present, it is required that
-              the :class:`.Constraint` is given an explicit name ahead of time.
-
-            * user-defined: any additional token may be implemented by passing
-              it along with a ``fn(constraint, table)`` callable to the
-              naming_convention dictionary.
-
-          .. versionadded:: 0.9.2
-
-          .. seealso::
-
-                :ref:`constraint_naming_conventions` - for detailed usage
-                examples.
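A condensed sketch of such a convention dictionary in use; the token templates follow the list above, and the table is hypothetical::

    from sqlalchemy import (Table, Column, Integer, MetaData,
                            UniqueConstraint)

    metadata = MetaData(naming_convention={
        "ix": "ix_%(column_0_label)s",
        "uq": "uq_%(table_name)s_%(column_0_name)s",
        "pk": "pk_%(table_name)s",
    })

    account = Table(
        'account', metadata,
        Column('id', Integer, primary_key=True),
        Column('code', Integer),
        # no explicit name: the convention yields "uq_account_code"
        UniqueConstraint('code'),
    )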
- - """ - self.tables = util.immutabledict() - self.schema = quoted_name(schema, quote_schema) - self.naming_convention = naming_convention - if info: - self.info = info - self._schemas = set() - self._sequences = {} - self._fk_memos = collections.defaultdict(list) - - self.bind = bind - if reflect: - util.warn_deprecated("reflect=True is deprecate; please " - "use the reflect() method.") - if not bind: - raise exc.ArgumentError( - "A bind must be supplied in conjunction " - "with reflect=True") - self.reflect() - - tables = None - """A dictionary of :class:`.Table` objects keyed to their name or "table key". - - The exact key is that determined by the :attr:`.Table.key` attribute; - for a table with no :attr:`.Table.schema` attribute, this is the same - as :attr:`.Table.name`. For a table with a schema, it is typically of the - form ``schemaname.tablename``. - - .. seealso:: - - :attr:`.MetaData.sorted_tables` - - """ - - def __repr__(self): - return 'MetaData(bind=%r)' % self.bind - - def __contains__(self, table_or_key): - if not isinstance(table_or_key, util.string_types): - table_or_key = table_or_key.key - return table_or_key in self.tables - - def _add_table(self, name, schema, table): - key = _get_table_key(name, schema) - dict.__setitem__(self.tables, key, table) - if schema: - self._schemas.add(schema) - - def _remove_table(self, name, schema): - key = _get_table_key(name, schema) - removed = dict.pop(self.tables, key, None) - if removed is not None: - for fk in removed.foreign_keys: - fk._remove_from_metadata(self) - if self._schemas: - self._schemas = set([t.schema - for t in self.tables.values() - if t.schema is not None]) - - def __getstate__(self): - return {'tables': self.tables, - 'schema': self.schema, - 'schemas': self._schemas, - 'sequences': self._sequences, - 'fk_memos': self._fk_memos, - 'naming_convention': self.naming_convention - } - - def __setstate__(self, state): - self.tables = state['tables'] - self.schema = state['schema'] - self.naming_convention = state['naming_convention'] - self._bind = None - self._sequences = state['sequences'] - self._schemas = state['schemas'] - self._fk_memos = state['fk_memos'] - - def is_bound(self): - """True if this MetaData is bound to an Engine or Connection.""" - - return self._bind is not None - - def bind(self): - """An :class:`.Engine` or :class:`.Connection` to which this - :class:`.MetaData` is bound. - - Typically, a :class:`.Engine` is assigned to this attribute - so that "implicit execution" may be used, or alternatively - as a means of providing engine binding information to an - ORM :class:`.Session` object:: - - engine = create_engine("someurl://") - metadata.bind = engine - - .. seealso:: - - :ref:`dbengine_implicit` - background on "bound metadata" - - """ - return self._bind - - @util.dependencies("sqlalchemy.engine.url") - def _bind_to(self, url, bind): - """Bind this MetaData to an Engine, Connection, string or URL.""" - - if isinstance(bind, util.string_types + (url.URL, )): - self._bind = sqlalchemy.create_engine(bind) - else: - self._bind = bind - bind = property(bind, _bind_to) - - def clear(self): - """Clear all Table objects from this MetaData.""" - - dict.clear(self.tables) - self._schemas.clear() - self._fk_memos.clear() - - def remove(self, table): - """Remove the given Table object from this MetaData.""" - - self._remove_table(table.name, table.schema) - - @property - def sorted_tables(self): - """Returns a list of :class:`.Table` objects sorted in order of - foreign key dependency. 
- - The sorting will place :class:`.Table` objects that have dependencies - first, before the dependencies themselves, representing the - order in which they can be created. To get the order in which - the tables would be dropped, use the ``reversed()`` Python built-in. - - .. warning:: - - The :attr:`.sorted_tables` accessor cannot by itself accommodate - automatic resolution of dependency cycles between tables, which - are usually caused by mutually dependent foreign key constraints. - To resolve these cycles, either the - :paramref:`.ForeignKeyConstraint.use_alter` parameter may be appled - to those constraints, or use the - :func:`.schema.sort_tables_and_constraints` function which will break - out foreign key constraints involved in cycles separately. - - .. seealso:: - - :func:`.schema.sort_tables` - - :func:`.schema.sort_tables_and_constraints` - - :attr:`.MetaData.tables` - - :meth:`.Inspector.get_table_names` - - :meth:`.Inspector.get_sorted_table_and_fkc_names` - - - """ - return ddl.sort_tables(sorted(self.tables.values(), key=lambda t: t.key)) - - def reflect(self, bind=None, schema=None, views=False, only=None, - extend_existing=False, - autoload_replace=True, - **dialect_kwargs): - """Load all available table definitions from the database. - - Automatically creates ``Table`` entries in this ``MetaData`` for any - table available in the database but not yet present in the - ``MetaData``. May be called multiple times to pick up tables recently - added to the database, however no special action is taken if a table - in this ``MetaData`` no longer exists in the database. - - :param bind: - A :class:`.Connectable` used to access the database; if None, uses - the existing bind on this ``MetaData``, if any. - - :param schema: - Optional, query and reflect tables from an alterate schema. - If None, the schema associated with this :class:`.MetaData` - is used, if any. - - :param views: - If True, also reflect views. - - :param only: - Optional. Load only a sub-set of available named tables. May be - specified as a sequence of names or a callable. - - If a sequence of names is provided, only those tables will be - reflected. An error is raised if a table is requested but not - available. Named tables already present in this ``MetaData`` are - ignored. - - If a callable is provided, it will be used as a boolean predicate to - filter the list of potential table names. The callable is called - with a table name and this ``MetaData`` instance as positional - arguments and should return a true value for any table to reflect. - - :param extend_existing: Passed along to each :class:`.Table` as - :paramref:`.Table.extend_existing`. - - .. versionadded:: 0.9.1 - - :param autoload_replace: Passed along to each :class:`.Table` as - :paramref:`.Table.autoload_replace`. - - .. versionadded:: 0.9.1 - - :param \**dialect_kwargs: Additional keyword arguments not mentioned - above are dialect specific, and passed in the form - ``_``. See the documentation regarding an - individual dialect at :ref:`dialect_toplevel` for detail on - documented arguments. - - .. versionadded:: 0.9.2 - Added - :paramref:`.MetaData.reflect.**dialect_kwargs` to support - dialect-level reflection options for all :class:`.Table` - objects reflected. 
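A minimal round-trip sketch of ``reflect()`` using an in-memory SQLite database (the table name is hypothetical)::

    from sqlalchemy import MetaData, Table, Column, Integer, create_engine

    engine = create_engine('sqlite://')
    source = MetaData()
    Table('stuff', source, Column('id', Integer, primary_key=True))
    source.create_all(engine)

    # a fresh MetaData picks the table definition back up from the
    # database; no Table declaration is needed on this side
    reflected = MetaData()
    reflected.reflect(bind=engine)
    stuff = reflected.tables['stuff']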
- - """ - if bind is None: - bind = _bind_or_error(self) - - with bind.connect() as conn: - - reflect_opts = { - 'autoload': True, - 'autoload_with': conn, - 'extend_existing': extend_existing, - 'autoload_replace': autoload_replace - } - - reflect_opts.update(dialect_kwargs) - - if schema is None: - schema = self.schema - - if schema is not None: - reflect_opts['schema'] = schema - - available = util.OrderedSet( - bind.engine.table_names(schema, connection=conn)) - if views: - available.update( - bind.dialect.get_view_names(conn, schema) - ) - - if schema is not None: - available_w_schema = util.OrderedSet(["%s.%s" % (schema, name) - for name in available]) - else: - available_w_schema = available - - current = set(self.tables) - - if only is None: - load = [name for name, schname in - zip(available, available_w_schema) - if extend_existing or schname not in current] - elif util.callable(only): - load = [name for name, schname in - zip(available, available_w_schema) - if (extend_existing or schname not in current) - and only(name, self)] - else: - missing = [name for name in only if name not in available] - if missing: - s = schema and (" schema '%s'" % schema) or '' - raise exc.InvalidRequestError( - 'Could not reflect: requested table(s) not available ' - 'in %s%s: (%s)' % - (bind.engine.url, s, ', '.join(missing))) - load = [name for name in only if extend_existing or - name not in current] - - for name in load: - Table(name, self, **reflect_opts) - - def append_ddl_listener(self, event_name, listener): - """Append a DDL event listener to this ``MetaData``. - - .. deprecated:: 0.7 - See :class:`.DDLEvents`. - - """ - def adapt_listener(target, connection, **kw): - tables = kw['tables'] - listener(event, target, connection, tables=tables) - - event.listen(self, "" + event_name.replace('-', '_'), adapt_listener) - - def create_all(self, bind=None, tables=None, checkfirst=True): - """Create all tables stored in this metadata. - - Conditional by default, will not attempt to recreate tables already - present in the target database. - - :param bind: - A :class:`.Connectable` used to access the - database; if None, uses the existing bind on this ``MetaData``, if - any. - - :param tables: - Optional list of ``Table`` objects, which is a subset of the total - tables in the ``MetaData`` (others are ignored). - - :param checkfirst: - Defaults to True, don't issue CREATEs for tables already present - in the target database. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaGenerator, - self, - checkfirst=checkfirst, - tables=tables) - - def drop_all(self, bind=None, tables=None, checkfirst=True): - """Drop all tables stored in this metadata. - - Conditional by default, will not attempt to drop tables not present in - the target database. - - :param bind: - A :class:`.Connectable` used to access the - database; if None, uses the existing bind on this ``MetaData``, if - any. - - :param tables: - Optional list of ``Table`` objects, which is a subset of the - total tables in the ``MetaData`` (others are ignored). - - :param checkfirst: - Defaults to True, only issue DROPs for tables confirmed to be - present in the target database. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaDropper, - self, - checkfirst=checkfirst, - tables=tables) - - -class ThreadLocalMetaData(MetaData): - """A MetaData variant that presents a different ``bind`` in every thread. 
-
-    Makes the ``bind`` property of the MetaData a thread-local value, allowing
-    this collection of tables to be bound to different ``Engine``
-    implementations or connections in each thread.
-
-    The ThreadLocalMetaData starts off bound to None in each thread.  Binds
-    must be made explicitly by assigning to the ``bind`` property or using
-    ``connect()``.  You can also re-bind dynamically multiple times per
-    thread, just like a regular ``MetaData``.
-
-    """
-
-    __visit_name__ = 'metadata'
-
-    def __init__(self):
-        """Construct a ThreadLocalMetaData."""
-
-        self.context = util.threading.local()
-        self.__engines = {}
-        super(ThreadLocalMetaData, self).__init__()
-
-    def bind(self):
-        """The bound Engine or Connection for this thread.
-
-        This property may be assigned an Engine or Connection, or assigned a
-        string or URL to automatically create a basic Engine for this bind
-        with ``create_engine()``."""
-
-        return getattr(self.context, '_engine', None)
-
-    @util.dependencies("sqlalchemy.engine.url")
-    def _bind_to(self, url, bind):
-        """Bind to a Connectable in the caller's thread."""
-
-        if isinstance(bind, util.string_types + (url.URL, )):
-            try:
-                self.context._engine = self.__engines[bind]
-            except KeyError:
-                e = sqlalchemy.create_engine(bind)
-                self.__engines[bind] = e
-                self.context._engine = e
-        else:
-            # TODO: this is squirrely.  we shouldn't have to hold onto engines
-            # in a case like this
-            if bind not in self.__engines:
-                self.__engines[bind] = bind
-            self.context._engine = bind
-
-    bind = property(bind, _bind_to)
-
-    def is_bound(self):
-        """True if there is a bind for this thread."""
-        return (hasattr(self.context, '_engine') and
-                self.context._engine is not None)
-
-    def dispose(self):
-        """Dispose all bound engines, in all thread contexts."""
-
-        for e in self.__engines.values():
-            if hasattr(e, 'dispose'):
-                e.dispose()
diff --git a/python/sqlalchemy/sql/selectable.py b/python/sqlalchemy/sql/selectable.py
deleted file mode 100644
index bfba35de..00000000
--- a/python/sqlalchemy/sql/selectable.py
+++ /dev/null
@@ -1,3436 +0,0 @@
-# sql/selectable.py
-# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""The :class:`.FromClause` class of SQL expression elements, representing
-SQL tables and derived rowsets.
-
-"""
-
-from .elements import ClauseElement, TextClause, ClauseList, \
-    and_, Grouping, UnaryExpression, literal_column, BindParameter
-from .elements import _clone, \
-    _literal_as_text, _interpret_as_column_or_from, _expand_cloned,\
-    _select_iterables, _anonymous_label, _clause_element_as_expr,\
-    _cloned_intersection, _cloned_difference, True_, \
-    _literal_as_label_reference, _literal_and_labels_as_label_reference
-from .base import Immutable, Executable, _generative, \
-    ColumnCollection, ColumnSet, _from_objects, Generative
-from . import type_api
-from .. import inspection
-from .. import util
-from .. import exc
-from operator import attrgetter
-from .
import operators -import operator -import collections -from .annotation import Annotated -import itertools -from sqlalchemy.sql.visitors import Visitable - - -def _interpret_as_from(element): - insp = inspection.inspect(element, raiseerr=False) - if insp is None: - if isinstance(element, util.string_types): - util.warn_limited( - "Textual SQL FROM expression %(expr)r should be " - "explicitly declared as text(%(expr)r), " - "or use table(%(expr)r) for more specificity", - {"expr": util.ellipses_string(element)}) - - return TextClause(util.text_type(element)) - try: - return insp.selectable - except AttributeError: - raise exc.ArgumentError("FROM expression expected") - - -def _interpret_as_select(element): - element = _interpret_as_from(element) - if isinstance(element, Alias): - element = element.original - if not isinstance(element, SelectBase): - element = element.select() - return element - - -class _OffsetLimitParam(BindParameter): - @property - def _limit_offset_value(self): - return self.effective_value - - -def _offset_or_limit_clause(element, name=None, type_=None): - """Convert the given value to an "offset or limit" clause. - - This handles incoming integers and converts to an expression; if - an expression is already given, it is passed through. - - """ - if element is None: - return None - elif hasattr(element, '__clause_element__'): - return element.__clause_element__() - elif isinstance(element, Visitable): - return element - else: - value = util.asint(element) - return _OffsetLimitParam(name, value, type_=type_, unique=True) - - -def _offset_or_limit_clause_asint(clause, attrname): - """Convert the "offset or limit" clause of a select construct to an - integer. - - This is only possible if the value is stored as a simple bound parameter. - Otherwise, a compilation error is raised. - - """ - if clause is None: - return None - try: - value = clause._limit_offset_value - except AttributeError: - raise exc.CompileError( - "This SELECT structure does not use a simple " - "integer value for %s" % attrname) - else: - return util.asint(value) - - -def subquery(alias, *args, **kwargs): - """Return an :class:`.Alias` object derived - from a :class:`.Select`. - - name - alias name - - \*args, \**kwargs - - all other arguments are delivered to the - :func:`select` function. - - """ - return Select(*args, **kwargs).alias(alias) - - -def alias(selectable, name=None, flat=False): - """Return an :class:`.Alias` object. - - An :class:`.Alias` represents any :class:`.FromClause` - with an alternate name assigned within SQL, typically using the ``AS`` - clause when generated, e.g. ``SELECT * FROM table AS aliasname``. - - Similar functionality is available via the - :meth:`~.FromClause.alias` method - available on all :class:`.FromClause` subclasses. - - When an :class:`.Alias` is created from a :class:`.Table` object, - this has the effect of the table being rendered - as ``tablename AS aliasname`` in a SELECT statement. - - For :func:`.select` objects, the effect is that of creating a named - subquery, i.e. ``(select ...) AS aliasname``. - - The ``name`` parameter is optional, and provides the name - to use in the rendered SQL. If blank, an "anonymous" name - will be deterministically generated at compile time. - Deterministic means the name is guaranteed to be unique against - other constructs used in the same statement, and will also be the - same name for each successive compilation of the same statement - object. 
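A tiny sketch of aliasing a table and selecting from the alias (the table is hypothetical)::

    from sqlalchemy import Table, Column, Integer, MetaData, select
    from sqlalchemy.sql import alias

    metadata = MetaData()
    user = Table('user', metadata, Column('id', Integer, primary_key=True))

    # renders as: "user" AS u1
    u1 = alias(user, name='u1')
    stmt = select([u1.c.id]).where(u1.c.id > 5)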
- - :param selectable: any :class:`.FromClause` subclass, - such as a table, select statement, etc. - - :param name: string name to be assigned as the alias. - If ``None``, a name will be deterministically generated - at compile time. - - :param flat: Will be passed through to if the given selectable - is an instance of :class:`.Join` - see :meth:`.Join.alias` - for details. - - .. versionadded:: 0.9.0 - - """ - return selectable.alias(name=name, flat=flat) - - -class Selectable(ClauseElement): - """mark a class as being selectable""" - __visit_name__ = 'selectable' - - is_selectable = True - - @property - def selectable(self): - return self - - -class HasPrefixes(object): - _prefixes = () - - @_generative - def prefix_with(self, *expr, **kw): - """Add one or more expressions following the statement keyword, i.e. - SELECT, INSERT, UPDATE, or DELETE. Generative. - - This is used to support backend-specific prefix keywords such as those - provided by MySQL. - - E.g.:: - - stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql") - - Multiple prefixes can be specified by multiple calls - to :meth:`.prefix_with`. - - :param \*expr: textual or :class:`.ClauseElement` construct which - will be rendered following the INSERT, UPDATE, or DELETE - keyword. - :param \**kw: A single keyword 'dialect' is accepted. This is an - optional string dialect name which will - limit rendering of this prefix to only that dialect. - - """ - dialect = kw.pop('dialect', None) - if kw: - raise exc.ArgumentError("Unsupported argument(s): %s" % - ",".join(kw)) - self._setup_prefixes(expr, dialect) - - def _setup_prefixes(self, prefixes, dialect=None): - self._prefixes = self._prefixes + tuple( - [(_literal_as_text(p, warn=False), dialect) for p in prefixes]) - - -class HasSuffixes(object): - _suffixes = () - - @_generative - def suffix_with(self, *expr, **kw): - """Add one or more expressions following the statement as a whole. - - This is used to support backend-specific suffix keywords on - certain constructs. - - E.g.:: - - stmt = select([col1, col2]).cte().suffix_with( - "cycle empno set y_cycle to 1 default 0", dialect="oracle") - - Multiple suffixes can be specified by multiple calls - to :meth:`.suffix_with`. - - :param \*expr: textual or :class:`.ClauseElement` construct which - will be rendered following the target clause. - :param \**kw: A single keyword 'dialect' is accepted. This is an - optional string dialect name which will - limit rendering of this suffix to only that dialect. - - """ - dialect = kw.pop('dialect', None) - if kw: - raise exc.ArgumentError("Unsupported argument(s): %s" % - ",".join(kw)) - self._setup_suffixes(expr, dialect) - - def _setup_suffixes(self, suffixes, dialect=None): - self._suffixes = self._suffixes + tuple( - [(_literal_as_text(p, warn=False), dialect) for p in suffixes]) - - -class FromClause(Selectable): - """Represent an element that can be used within the ``FROM`` - clause of a ``SELECT`` statement. - - The most common forms of :class:`.FromClause` are the - :class:`.Table` and the :func:`.select` constructs. Key - features common to all :class:`.FromClause` objects include: - - * a :attr:`.c` collection, which provides per-name access to a collection - of :class:`.ColumnElement` objects. - * a :attr:`.primary_key` attribute, which is a collection of all those - :class:`.ColumnElement` objects that indicate the ``primary_key`` flag. 
- * Methods to generate various derivations of a "from" clause, including - :meth:`.FromClause.alias`, :meth:`.FromClause.join`, - :meth:`.FromClause.select`. - - - """ - __visit_name__ = 'fromclause' - named_with_column = False - _hide_froms = [] - - _is_join = False - _is_select = False - _is_from_container = False - - _textual = False - """a marker that allows us to easily distinguish a :class:`.TextAsFrom` - or similar object from other kinds of :class:`.FromClause` objects.""" - - schema = None - """Define the 'schema' attribute for this :class:`.FromClause`. - - This is typically ``None`` for most objects except that of - :class:`.Table`, where it is taken as the value of the - :paramref:`.Table.schema` argument. - - """ - - _memoized_property = util.group_expirable_memoized_property(["_columns"]) - - @util.dependencies("sqlalchemy.sql.functions") - def count(self, functions, whereclause=None, **params): - """return a SELECT COUNT generated against this - :class:`.FromClause`.""" - - if self.primary_key: - col = list(self.primary_key)[0] - else: - col = list(self.columns)[0] - return Select( - [functions.func.count(col).label('tbl_row_count')], - whereclause, - from_obj=[self], - **params) - - def select(self, whereclause=None, **params): - """return a SELECT of this :class:`.FromClause`. - - .. seealso:: - - :func:`~.sql.expression.select` - general purpose - method which allows for arbitrary column lists. - - """ - - return Select([self], whereclause, **params) - - def join(self, right, onclause=None, isouter=False): - """Return a :class:`.Join` from this :class:`.FromClause` - to another :class:`FromClause`. - - E.g.:: - - from sqlalchemy import join - - j = user_table.join(address_table, - user_table.c.id == address_table.c.user_id) - stmt = select([user_table]).select_from(j) - - would emit SQL along the lines of:: - - SELECT user.id, user.name FROM user - JOIN address ON user.id = address.user_id - - :param right: the right side of the join; this is any - :class:`.FromClause` object such as a :class:`.Table` object, and - may also be a selectable-compatible object such as an ORM-mapped - class. - - :param onclause: a SQL expression representing the ON clause of the - join. If left at ``None``, :meth:`.FromClause.join` will attempt to - join the two tables based on a foreign key relationship. - - :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN. - - .. seealso:: - - :func:`.join` - standalone function - - :class:`.Join` - the type of object produced - - """ - - return Join(self, right, onclause, isouter) - - def outerjoin(self, right, onclause=None): - """Return a :class:`.Join` from this :class:`.FromClause` - to another :class:`FromClause`, with the "isouter" flag set to - True. - - E.g.:: - - from sqlalchemy import outerjoin - - j = user_table.outerjoin(address_table, - user_table.c.id == address_table.c.user_id) - - The above is equivalent to:: - - j = user_table.join( - address_table, - user_table.c.id == address_table.c.user_id, - isouter=True) - - :param right: the right side of the join; this is any - :class:`.FromClause` object such as a :class:`.Table` object, and - may also be a selectable-compatible object such as an ORM-mapped - class. - - :param onclause: a SQL expression representing the ON clause of the - join. If left at ``None``, :meth:`.FromClause.join` will attempt to - join the two tables based on a foreign key relationship. - - .. 
seealso:: - - :meth:`.FromClause.join` - - :class:`.Join` - - """ - - return Join(self, right, onclause, True) - - def alias(self, name=None, flat=False): - """return an alias of this :class:`.FromClause`. - - This is shorthand for calling:: - - from sqlalchemy import alias - a = alias(self, name=name) - - See :func:`~.expression.alias` for details. - - """ - - return Alias(self, name) - - def is_derived_from(self, fromclause): - """Return True if this FromClause is 'derived' from the given - FromClause. - - An example would be an Alias of a Table is derived from that Table. - - """ - # this is essentially an "identity" check in the base class. - # Other constructs override this to traverse through - # contained elements. - return fromclause in self._cloned_set - - def _is_lexical_equivalent(self, other): - """Return True if this FromClause and the other represent - the same lexical identity. - - This tests if either one is a copy of the other, or - if they are the same via annotation identity. - - """ - return self._cloned_set.intersection(other._cloned_set) - - @util.dependencies("sqlalchemy.sql.util") - def replace_selectable(self, sqlutil, old, alias): - """replace all occurrences of FromClause 'old' with the given Alias - object, returning a copy of this :class:`.FromClause`. - - """ - - return sqlutil.ClauseAdapter(alias).traverse(self) - - def correspond_on_equivalents(self, column, equivalents): - """Return corresponding_column for the given column, or if None - search for a match in the given dictionary. - - """ - col = self.corresponding_column(column, require_embedded=True) - if col is None and col in equivalents: - for equiv in equivalents[col]: - nc = self.corresponding_column(equiv, require_embedded=True) - if nc: - return nc - return col - - def corresponding_column(self, column, require_embedded=False): - """Given a :class:`.ColumnElement`, return the exported - :class:`.ColumnElement` object from this :class:`.Selectable` - which corresponds to that original - :class:`~sqlalchemy.schema.Column` via a common ancestor - column. - - :param column: the target :class:`.ColumnElement` to be matched - - :param require_embedded: only return corresponding columns for - the given :class:`.ColumnElement`, if the given - :class:`.ColumnElement` is actually present within a sub-element - of this :class:`.FromClause`. Normally the column will match if - it merely shares a common ancestor with one of the exported - columns of this :class:`.FromClause`. - - """ - - def embedded(expanded_proxy_set, target_set): - for t in target_set.difference(expanded_proxy_set): - if not set(_expand_cloned([t]) - ).intersection(expanded_proxy_set): - return False - return True - - # don't dig around if the column is locally present - if self.c.contains_column(column): - return column - col, intersect = None, None - target_set = column.proxy_set - cols = self.c._all_columns - for c in cols: - expanded_proxy_set = set(_expand_cloned(c.proxy_set)) - i = target_set.intersection(expanded_proxy_set) - if i and (not require_embedded - or embedded(expanded_proxy_set, target_set)): - if col is None: - - # no corresponding column yet, pick this one. - - col, intersect = c, i - elif len(i) > len(intersect): - - # 'c' has a larger field of correspondence than - # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x - # matches a1.c.x->table.c.x better than - # selectable.c.x->table.c.x does. - - col, intersect = c, i - elif i == intersect: - - # they have the same field of correspondence. 
see - # which proxy_set has fewer columns in it, which - # indicates a closer relationship with the root - # column. Also take into account the "weight" - # attribute which CompoundSelect() uses to give - # higher precedence to columns based on vertical - # position in the compound statement, and discard - # columns that have no reference to the target - # column (also occurs with CompoundSelect) - - col_distance = util.reduce( - operator.add, - [sc._annotations.get('weight', 1) for sc in - col.proxy_set if sc.shares_lineage(column)]) - c_distance = util.reduce( - operator.add, - [sc._annotations.get('weight', 1) for sc in - c.proxy_set if sc.shares_lineage(column)]) - if c_distance < col_distance: - col, intersect = c, i - return col - - @property - def description(self): - """a brief description of this FromClause. - - Used primarily for error message formatting. - - """ - return getattr(self, 'name', self.__class__.__name__ + " object") - - def _reset_exported(self): - """delete memoized collections when a FromClause is cloned.""" - - self._memoized_property.expire_instance(self) - - @_memoized_property - def columns(self): - """A named-based collection of :class:`.ColumnElement` objects - maintained by this :class:`.FromClause`. - - The :attr:`.columns`, or :attr:`.c` collection, is the gateway - to the construction of SQL expressions using table-bound or - other selectable-bound columns:: - - select([mytable]).where(mytable.c.somecolumn == 5) - - """ - - if '_columns' not in self.__dict__: - self._init_collections() - self._populate_column_collection() - return self._columns.as_immutable() - - @_memoized_property - def primary_key(self): - """Return the collection of Column objects which comprise the - primary key of this FromClause.""" - - self._init_collections() - self._populate_column_collection() - return self.primary_key - - @_memoized_property - def foreign_keys(self): - """Return the collection of ForeignKey objects which this - FromClause references.""" - - self._init_collections() - self._populate_column_collection() - return self.foreign_keys - - c = property(attrgetter('columns'), - doc="An alias for the :attr:`.columns` attribute.") - _select_iterable = property(attrgetter('columns')) - - def _init_collections(self): - assert '_columns' not in self.__dict__ - assert 'primary_key' not in self.__dict__ - assert 'foreign_keys' not in self.__dict__ - - self._columns = ColumnCollection() - self.primary_key = ColumnSet() - self.foreign_keys = set() - - @property - def _cols_populated(self): - return '_columns' in self.__dict__ - - def _populate_column_collection(self): - """Called on subclasses to establish the .c collection. - - Each implementation has a different way of establishing - this collection. - - """ - - def _refresh_for_new_column(self, column): - """Given a column added to the .c collection of an underlying - selectable, produce the local version of that column, assuming this - selectable ultimately should proxy this column. - - this is used to "ping" a derived selectable to add a new column - to its .c. collection when a Column has been added to one of the - Table objects it ultimtely derives from. - - If the given selectable hasn't populated its .c. collection yet, - it should at least pass on the message to the contained selectables, - but it will return None. - - This method is currently used by Declarative to allow Table - columns to be added to a partially constructed inheritance - mapping that may have already produced joins. 
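To make the correspondence rules above concrete: a derived selectable can resolve a column of an underlying table to its own exported proxy. An illustrative sketch against the 1.0-era API documented here; ``t`` and its columns are invented::

    from sqlalchemy import table, column, select

    t = table("t", column("x"), column("y"))
    s = select([t]).alias("s")

    # s.c.x proxies t.c.x; correspondence follows the shared lineage
    assert s.corresponding_column(t.c.x) is s.c.x

    # require_embedded=True succeeds here because t is actually
    # contained within s, not merely a common ancestor
    assert s.corresponding_column(t.c.y, require_embedded=True) is s.c.y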
The method - isn't public right now, as the full span of implications - and/or caveats aren't yet clear. - - It's also possible that this functionality could be invoked by - default via an event, which would require that - selectables maintain a weak referencing collection of all - derivations. - - """ - if not self._cols_populated: - return None - elif (column.key in self.columns and - self.columns[column.key] is column): - return column - else: - return None - - -class Join(FromClause): - """represent a ``JOIN`` construct between two :class:`.FromClause` - elements. - - The public constructor function for :class:`.Join` is the module-level - :func:`.join()` function, as well as the :meth:`.FromClause.join` method - of any :class:`.FromClause` (e.g. such as :class:`.Table`). - - .. seealso:: - - :func:`.join` - - :meth:`.FromClause.join` - - """ - __visit_name__ = 'join' - - _is_join = True - - def __init__(self, left, right, onclause=None, isouter=False): - """Construct a new :class:`.Join`. - - The usual entrypoint here is the :func:`~.expression.join` - function or the :meth:`.FromClause.join` method of any - :class:`.FromClause` object. - - """ - self.left = _interpret_as_from(left) - self.right = _interpret_as_from(right).self_group() - - if onclause is None: - self.onclause = self._match_primaries(self.left, self.right) - else: - self.onclause = onclause - - self.isouter = isouter - - @classmethod - def _create_outerjoin(cls, left, right, onclause=None): - """Return an ``OUTER JOIN`` clause element. - - The returned object is an instance of :class:`.Join`. - - Similar functionality is also available via the - :meth:`~.FromClause.outerjoin()` method on any - :class:`.FromClause`. - - :param left: The left side of the join. - - :param right: The right side of the join. - - :param onclause: Optional criterion for the ``ON`` clause, is - derived from foreign key relationships established between - left and right otherwise. - - To chain joins together, use the :meth:`.FromClause.join` or - :meth:`.FromClause.outerjoin` methods on the resulting - :class:`.Join` object. - - """ - return cls(left, right, onclause, isouter=True) - - @classmethod - def _create_join(cls, left, right, onclause=None, isouter=False): - """Produce a :class:`.Join` object, given two :class:`.FromClause` - expressions. - - E.g.:: - - j = join(user_table, address_table, - user_table.c.id == address_table.c.user_id) - stmt = select([user_table]).select_from(j) - - would emit SQL along the lines of:: - - SELECT user.id, user.name FROM user - JOIN address ON user.id = address.user_id - - Similar functionality is available given any - :class:`.FromClause` object (e.g. such as a :class:`.Table`) using - the :meth:`.FromClause.join` method. - - :param left: The left side of the join. - - :param right: the right side of the join; this is any - :class:`.FromClause` object such as a :class:`.Table` object, and - may also be a selectable-compatible object such as an ORM-mapped - class. - - :param onclause: a SQL expression representing the ON clause of the - join. If left at ``None``, :meth:`.FromClause.join` will attempt to - join the two tables based on a foreign key relationship. - - :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN. - - .. 
seealso:: - - :meth:`.FromClause.join` - method form, based on a given left side - - :class:`.Join` - the type of object produced - - """ - - return cls(left, right, onclause, isouter) - - @property - def description(self): - return "Join object on %s(%d) and %s(%d)" % ( - self.left.description, - id(self.left), - self.right.description, - id(self.right)) - - def is_derived_from(self, fromclause): - return fromclause is self or \ - self.left.is_derived_from(fromclause) or \ - self.right.is_derived_from(fromclause) - - def self_group(self, against=None): - return FromGrouping(self) - - @util.dependencies("sqlalchemy.sql.util") - def _populate_column_collection(self, sqlutil): - columns = [c for c in self.left.columns] + \ - [c for c in self.right.columns] - - self.primary_key.extend(sqlutil.reduce_columns( - (c for c in columns if c.primary_key), self.onclause)) - self._columns.update((col._label, col) for col in columns) - self.foreign_keys.update(itertools.chain( - *[col.foreign_keys for col in columns])) - - def _refresh_for_new_column(self, column): - col = self.left._refresh_for_new_column(column) - if col is None: - col = self.right._refresh_for_new_column(column) - if col is not None: - if self._cols_populated: - self._columns[col._label] = col - self.foreign_keys.add(col) - if col.primary_key: - self.primary_key.add(col) - return col - return None - - def _copy_internals(self, clone=_clone, **kw): - self._reset_exported() - self.left = clone(self.left, **kw) - self.right = clone(self.right, **kw) - self.onclause = clone(self.onclause, **kw) - - def get_children(self, **kwargs): - return self.left, self.right, self.onclause - - def _match_primaries(self, left, right): - if isinstance(left, Join): - left_right = left.right - else: - left_right = None - return self._join_condition(left, right, a_subset=left_right) - - @classmethod - def _join_condition(cls, a, b, ignore_nonexistent_tables=False, - a_subset=None, - consider_as_foreign_keys=None): - """create a join condition between two tables or selectables. - - e.g.:: - - join_condition(tablea, tableb) - - would produce an expression along the lines of:: - - tablea.c.id==tableb.c.tablea_id - - The join is determined based on the foreign key relationships - between the two selectables. If there are multiple ways - to join, or no way to join, an error is raised. - - :param ignore_nonexistent_tables: Deprecated - this - flag is no longer used. Only resolution errors regarding - the two given tables are propagated. - - :param a_subset: An optional expression that is a sub-component - of ``a``. An attempt will be made to join to just this sub-component - first before looking at the full ``a`` construct, and if found - will be successful even if there are other ways to join to ``a``. - This allows the "right side" of a join to be passed thereby - providing a "natural join". - - """ - constraints = cls._joincond_scan_left_right( - a, a_subset, b, consider_as_foreign_keys) - - if len(constraints) > 1: - cls._joincond_trim_constraints( - a, b, constraints, consider_as_foreign_keys) - - if len(constraints) == 0: - if isinstance(b, FromGrouping): - hint = " Perhaps you meant to convert the right side to a "\ - "subquery using alias()?" 
- else: - hint = "" - raise exc.NoForeignKeysError( - "Can't find any foreign key relationships " - "between '%s' and '%s'.%s" % - (a.description, b.description, hint)) - - crit = [(x == y) for x, y in list(constraints.values())[0]] - if len(crit) == 1: - return (crit[0]) - else: - return and_(*crit) - - @classmethod - def _joincond_scan_left_right( - cls, a, a_subset, b, consider_as_foreign_keys): - constraints = collections.defaultdict(list) - - for left in (a_subset, a): - if left is None: - continue - for fk in sorted( - b.foreign_keys, - key=lambda fk: fk.parent._creation_order): - if consider_as_foreign_keys is not None and \ - fk.parent not in consider_as_foreign_keys: - continue - try: - col = fk.get_referent(left) - except exc.NoReferenceError as nrte: - if nrte.table_name == left.name: - raise - else: - continue - - if col is not None: - constraints[fk.constraint].append((col, fk.parent)) - if left is not b: - for fk in sorted( - left.foreign_keys, - key=lambda fk: fk.parent._creation_order): - if consider_as_foreign_keys is not None and \ - fk.parent not in consider_as_foreign_keys: - continue - try: - col = fk.get_referent(b) - except exc.NoReferenceError as nrte: - if nrte.table_name == b.name: - raise - else: - continue - - if col is not None: - constraints[fk.constraint].append((col, fk.parent)) - if constraints: - break - return constraints - - @classmethod - def _joincond_trim_constraints( - cls, a, b, constraints, consider_as_foreign_keys): - # more than one constraint matched. narrow down the list - # to include just those FKCs that match exactly to - # "consider_as_foreign_keys". - if consider_as_foreign_keys: - for const in list(constraints): - if set(f.parent for f in const.elements) != set( - consider_as_foreign_keys): - del constraints[const] - - # if still multiple constraints, but - # they all refer to the exact same end result, use it. - if len(constraints) > 1: - dedupe = set(tuple(crit) for crit in constraints.values()) - if len(dedupe) == 1: - key = list(constraints)[0] - constraints = {key: constraints[key]} - - if len(constraints) != 1: - raise exc.AmbiguousForeignKeysError( - "Can't determine join between '%s' and '%s'; " - "tables have more than one foreign key " - "constraint relationship between them. " - "Please specify the 'onclause' of this " - "join explicitly." % (a.description, b.description)) - - def select(self, whereclause=None, **kwargs): - """Create a :class:`.Select` from this :class:`.Join`. - - The equivalent long-hand form, given a :class:`.Join` object - ``j``, is:: - - from sqlalchemy import select - j = select([j.left, j.right], **kw).\\ - where(whereclause).\\ - select_from(j) - - :param whereclause: the WHERE criterion that will be sent to - the :func:`select()` function - - :param \**kwargs: all other kwargs are sent to the - underlying :func:`select()` function. - - """ - collist = [self.left, self.right] - - return Select(collist, whereclause, from_obj=[self], **kwargs) - - @property - def bind(self): - return self.left.bind or self.right.bind - - @util.dependencies("sqlalchemy.sql.util") - def alias(self, sqlutil, name=None, flat=False): - """return an alias of this :class:`.Join`. - - The default behavior here is to first produce a SELECT - construct from this :class:`.Join`, then to produce an - :class:`.Alias` from that. 
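The foreign-key scan above (_joincond_scan_left_right and friends) is what allows join() to be called with no ON clause at all. A hedged sketch using the schema-level Table/ForeignKey constructs from the public package; the tables are invented::

    from sqlalchemy import (MetaData, Table, Column, Integer, String,
                            ForeignKey, select)

    metadata = MetaData()
    user = Table("user", metadata,
                 Column("id", Integer, primary_key=True),
                 Column("name", String))
    address = Table("address", metadata,
                    Column("id", Integer, primary_key=True),
                    Column("user_id", Integer, ForeignKey("user.id")))

    # onclause omitted: derived from the single FK between the tables
    j = user.join(address)
    print(select([user.c.name]).select_from(j))
    # no usable FK raises NoForeignKeysError; several candidate FKs
    # raise AmbiguousForeignKeysError and require an explicit onclause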
So given a join of the form:: - - j = table_a.join(table_b, table_a.c.id == table_b.c.a_id) - - The JOIN by itself would look like:: - - table_a JOIN table_b ON table_a.id = table_b.a_id - - Whereas the alias of the above, ``j.alias()``, would in a - SELECT context look like:: - - (SELECT table_a.id AS table_a_id, table_b.id AS table_b_id, - table_b.a_id AS table_b_a_id - FROM table_a - JOIN table_b ON table_a.id = table_b.a_id) AS anon_1 - - The equivalent long-hand form, given a :class:`.Join` object - ``j``, is:: - - from sqlalchemy import select, alias - j = alias( - select([j.left, j.right]).\\ - select_from(j).\\ - with_labels(True).\\ - correlate(False), - name=name - ) - - The selectable produced by :meth:`.Join.alias` features the same - columns as that of the two individual selectables presented under - a single name - the individual columns are "auto-labeled", meaning - the ``.c.`` collection of the resulting :class:`.Alias` represents - the names of the individual columns using a - ``_`` scheme:: - - j.c.table_a_id - j.c.table_b_a_id - - :meth:`.Join.alias` also features an alternate - option for aliasing joins which produces no enclosing SELECT and - does not normally apply labels to the column names. The - ``flat=True`` option will call :meth:`.FromClause.alias` - against the left and right sides individually. - Using this option, no new ``SELECT`` is produced; - we instead, from a construct as below:: - - j = table_a.join(table_b, table_a.c.id == table_b.c.a_id) - j = j.alias(flat=True) - - we get a result like this:: - - table_a AS table_a_1 JOIN table_b AS table_b_1 ON - table_a_1.id = table_b_1.a_id - - The ``flat=True`` argument is also propagated to the contained - selectables, so that a composite join such as:: - - j = table_a.join( - table_b.join(table_c, - table_b.c.id == table_c.c.b_id), - table_b.c.a_id == table_a.c.id - ).alias(flat=True) - - Will produce an expression like:: - - table_a AS table_a_1 JOIN ( - table_b AS table_b_1 JOIN table_c AS table_c_1 - ON table_b_1.id = table_c_1.b_id - ) ON table_a_1.id = table_b_1.a_id - - The standalone :func:`~.expression.alias` function as well as the - base :meth:`.FromClause.alias` method also support the ``flat=True`` - argument as a no-op, so that the argument can be passed to the - ``alias()`` method of any selectable. - - .. versionadded:: 0.9.0 Added the ``flat=True`` option to create - "aliases" of joins without enclosing inside of a SELECT - subquery. - - :param name: name given to the alias. - - :param flat: if True, produce an alias of the left and right - sides of this :class:`.Join` and return the join of those - two selectables. This produces join expression that does not - include an enclosing SELECT. - - .. versionadded:: 0.9.0 - - .. 
seealso:: - - :func:`~.expression.alias` - - """ - if flat: - assert name is None, "Can't send name argument with flat" - left_a, right_a = self.left.alias(flat=True), \ - self.right.alias(flat=True) - adapter = sqlutil.ClauseAdapter(left_a).\ - chain(sqlutil.ClauseAdapter(right_a)) - - return left_a.join(right_a, adapter.traverse(self.onclause), - isouter=self.isouter) - else: - return self.select(use_labels=True, correlate=False).alias(name) - - @property - def _hide_froms(self): - return itertools.chain(*[_from_objects(x.left, x.right) - for x in self._cloned_set]) - - @property - def _from_objects(self): - return [self] + \ - self.onclause._from_objects + \ - self.left._from_objects + \ - self.right._from_objects - - -class Alias(FromClause): - """Represents an table or selectable alias (AS). - - Represents an alias, as typically applied to any table or - sub-select within a SQL statement using the ``AS`` keyword (or - without the keyword on certain databases such as Oracle). - - This object is constructed from the :func:`~.expression.alias` module - level function as well as the :meth:`.FromClause.alias` method available - on all :class:`.FromClause` subclasses. - - """ - - __visit_name__ = 'alias' - named_with_column = True - - _is_from_container = True - - def __init__(self, selectable, name=None): - baseselectable = selectable - while isinstance(baseselectable, Alias): - baseselectable = baseselectable.element - self.original = baseselectable - self.supports_execution = baseselectable.supports_execution - if self.supports_execution: - self._execution_options = baseselectable._execution_options - self.element = selectable - if name is None: - if self.original.named_with_column: - name = getattr(self.original, 'name', None) - name = _anonymous_label('%%(%d %s)s' % (id(self), name - or 'anon')) - self.name = name - - @property - def description(self): - if util.py3k: - return self.name - else: - return self.name.encode('ascii', 'backslashreplace') - - def as_scalar(self): - try: - return self.element.as_scalar() - except AttributeError: - raise AttributeError("Element %s does not support " - "'as_scalar()'" % self.element) - - def is_derived_from(self, fromclause): - if fromclause in self._cloned_set: - return True - return self.element.is_derived_from(fromclause) - - def _populate_column_collection(self): - for col in self.element.columns._all_columns: - col._make_proxy(self) - - def _refresh_for_new_column(self, column): - col = self.element._refresh_for_new_column(column) - if col is not None: - if not self._cols_populated: - return None - else: - return col._make_proxy(self) - else: - return None - - def _copy_internals(self, clone=_clone, **kw): - # don't apply anything to an aliased Table - # for now. May want to drive this from - # the given **kw. - if isinstance(self.element, TableClause): - return - self._reset_exported() - self.element = clone(self.element, **kw) - baseselectable = self.element - while isinstance(baseselectable, Alias): - baseselectable = baseselectable.element - self.original = baseselectable - - def get_children(self, column_collections=True, **kw): - if column_collections: - for c in self.c: - yield c - yield self.element - - @property - def _from_objects(self): - return [self] - - @property - def bind(self): - return self.element.bind - - -class CTE(Generative, HasSuffixes, Alias): - """Represent a Common Table Expression. - - The :class:`.CTE` object is obtained using the - :meth:`.SelectBase.cte` method from any selectable. 
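Comparing the two aliasing modes of Join.alias() described above side by side (an illustrative sketch; the table names are invented and the rendered SQL shown is approximate)::

    from sqlalchemy import table, column, select

    a = table("table_a", column("id"))
    b = table("table_b", column("id"), column("a_id"))
    j = a.join(b, a.c.id == b.c.a_id)

    # default: wrap the join in a labeled SELECT, then alias that
    print(select([j.alias()]))

    # flat=True: alias each side individually, no enclosing SELECT,
    # e.g. table_a AS table_a_1 JOIN table_b AS table_b_1 ON ...
    print(select([j.alias(flat=True)]))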
- See that method for complete examples. - - .. versionadded:: 0.7.6 - - """ - __visit_name__ = 'cte' - - def __init__(self, selectable, - name=None, - recursive=False, - _cte_alias=None, - _restates=frozenset(), - _suffixes=None): - self.recursive = recursive - self._cte_alias = _cte_alias - self._restates = _restates - if _suffixes: - self._suffixes = _suffixes - super(CTE, self).__init__(selectable, name=name) - - def alias(self, name=None, flat=False): - return CTE( - self.original, - name=name, - recursive=self.recursive, - _cte_alias=self, - _suffixes=self._suffixes - ) - - def union(self, other): - return CTE( - self.original.union(other), - name=self.name, - recursive=self.recursive, - _restates=self._restates.union([self]), - _suffixes=self._suffixes - ) - - def union_all(self, other): - return CTE( - self.original.union_all(other), - name=self.name, - recursive=self.recursive, - _restates=self._restates.union([self]), - _suffixes=self._suffixes - ) - - -class FromGrouping(FromClause): - """Represent a grouping of a FROM clause""" - __visit_name__ = 'grouping' - - def __init__(self, element): - self.element = element - - def _init_collections(self): - pass - - @property - def columns(self): - return self.element.columns - - @property - def primary_key(self): - return self.element.primary_key - - @property - def foreign_keys(self): - return self.element.foreign_keys - - def is_derived_from(self, element): - return self.element.is_derived_from(element) - - def alias(self, **kw): - return FromGrouping(self.element.alias(**kw)) - - @property - def _hide_froms(self): - return self.element._hide_froms - - def get_children(self, **kwargs): - return self.element, - - def _copy_internals(self, clone=_clone, **kw): - self.element = clone(self.element, **kw) - - @property - def _from_objects(self): - return self.element._from_objects - - def __getattr__(self, attr): - return getattr(self.element, attr) - - def __getstate__(self): - return {'element': self.element} - - def __setstate__(self, state): - self.element = state['element'] - - -class TableClause(Immutable, FromClause): - """Represents a minimal "table" construct. - - This is a lightweight table object that has only a name and a - collection of columns, which are typically produced - by the :func:`.expression.column` function:: - - from sqlalchemy import table, column - - user = table("user", - column("id"), - column("name"), - column("description"), - ) - - The :class:`.TableClause` construct serves as the base for - the more commonly used :class:`~.schema.Table` object, providing - the usual set of :class:`~.expression.FromClause` services including - the ``.c.`` collection and statement generation methods. - - It does **not** provide all the additional schema-level services - of :class:`~.schema.Table`, including constraints, references to other - tables, or support for :class:`.MetaData`-level services. It's useful - on its own as an ad-hoc construct used to generate quick SQL - statements when a more fully fledged :class:`~.schema.Table` - is not on hand. - - """ - - __visit_name__ = 'table' - - named_with_column = True - - implicit_returning = False - """:class:`.TableClause` doesn't support having a primary key or column - -level defaults, so implicit returning doesn't apply.""" - - _autoincrement_column = None - """No PK or default support so no autoincrement column.""" - - def __init__(self, name, *columns): - """Produce a new :class:`.TableClause`. 
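A TableClause as described above carries just enough state for SQL generation; no MetaData or reflection is involved. An illustrative sketch (names invented, output approximate)::

    from sqlalchemy import table, column, select

    user = table("user", column("id"), column("name"))

    print(select([user.c.name]).where(user.c.id == 1))
    # roughly: SELECT "user".name FROM "user" WHERE "user".id = :id_1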
- - The object returned is an instance of :class:`.TableClause`, which - represents the "syntactical" portion of the schema-level - :class:`~.schema.Table` object. - It may be used to construct lightweight table constructs. - - .. versionchanged:: 1.0.0 :func:`.expression.table` can now - be imported from the plain ``sqlalchemy`` namespace like any - other SQL element. - - :param name: Name of the table. - - :param columns: A collection of :func:`.expression.column` constructs. - - """ - - super(TableClause, self).__init__() - self.name = self.fullname = name - self._columns = ColumnCollection() - self.primary_key = ColumnSet() - self.foreign_keys = set() - for c in columns: - self.append_column(c) - - def _init_collections(self): - pass - - @util.memoized_property - def description(self): - if util.py3k: - return self.name - else: - return self.name.encode('ascii', 'backslashreplace') - - def append_column(self, c): - self._columns[c.key] = c - c.table = self - - def get_children(self, column_collections=True, **kwargs): - if column_collections: - return [c for c in self.c] - else: - return [] - - @util.dependencies("sqlalchemy.sql.functions") - def count(self, functions, whereclause=None, **params): - """return a SELECT COUNT generated against this - :class:`.TableClause`.""" - - if self.primary_key: - col = list(self.primary_key)[0] - else: - col = list(self.columns)[0] - return Select( - [functions.func.count(col).label('tbl_row_count')], - whereclause, - from_obj=[self], - **params) - - @util.dependencies("sqlalchemy.sql.dml") - def insert(self, dml, values=None, inline=False, **kwargs): - """Generate an :func:`.insert` construct against this - :class:`.TableClause`. - - E.g.:: - - table.insert().values(name='foo') - - See :func:`.insert` for argument and usage information. - - """ - - return dml.Insert(self, values=values, inline=inline, **kwargs) - - @util.dependencies("sqlalchemy.sql.dml") - def update( - self, dml, whereclause=None, values=None, inline=False, **kwargs): - """Generate an :func:`.update` construct against this - :class:`.TableClause`. - - E.g.:: - - table.update().where(table.c.id==7).values(name='foo') - - See :func:`.update` for argument and usage information. - - """ - - return dml.Update(self, whereclause=whereclause, - values=values, inline=inline, **kwargs) - - @util.dependencies("sqlalchemy.sql.dml") - def delete(self, dml, whereclause=None, **kwargs): - """Generate a :func:`.delete` construct against this - :class:`.TableClause`. - - E.g.:: - - table.delete().where(table.c.id==7) - - See :func:`.delete` for argument and usage information. - - """ - - return dml.Delete(self, whereclause, **kwargs) - - @property - def _from_objects(self): - return [self] - - -class ForUpdateArg(ClauseElement): - - @classmethod - def parse_legacy_select(self, arg): - """Parse the for_update arugment of :func:`.select`. - - :param mode: Defines the lockmode to use. - - ``None`` - translates to no lockmode - - ``'update'`` - translates to ``FOR UPDATE`` - (standard SQL, supported by most dialects) - - ``'nowait'`` - translates to ``FOR UPDATE NOWAIT`` - (supported by Oracle, PostgreSQL 8.1 upwards) - - ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL), - and ``FOR SHARE`` (for PostgreSQL) - - ``'read_nowait'`` - translates to ``FOR SHARE NOWAIT`` - (supported by PostgreSQL). ``FOR SHARE`` and - ``FOR SHARE NOWAIT`` (PostgreSQL). 
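The insert()/update()/delete() generators documented above work against the same lightweight construct. A sketch (invented names; the compiled output is approximate)::

    from sqlalchemy import table, column

    user = table("user", column("id"), column("name"))

    print(user.insert().values(name="foo"))
    # roughly: INSERT INTO "user" (name) VALUES (:name)
    print(user.update().where(user.c.id == 7).values(name="foo"))
    print(user.delete().where(user.c.id == 7))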
- - """ - if arg in (None, False): - return None - - nowait = read = False - if arg == 'nowait': - nowait = True - elif arg == 'read': - read = True - elif arg == 'read_nowait': - read = nowait = True - elif arg is not True: - raise exc.ArgumentError("Unknown for_update argument: %r" % arg) - - return ForUpdateArg(read=read, nowait=nowait) - - @property - def legacy_for_update_value(self): - if self.read and not self.nowait: - return "read" - elif self.read and self.nowait: - return "read_nowait" - elif self.nowait: - return "nowait" - else: - return True - - def _copy_internals(self, clone=_clone, **kw): - if self.of is not None: - self.of = [clone(col, **kw) for col in self.of] - - def __init__(self, nowait=False, read=False, of=None): - """Represents arguments specified to :meth:`.Select.for_update`. - - .. versionadded:: 0.9.0 - """ - - self.nowait = nowait - self.read = read - if of is not None: - self.of = [_interpret_as_column_or_from(elem) - for elem in util.to_list(of)] - else: - self.of = None - - -class SelectBase(Executable, FromClause): - """Base class for SELECT statements. - - - This includes :class:`.Select`, :class:`.CompoundSelect` and - :class:`.TextAsFrom`. - - - """ - - def as_scalar(self): - """return a 'scalar' representation of this selectable, which can be - used as a column expression. - - Typically, a select statement which has only one column in its columns - clause is eligible to be used as a scalar expression. - - The returned object is an instance of - :class:`ScalarSelect`. - - """ - return ScalarSelect(self) - - def label(self, name): - """return a 'scalar' representation of this selectable, embedded as a - subquery with a label. - - .. seealso:: - - :meth:`~.SelectBase.as_scalar`. - - """ - return self.as_scalar().label(name) - - def cte(self, name=None, recursive=False): - """Return a new :class:`.CTE`, or Common Table Expression instance. - - Common table expressions are a SQL standard whereby SELECT - statements can draw upon secondary statements specified along - with the primary statement, using a clause called "WITH". - Special semantics regarding UNION can also be employed to - allow "recursive" queries, where a SELECT statement can draw - upon the set of rows that have previously been selected. - - SQLAlchemy detects :class:`.CTE` objects, which are treated - similarly to :class:`.Alias` objects, as special elements - to be delivered to the FROM clause of the statement as well - as to a WITH clause at the top of the statement. - - .. versionadded:: 0.7.6 - - :param name: name given to the common table expression. Like - :meth:`._FromClause.alias`, the name can be left as ``None`` - in which case an anonymous symbol will be used at query - compile time. - :param recursive: if ``True``, will render ``WITH RECURSIVE``. - A recursive common table expression is intended to be used in - conjunction with UNION ALL in order to derive rows - from those already selected. - - The following examples illustrate two examples from - Postgresql's documentation at - http://www.postgresql.org/docs/8.4/static/queries-with.html. 
- - Example 1, non recursive:: - - from sqlalchemy import (Table, Column, String, Integer, - MetaData, select, func) - - metadata = MetaData() - - orders = Table('orders', metadata, - Column('region', String), - Column('amount', Integer), - Column('product', String), - Column('quantity', Integer) - ) - - regional_sales = select([ - orders.c.region, - func.sum(orders.c.amount).label('total_sales') - ]).group_by(orders.c.region).cte("regional_sales") - - - top_regions = select([regional_sales.c.region]).\\ - where( - regional_sales.c.total_sales > - select([ - func.sum(regional_sales.c.total_sales)/10 - ]) - ).cte("top_regions") - - statement = select([ - orders.c.region, - orders.c.product, - func.sum(orders.c.quantity).label("product_units"), - func.sum(orders.c.amount).label("product_sales") - ]).where(orders.c.region.in_( - select([top_regions.c.region]) - )).group_by(orders.c.region, orders.c.product) - - result = conn.execute(statement).fetchall() - - Example 2, WITH RECURSIVE:: - - from sqlalchemy import (Table, Column, String, Integer, - MetaData, select, func) - - metadata = MetaData() - - parts = Table('parts', metadata, - Column('part', String), - Column('sub_part', String), - Column('quantity', Integer), - ) - - included_parts = select([ - parts.c.sub_part, - parts.c.part, - parts.c.quantity]).\\ - where(parts.c.part=='our part').\\ - cte(recursive=True) - - - incl_alias = included_parts.alias() - parts_alias = parts.alias() - included_parts = included_parts.union_all( - select([ - parts_alias.c.sub_part, - parts_alias.c.part, - parts_alias.c.quantity - ]). - where(parts_alias.c.part==incl_alias.c.sub_part) - ) - - statement = select([ - included_parts.c.sub_part, - func.sum(included_parts.c.quantity). - label('total_quantity') - ]).\\ - group_by(included_parts.c.sub_part) - - result = conn.execute(statement).fetchall() - - - .. seealso:: - - :meth:`.orm.query.Query.cte` - ORM version of - :meth:`.SelectBase.cte`. - - """ - return CTE(self, name=name, recursive=recursive) - - @_generative - @util.deprecated('0.6', - message="``autocommit()`` is deprecated. Use " - ":meth:`.Executable.execution_options` with the " - "'autocommit' flag.") - def autocommit(self): - """return a new selectable with the 'autocommit' flag set to - True. - """ - - self._execution_options = \ - self._execution_options.union({'autocommit': True}) - - def _generate(self): - """Override the default _generate() method to also clear out - exported collections.""" - - s = self.__class__.__new__(self.__class__) - s.__dict__ = self.__dict__.copy() - s._reset_exported() - return s - - @property - def _from_objects(self): - return [self] - - -class GenerativeSelect(SelectBase): - """Base class for SELECT statements where additional elements can be - added. - - This serves as the base for :class:`.Select` and :class:`.CompoundSelect` - where elements such as ORDER BY, GROUP BY can be added and column - rendering can be controlled. Compare to :class:`.TextAsFrom`, which, - while it subclasses :class:`.SelectBase` and is also a SELECT construct, - represents a fixed textual string which cannot be altered at this level, - only wrapped as a subquery. - - .. versionadded:: 0.9.0 :class:`.GenerativeSelect` was added to - provide functionality specific to :class:`.Select` and - :class:`.CompoundSelect` while allowing :class:`.SelectBase` to be - used for other SELECT-like objects, e.g. :class:`.TextAsFrom`. 
- - """ - _order_by_clause = ClauseList() - _group_by_clause = ClauseList() - _limit_clause = None - _offset_clause = None - _for_update_arg = None - - def __init__(self, - use_labels=False, - for_update=False, - limit=None, - offset=None, - order_by=None, - group_by=None, - bind=None, - autocommit=None): - self.use_labels = use_labels - - if for_update is not False: - self._for_update_arg = (ForUpdateArg. - parse_legacy_select(for_update)) - - if autocommit is not None: - util.warn_deprecated('autocommit on select() is ' - 'deprecated. Use .execution_options(a' - 'utocommit=True)') - self._execution_options = \ - self._execution_options.union( - {'autocommit': autocommit}) - if limit is not None: - self._limit_clause = _offset_or_limit_clause(limit) - if offset is not None: - self._offset_clause = _offset_or_limit_clause(offset) - self._bind = bind - - if order_by is not None: - self._order_by_clause = ClauseList( - *util.to_list(order_by), - _literal_as_text=_literal_and_labels_as_label_reference) - if group_by is not None: - self._group_by_clause = ClauseList( - *util.to_list(group_by), - _literal_as_text=_literal_as_label_reference) - - @property - def for_update(self): - """Provide legacy dialect support for the ``for_update`` attribute. - """ - if self._for_update_arg is not None: - return self._for_update_arg.legacy_for_update_value - else: - return None - - @for_update.setter - def for_update(self, value): - self._for_update_arg = ForUpdateArg.parse_legacy_select(value) - - @_generative - def with_for_update(self, nowait=False, read=False, of=None): - """Specify a ``FOR UPDATE`` clause for this :class:`.GenerativeSelect`. - - E.g.:: - - stmt = select([table]).with_for_update(nowait=True) - - On a database like Postgresql or Oracle, the above would render a - statement like:: - - SELECT table.a, table.b FROM table FOR UPDATE NOWAIT - - on other backends, the ``nowait`` option is ignored and instead - would produce:: - - SELECT table.a, table.b FROM table FOR UPDATE - - When called with no arguments, the statement will render with - the suffix ``FOR UPDATE``. Additional arguments can then be - provided which allow for common database-specific - variants. - - :param nowait: boolean; will render ``FOR UPDATE NOWAIT`` on Oracle - and Postgresql dialects. - - :param read: boolean; will render ``LOCK IN SHARE MODE`` on MySQL, - ``FOR SHARE`` on Postgresql. On Postgresql, when combined with - ``nowait``, will render ``FOR SHARE NOWAIT``. - - :param of: SQL expression or list of SQL expression elements - (typically :class:`.Column` objects or a compatible expression) which - will render into a ``FOR UPDATE OF`` clause; supported by PostgreSQL - and Oracle. May render as a table or as a column depending on - backend. - - .. versionadded:: 0.9.0 - - """ - self._for_update_arg = ForUpdateArg(nowait=nowait, read=read, of=of) - - @_generative - def apply_labels(self): - """return a new selectable with the 'use_labels' flag set to True. - - This will result in column expressions being generated using labels - against their table name, such as "SELECT somecolumn AS - tablename_somecolumn". This allows selectables which contain multiple - FROM clauses to produce a unique set of column names regardless of - name conflicts among the individual FROM clauses. - - """ - self.use_labels = True - - @property - def _limit(self): - """Get an integer value for the limit. 
This should only be used - by code that cannot support a limit as a BindParameter or - other custom clause as it will throw an exception if the limit - isn't currently set to an integer. - - """ - return _offset_or_limit_clause_asint(self._limit_clause, "limit") - - @property - def _simple_int_limit(self): - """True if the LIMIT clause is a simple integer, False - if it is not present or is a SQL expression. - """ - return isinstance(self._limit_clause, _OffsetLimitParam) - - @property - def _simple_int_offset(self): - """True if the OFFSET clause is a simple integer, False - if it is not present or is a SQL expression. - """ - return isinstance(self._offset_clause, _OffsetLimitParam) - - @property - def _offset(self): - """Get an integer value for the offset. This should only be used - by code that cannot support an offset as a BindParameter or - other custom clause as it will throw an exception if the - offset isn't currently set to an integer. - - """ - return _offset_or_limit_clause_asint(self._offset_clause, "offset") - - @_generative - def limit(self, limit): - """return a new selectable with the given LIMIT criterion - applied. - - This is a numerical value which usually renders as a ``LIMIT`` - expression in the resulting select. Backends that don't - support ``LIMIT`` will attempt to provide similar - functionality. - - .. versionchanged:: 1.0.0 - :meth:`.Select.limit` can now - accept arbitrary SQL expressions as well as integer values. - - :param limit: an integer LIMIT parameter, or a SQL expression - that provides an integer result. - - """ - - self._limit_clause = _offset_or_limit_clause(limit) - - @_generative - def offset(self, offset): - """return a new selectable with the given OFFSET criterion - applied. - - - This is a numeric value which usually renders as an ``OFFSET`` - expression in the resulting select. Backends that don't - support ``OFFSET`` will attempt to provide similar - functionality. - - - .. versionchanged:: 1.0.0 - :meth:`.Select.offset` can now - accept arbitrary SQL expressions as well as integer values. - - :param offset: an integer OFFSET parameter, or a SQL expression - that provides an integer result. - - """ - - self._offset_clause = _offset_or_limit_clause(offset) - - @_generative - def order_by(self, *clauses): - """return a new selectable with the given list of ORDER BY - criterion applied. - - The criterion will be appended to any pre-existing ORDER BY - criterion. - - """ - - self.append_order_by(*clauses) - - @_generative - def group_by(self, *clauses): - """return a new selectable with the given list of GROUP BY - criterion applied. - - The criterion will be appended to any pre-existing GROUP BY - criterion. - - """ - - self.append_group_by(*clauses) - - def append_order_by(self, *clauses): - """Append the given ORDER BY criterion applied to this selectable. - - The criterion will be appended to any pre-existing ORDER BY criterion. - - This is an **in-place** mutation method; the - :meth:`~.GenerativeSelect.order_by` method is preferred, as it - provides standard :term:`method chaining`. - - """ - if len(clauses) == 1 and clauses[0] is None: - self._order_by_clause = ClauseList() - else: - if getattr(self, '_order_by_clause', None) is not None: - clauses = list(self._order_by_clause) + list(clauses) - self._order_by_clause = ClauseList( - *clauses, - _literal_as_text=_literal_and_labels_as_label_reference) - - def append_group_by(self, *clauses): - """Append the given GROUP BY criterion applied to this selectable. 
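The generative LIMIT/OFFSET methods above compose with ORDER BY, and since 1.0.0 accept SQL expressions as well as plain integers. An illustrative sketch (invented table, approximate output)::

    from sqlalchemy import table, column, select, bindparam

    t = table("t", column("x"))

    stmt = select([t]).order_by(t.c.x).limit(10).offset(20)
    print(stmt)
    # roughly: SELECT t.x FROM t ORDER BY t.x LIMIT :param_1 OFFSET :param_2

    # a bound parameter instead of a literal integer
    stmt = select([t]).limit(bindparam("n"))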
- - The criterion will be appended to any pre-existing GROUP BY criterion. - - This is an **in-place** mutation method; the - :meth:`~.GenerativeSelect.group_by` method is preferred, as it - provides standard :term:`method chaining`. - - """ - if len(clauses) == 1 and clauses[0] is None: - self._group_by_clause = ClauseList() - else: - if getattr(self, '_group_by_clause', None) is not None: - clauses = list(self._group_by_clause) + list(clauses) - self._group_by_clause = ClauseList( - *clauses, _literal_as_text=_literal_as_label_reference) - - @property - def _label_resolve_dict(self): - raise NotImplementedError() - - def _copy_internals(self, clone=_clone, **kw): - if self._limit_clause is not None: - self._limit_clause = clone(self._limit_clause, **kw) - if self._offset_clause is not None: - self._offset_clause = clone(self._offset_clause, **kw) - - -class CompoundSelect(GenerativeSelect): - """Forms the basis of ``UNION``, ``UNION ALL``, and other - SELECT-based set operations. - - - .. seealso:: - - :func:`.union` - - :func:`.union_all` - - :func:`.intersect` - - :func:`.intersect_all` - - :func:`.except` - - :func:`.except_all` - - """ - - __visit_name__ = 'compound_select' - - UNION = util.symbol('UNION') - UNION_ALL = util.symbol('UNION ALL') - EXCEPT = util.symbol('EXCEPT') - EXCEPT_ALL = util.symbol('EXCEPT ALL') - INTERSECT = util.symbol('INTERSECT') - INTERSECT_ALL = util.symbol('INTERSECT ALL') - - _is_from_container = True - - def __init__(self, keyword, *selects, **kwargs): - self._auto_correlate = kwargs.pop('correlate', False) - self.keyword = keyword - self.selects = [] - - numcols = None - - # some DBs do not like ORDER BY in the inner queries of a UNION, etc. - for n, s in enumerate(selects): - s = _clause_element_as_expr(s) - - if not numcols: - numcols = len(s.c._all_columns) - elif len(s.c._all_columns) != numcols: - raise exc.ArgumentError( - 'All selectables passed to ' - 'CompoundSelect must have identical numbers of ' - 'columns; select #%d has %d columns, select ' - '#%d has %d' % - (1, len(self.selects[0].c._all_columns), - n + 1, len(s.c._all_columns)) - ) - - self.selects.append(s.self_group(self)) - - GenerativeSelect.__init__(self, **kwargs) - - @property - def _label_resolve_dict(self): - d = dict( - (c.key, c) for c in self.c - ) - return d, d - - @classmethod - def _create_union(cls, *selects, **kwargs): - """Return a ``UNION`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - A similar :func:`union()` method is available on all - :class:`.FromClause` subclasses. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs) - - @classmethod - def _create_union_all(cls, *selects, **kwargs): - """Return a ``UNION ALL`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - A similar :func:`union_all()` method is available on all - :class:`.FromClause` subclasses. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs) - - @classmethod - def _create_except(cls, *selects, **kwargs): - """Return an ``EXCEPT`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - \*selects - a list of :class:`.Select` instances. 
- - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs) - - @classmethod - def _create_except_all(cls, *selects, **kwargs): - """Return an ``EXCEPT ALL`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs) - - @classmethod - def _create_intersect(cls, *selects, **kwargs): - """Return an ``INTERSECT`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs) - - @classmethod - def _create_intersect_all(cls, *selects, **kwargs): - """Return an ``INTERSECT ALL`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect( - CompoundSelect.INTERSECT_ALL, *selects, **kwargs) - - def _scalar_type(self): - return self.selects[0]._scalar_type() - - def self_group(self, against=None): - return FromGrouping(self) - - def is_derived_from(self, fromclause): - for s in self.selects: - if s.is_derived_from(fromclause): - return True - return False - - def _populate_column_collection(self): - for cols in zip(*[s.c._all_columns for s in self.selects]): - - # this is a slightly hacky thing - the union exports a - # column that resembles just that of the *first* selectable. - # to get at a "composite" column, particularly foreign keys, - # you have to dig through the proxies collection which we - # generate below. We may want to improve upon this, such as - # perhaps _make_proxy can accept a list of other columns - # that are "shared" - schema.column can then copy all the - # ForeignKeys in. this would allow the union() to have all - # those fks too. 
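The 'weight' annotation applied in _populate_column_collection above is what lets corresponding_column() resolve through a compound. A sketch (invented tables)::

    from sqlalchemy import table, column, select, union_all

    t1 = table("t1", column("id"), column("name"))
    t2 = table("t2", column("id"), column("name"))

    u = union_all(select([t1.c.id, t1.c.name]),
                  select([t2.c.id, t2.c.name]))

    # the compound exports columns shaped like the first select,
    # but columns from every member select resolve to them
    assert u.corresponding_column(t2.c.name) is u.c.name
    print(u)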
- - proxy = cols[0]._make_proxy( - self, name=cols[0]._label if self.use_labels else None, - key=cols[0]._key_label if self.use_labels else None) - - # hand-construct the "_proxies" collection to include all - # derived columns place a 'weight' annotation corresponding - # to how low in the list of select()s the column occurs, so - # that the corresponding_column() operation can resolve - # conflicts - - proxy._proxies = [ - c._annotate({'weight': i + 1}) for (i, c) in enumerate(cols)] - - def _refresh_for_new_column(self, column): - for s in self.selects: - s._refresh_for_new_column(column) - - if not self._cols_populated: - return None - - raise NotImplementedError("CompoundSelect constructs don't support " - "addition of columns to underlying " - "selectables") - - def _copy_internals(self, clone=_clone, **kw): - super(CompoundSelect, self)._copy_internals(clone, **kw) - self._reset_exported() - self.selects = [clone(s, **kw) for s in self.selects] - if hasattr(self, '_col_map'): - del self._col_map - for attr in ( - '_order_by_clause', '_group_by_clause', '_for_update_arg'): - if getattr(self, attr) is not None: - setattr(self, attr, clone(getattr(self, attr), **kw)) - - def get_children(self, column_collections=True, **kwargs): - return (column_collections and list(self.c) or []) \ - + [self._order_by_clause, self._group_by_clause] \ - + list(self.selects) - - def bind(self): - if self._bind: - return self._bind - for s in self.selects: - e = s.bind - if e: - return e - else: - return None - - def _set_bind(self, bind): - self._bind = bind - bind = property(bind, _set_bind) - - -class Select(HasPrefixes, HasSuffixes, GenerativeSelect): - """Represents a ``SELECT`` statement. - - """ - - __visit_name__ = 'select' - - _prefixes = () - _suffixes = () - _hints = util.immutabledict() - _statement_hints = () - _distinct = False - _from_cloned = None - _correlate = () - _correlate_except = None - _memoized_property = SelectBase._memoized_property - _is_select = True - - def __init__(self, - columns=None, - whereclause=None, - from_obj=None, - distinct=False, - having=None, - correlate=True, - prefixes=None, - suffixes=None, - **kwargs): - """Construct a new :class:`.Select`. - - Similar functionality is also available via the - :meth:`.FromClause.select` method on any :class:`.FromClause`. - - All arguments which accept :class:`.ClauseElement` arguments also - accept string arguments, which will be converted as appropriate into - either :func:`text()` or :func:`literal_column()` constructs. - - .. seealso:: - - :ref:`coretutorial_selecting` - Core Tutorial description of - :func:`.select`. - - :param columns: - A list of :class:`.ColumnElement` or :class:`.FromClause` - objects which will form the columns clause of the resulting - statement. For those objects that are instances of - :class:`.FromClause` (typically :class:`.Table` or :class:`.Alias` - objects), the :attr:`.FromClause.c` collection is extracted - to form a collection of :class:`.ColumnElement` objects. - - This parameter will also accept :class:`.Text` constructs as - given, as well as ORM-mapped classes. - - .. note:: - - The :paramref:`.select.columns` parameter is not available - in the method form of :func:`.select`, e.g. - :meth:`.FromClause.select`. - - .. seealso:: - - :meth:`.Select.column` - - :meth:`.Select.with_only_columns` - - :param whereclause: - A :class:`.ClauseElement` expression which will be used to form the - ``WHERE`` clause. 
It is typically preferable to add WHERE - criterion to an existing :class:`.Select` using method chaining - with :meth:`.Select.where`. - - .. seealso:: - - :meth:`.Select.where` - - :param from_obj: - A list of :class:`.ClauseElement` objects which will be added to the - ``FROM`` clause of the resulting statement. This is equivalent - to calling :meth:`.Select.select_from` using method chaining on - an existing :class:`.Select` object. - - .. seealso:: - - :meth:`.Select.select_from` - full description of explicit - FROM clause specification. - - :param autocommit: - Deprecated. Use ``.execution_options(autocommit=)`` - to set the autocommit option. - - .. seealso:: - - :meth:`.Executable.execution_options` - - :param bind=None: - an :class:`~.Engine` or :class:`~.Connection` instance - to which the - resulting :class:`.Select` object will be bound. The - :class:`.Select` object will otherwise automatically bind to - whatever :class:`~.base.Connectable` instances can be located within - its contained :class:`.ClauseElement` members. - - :param correlate=True: - indicates that this :class:`.Select` object should have its - contained :class:`.FromClause` elements "correlated" to an enclosing - :class:`.Select` object. It is typically preferable to specify - correlations on an existing :class:`.Select` construct using - :meth:`.Select.correlate`. - - .. seealso:: - - :meth:`.Select.correlate` - full description of correlation. - - :param distinct=False: - when ``True``, applies a ``DISTINCT`` qualifier to the columns - clause of the resulting statement. - - The boolean argument may also be a column expression or list - of column expressions - this is a special calling form which - is understood by the Postgresql dialect to render the - ``DISTINCT ON ()`` syntax. - - ``distinct`` is also available on an existing :class:`.Select` - object via the :meth:`~.Select.distinct` method. - - .. seealso:: - - :meth:`.Select.distinct` - - :param for_update=False: - when ``True``, applies ``FOR UPDATE`` to the end of the - resulting statement. - - .. deprecated:: 0.9.0 - use - :meth:`.Select.with_for_update` to specify the - structure of the ``FOR UPDATE`` clause. - - ``for_update`` accepts various string values interpreted by - specific backends, including: - - * ``"read"`` - on MySQL, translates to ``LOCK IN SHARE MODE``; - on Postgresql, translates to ``FOR SHARE``. - * ``"nowait"`` - on Postgresql and Oracle, translates to - ``FOR UPDATE NOWAIT``. - * ``"read_nowait"`` - on Postgresql, translates to - ``FOR SHARE NOWAIT``. - - .. seealso:: - - :meth:`.Select.with_for_update` - improved API for - specifying the ``FOR UPDATE`` clause. - - :param group_by: - a list of :class:`.ClauseElement` objects which will comprise the - ``GROUP BY`` clause of the resulting select. This parameter - is typically specified more naturally using the - :meth:`.Select.group_by` method on an existing :class:`.Select`. - - .. seealso:: - - :meth:`.Select.group_by` - - :param having: - a :class:`.ClauseElement` that will comprise the ``HAVING`` clause - of the resulting select when ``GROUP BY`` is used. This parameter - is typically specified more naturally using the - :meth:`.Select.having` method on an existing :class:`.Select`. - - .. seealso:: - - :meth:`.Select.having` - - :param limit=None: - a numerical value which usually renders as a ``LIMIT`` - expression in the resulting select. Backends that don't - support ``LIMIT`` will attempt to provide similar - functionality. 
This parameter is typically specified more naturally - using the :meth:`.Select.limit` method on an existing - :class:`.Select`. - - .. seealso:: - - :meth:`.Select.limit` - - :param offset=None: - a numeric value which usually renders as an ``OFFSET`` - expression in the resulting select. Backends that don't - support ``OFFSET`` will attempt to provide similar - functionality. This parameter is typically specified more naturally - using the :meth:`.Select.offset` method on an existing - :class:`.Select`. - - .. seealso:: - - :meth:`.Select.offset` - - :param order_by: - a scalar or list of :class:`.ClauseElement` objects which will - comprise the ``ORDER BY`` clause of the resulting select. - This parameter is typically specified more naturally using the - :meth:`.Select.order_by` method on an existing :class:`.Select`. - - .. seealso:: - - :meth:`.Select.order_by` - - :param use_labels=False: - when ``True``, the statement will be generated using labels - for each column in the columns clause, which qualify each - column with its parent table's (or aliases) name so that name - conflicts between columns in different tables don't occur. - The format of the label is _. The "c" - collection of the resulting :class:`.Select` object will use these - names as well for targeting column members. - - This parameter can also be specified on an existing - :class:`.Select` object using the :meth:`.Select.apply_labels` - method. - - .. seealso:: - - :meth:`.Select.apply_labels` - - """ - self._auto_correlate = correlate - if distinct is not False: - if distinct is True: - self._distinct = True - else: - self._distinct = [ - _literal_as_text(e) - for e in util.to_list(distinct) - ] - - if from_obj is not None: - self._from_obj = util.OrderedSet( - _interpret_as_from(f) - for f in util.to_list(from_obj)) - else: - self._from_obj = util.OrderedSet() - - try: - cols_present = bool(columns) - except TypeError: - raise exc.ArgumentError("columns argument to select() must " - "be a Python list or other iterable") - - if cols_present: - self._raw_columns = [] - for c in columns: - c = _interpret_as_column_or_from(c) - if isinstance(c, ScalarSelect): - c = c.self_group(against=operators.comma_op) - self._raw_columns.append(c) - else: - self._raw_columns = [] - - if whereclause is not None: - self._whereclause = _literal_as_text( - whereclause).self_group(against=operators._asbool) - else: - self._whereclause = None - - if having is not None: - self._having = _literal_as_text( - having).self_group(against=operators._asbool) - else: - self._having = None - - if prefixes: - self._setup_prefixes(prefixes) - - if suffixes: - self._setup_suffixes(suffixes) - - GenerativeSelect.__init__(self, **kwargs) - - @property - def _froms(self): - # would love to cache this, - # but there's just enough edge cases, particularly now that - # declarative encourages construction of SQL expressions - # without tables present, to just regen this each time. 
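As the parameter notes above repeatedly suggest, each constructor keyword has a generative method equivalent, which is the recommended form. A small sketch, table and column names invented::

    from sqlalchemy.sql.expression import column, select, table

    t = table('t', column('a'), column('b'))    # hypothetical table

    # keyword form, handled by Select.__init__ above ...
    s1 = select([t.c.a], whereclause=t.c.b > 5, order_by=[t.c.a], limit=10)

    # ... and the equivalent method-chained form
    s2 = select([t.c.a]).where(t.c.b > 5).order_by(t.c.a).limit(10)

    print(s1)
    print(s2)   # both should render the same SELECT ... ORDER BY ... LIMIT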
- froms = [] - seen = set() - translate = self._from_cloned - - for item in itertools.chain( - _from_objects(*self._raw_columns), - _from_objects(self._whereclause) - if self._whereclause is not None else (), - self._from_obj - ): - if item is self: - raise exc.InvalidRequestError( - "select() construct refers to itself as a FROM") - if translate and item in translate: - item = translate[item] - if not seen.intersection(item._cloned_set): - froms.append(item) - seen.update(item._cloned_set) - - return froms - - def _get_display_froms(self, explicit_correlate_froms=None, - implicit_correlate_froms=None): - """Return the full list of 'from' clauses to be displayed. - - Takes into account a set of existing froms which may be - rendered in the FROM clause of enclosing selects; this Select - may want to leave those absent if it is automatically - correlating. - - """ - froms = self._froms - - toremove = set(itertools.chain(*[ - _expand_cloned(f._hide_froms) - for f in froms])) - if toremove: - # if we're maintaining clones of froms, - # add the copies out to the toremove list. only include - # clones that are lexical equivalents. - if self._from_cloned: - toremove.update( - self._from_cloned[f] for f in - toremove.intersection(self._from_cloned) - if self._from_cloned[f]._is_lexical_equivalent(f) - ) - # filter out to FROM clauses not in the list, - # using a list to maintain ordering - froms = [f for f in froms if f not in toremove] - - if self._correlate: - to_correlate = self._correlate - if to_correlate: - froms = [ - f for f in froms if f not in - _cloned_intersection( - _cloned_intersection( - froms, explicit_correlate_froms or ()), - to_correlate - ) - ] - - if self._correlate_except is not None: - - froms = [ - f for f in froms if f not in - _cloned_difference( - _cloned_intersection( - froms, explicit_correlate_froms or ()), - self._correlate_except - ) - ] - - if self._auto_correlate and \ - implicit_correlate_froms and \ - len(froms) > 1: - - froms = [ - f for f in froms if f not in - _cloned_intersection(froms, implicit_correlate_froms) - ] - - if not len(froms): - raise exc.InvalidRequestError("Select statement '%s" - "' returned no FROM clauses " - "due to auto-correlation; " - "specify correlate() " - "to control correlation " - "manually." % self) - - return froms - - def _scalar_type(self): - elem = self._raw_columns[0] - cols = list(elem._select_iterable) - return cols[0].type - - @property - def froms(self): - """Return the displayed list of FromClause elements.""" - - return self._get_display_froms() - - def with_statement_hint(self, text, dialect_name='*'): - """add a statement hint to this :class:`.Select`. - - This method is similar to :meth:`.Select.with_hint` except that - it does not require an individual table, and instead applies to the - statement as a whole. - - Hints here are specific to the backend database and may include - directives such as isolation levels, file directives, fetch directives, - etc. - - .. versionadded:: 1.0.0 - - .. seealso:: - - :meth:`.Select.with_hint` - - """ - return self.with_hint(None, text, dialect_name) - - @_generative - def with_hint(self, selectable, text, dialect_name='*'): - """Add an indexing or other executional context hint for the given - selectable to this :class:`.Select`. - - The text of the hint is rendered in the appropriate - location for the database backend in use, relative - to the given :class:`.Table` or :class:`.Alias` passed as the - ``selectable`` argument. 
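A sketch of the statement-level hint API defined above; the hint string below is an invented placeholder, since real hint text is entirely backend-specific::

    from sqlalchemy.sql.expression import column, select, table

    t = table('t', column('a'))    # hypothetical table

    # applies to the statement as a whole rather than to one table;
    # "SOME_BACKEND_HINT" is not a real directive, just an illustration
    stmt = select([t.c.a]).with_statement_hint("SOME_BACKEND_HINT",
                                               dialect_name='mysql')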
The dialect implementation - typically uses Python string substitution syntax - with the token ``%(name)s`` to render the name of - the table or alias. E.g. when using Oracle, the - following:: - - select([mytable]).\\ - with_hint(mytable, "index(%(name)s ix_mytable)") - - Would render SQL as:: - - select /*+ index(mytable ix_mytable) */ ... from mytable - - The ``dialect_name`` option will limit the rendering of a particular - hint to a particular backend. Such as, to add hints for both Oracle - and Sybase simultaneously:: - - select([mytable]).\\ - with_hint(mytable, "index(%(name)s ix_mytable)", 'oracle').\\ - with_hint(mytable, "WITH INDEX ix_mytable", 'sybase') - - .. seealso:: - - :meth:`.Select.with_statement_hint` - - """ - if selectable is None: - self._statement_hints += ((dialect_name, text), ) - else: - self._hints = self._hints.union( - {(selectable, dialect_name): text}) - - @property - def type(self): - raise exc.InvalidRequestError("Select objects don't have a type. " - "Call as_scalar() on this Select " - "object to return a 'scalar' version " - "of this Select.") - - @_memoized_property.method - def locate_all_froms(self): - """return a Set of all FromClause elements referenced by this Select. - - This set is a superset of that returned by the ``froms`` property, - which is specifically for those FromClause elements that would - actually be rendered. - - """ - froms = self._froms - return froms + list(_from_objects(*froms)) - - @property - def inner_columns(self): - """an iterator of all ColumnElement expressions which would - be rendered into the columns clause of the resulting SELECT statement. - - """ - return _select_iterables(self._raw_columns) - - @_memoized_property - def _label_resolve_dict(self): - with_cols = dict( - (c._resolve_label or c._label or c.key, c) - for c in _select_iterables(self._raw_columns) - if c._allow_label_resolve) - only_froms = dict( - (c.key, c) for c in - _select_iterables(self.froms) if c._allow_label_resolve) - for key, value in only_froms.items(): - with_cols.setdefault(key, value) - - return with_cols, only_froms - - def is_derived_from(self, fromclause): - if self in fromclause._cloned_set: - return True - - for f in self.locate_all_froms(): - if f.is_derived_from(fromclause): - return True - return False - - def _copy_internals(self, clone=_clone, **kw): - super(Select, self)._copy_internals(clone, **kw) - - # Select() object has been cloned and probably adapted by the - # given clone function. Apply the cloning function to internal - # objects - - # 1. keep a dictionary of the froms we've cloned, and what - # they've become. This is consulted later when we derive - # additional froms from "whereclause" and the columns clause, - # which may still reference the uncloned parent table. - # as of 0.7.4 we also put the current version of _froms, which - # gets cleared on each generation. previously we were "baking" - # _froms into self._from_obj. - self._from_cloned = from_cloned = dict( - (f, clone(f, **kw)) for f in self._from_obj.union(self._froms)) - - # 3. update persistent _from_obj with the cloned versions. - self._from_obj = util.OrderedSet(from_cloned[f] for f in - self._from_obj) - - # the _correlate collection is done separately, what can happen - # here is the same item is _correlate as in _from_obj but the - # _correlate version has an annotation on it - (specifically - # RelationshipProperty.Comparator._criterion_exists() does - # this). 
Also keep _correlate liberally open with its previous - # contents, as this set is used for matching, not rendering. - self._correlate = set(clone(f) for f in - self._correlate).union(self._correlate) - - # 4. clone other things. The difficulty here is that Column - # objects are not actually cloned, and refer to their original - # .table, resulting in the wrong "from" parent after a clone - # operation. Hence _from_cloned and _from_obj supersede what is - # present here. - self._raw_columns = [clone(c, **kw) for c in self._raw_columns] - for attr in '_whereclause', '_having', '_order_by_clause', \ - '_group_by_clause', '_for_update_arg': - if getattr(self, attr) is not None: - setattr(self, attr, clone(getattr(self, attr), **kw)) - - # erase exported column list, _froms collection, - # etc. - self._reset_exported() - - def get_children(self, column_collections=True, **kwargs): - """return child elements as per the ClauseElement specification.""" - - return (column_collections and list(self.columns) or []) + \ - self._raw_columns + list(self._froms) + \ - [x for x in - (self._whereclause, self._having, - self._order_by_clause, self._group_by_clause) - if x is not None] - - @_generative - def column(self, column): - """return a new select() construct with the given column expression - added to its columns clause. - - """ - self.append_column(column) - - @util.dependencies("sqlalchemy.sql.util") - def reduce_columns(self, sqlutil, only_synonyms=True): - """Return a new :func`.select` construct with redundantly - named, equivalently-valued columns removed from the columns clause. - - "Redundant" here means two columns where one refers to the - other either based on foreign key, or via a simple equality - comparison in the WHERE clause of the statement. The primary purpose - of this method is to automatically construct a select statement - with all uniquely-named columns, without the need to use - table-qualified labels as :meth:`.apply_labels` does. - - When columns are omitted based on foreign key, the referred-to - column is the one that's kept. When columns are omitted based on - WHERE eqivalence, the first column in the columns clause is the - one that's kept. - - :param only_synonyms: when True, limit the removal of columns - to those which have the same name as the equivalent. Otherwise, - all columns that are equivalent to another are removed. - - .. versionadded:: 0.8 - - """ - return self.with_only_columns( - sqlutil.reduce_columns( - self.inner_columns, - only_synonyms=only_synonyms, - *(self._whereclause, ) + tuple(self._from_obj) - ) - ) - - @_generative - def with_only_columns(self, columns): - """Return a new :func:`.select` construct with its columns - clause replaced with the given columns. - - .. versionchanged:: 0.7.3 - Due to a bug fix, this method has a slight - behavioral change as of version 0.7.3. - Prior to version 0.7.3, the FROM clause of - a :func:`.select` was calculated upfront and as new columns - were added; in 0.7.3 and later it's calculated - at compile time, fixing an issue regarding late binding - of columns to parent tables. This changes the behavior of - :meth:`.Select.with_only_columns` in that FROM clauses no - longer represented in the new list are dropped, - but this behavior is more consistent in - that the FROM clauses are consistently derived from the - current columns clause. 
-            The original intent of this method
-            is to allow trimming of the existing columns list to be fewer
-            columns than originally present; the use case of replacing
-            the columns list with an entirely different one hadn't
-            been anticipated until 0.7.3 was released; the usage
-            guidelines below illustrate how this should be done.
-
-        This method is exactly equivalent to as if the original
-        :func:`.select` had been called with the given columns
-        clause.   I.e. a statement::
-
-            s = select([table1.c.a, table1.c.b])
-            s = s.with_only_columns([table1.c.b])
-
-        should be exactly equivalent to::
-
-            s = select([table1.c.b])
-
-        This means that FROM clauses which are only derived
-        from the column list will be discarded if the new column
-        list no longer contains that FROM::
-
-            >>> table1 = table('t1', column('a'), column('b'))
-            >>> table2 = table('t2', column('a'), column('b'))
-            >>> s1 = select([table1.c.a, table2.c.b])
-            >>> print s1
-            SELECT t1.a, t2.b FROM t1, t2
-            >>> s2 = s1.with_only_columns([table2.c.b])
-            >>> print s2
-            SELECT t2.b FROM t1
-
-        The preferred way to maintain a specific FROM clause
-        in the construct, assuming it won't be represented anywhere
-        else (i.e. not in the WHERE clause, etc.) is to set it using
-        :meth:`.Select.select_from`::
-
-            >>> s1 = select([table1.c.a, table2.c.b]).\\
-            ...     select_from(table1.join(table2,
-            ...             table1.c.a==table2.c.a))
-            >>> s2 = s1.with_only_columns([table2.c.b])
-            >>> print s2
-            SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a
-
-        Care should also be taken to use the correct
-        set of column objects passed to :meth:`.Select.with_only_columns`.
-        Since the method is essentially equivalent to calling the
-        :func:`.select` construct in the first place with the given
-        columns, the columns passed to :meth:`.Select.with_only_columns`
-        should usually be a subset of those which were passed
-        to the :func:`.select` construct, not those which are available
-        from the ``.c`` collection of that :func:`.select`.  That
-        is::
-
-            s = select([table1.c.a, table1.c.b]).select_from(table1)
-            s = s.with_only_columns([table1.c.b])
-
-        and **not**::
-
-            # usually incorrect
-            s = s.with_only_columns([s.c.b])
-
-        The latter would produce the SQL::
-
-            SELECT b
-            FROM (SELECT t1.a AS a, t1.b AS b
-            FROM t1), t1
-
-        Since the :func:`.select` construct is essentially being
-        asked to select both from ``table1`` as well as itself.
-
-        """
-        self._reset_exported()
-        rc = []
-        for c in columns:
-            c = _interpret_as_column_or_from(c)
-            if isinstance(c, ScalarSelect):
-                c = c.self_group(against=operators.comma_op)
-            rc.append(c)
-        self._raw_columns = rc
-
-    @_generative
-    def where(self, whereclause):
-        """return a new select() construct with the given expression added to
-        its WHERE clause, joined to the existing clause via AND, if any.
-
-        """
-
-        self.append_whereclause(whereclause)
-
-    @_generative
-    def having(self, having):
-        """return a new select() construct with the given expression added to
-        its HAVING clause, joined to the existing clause via AND, if any.
-
-        """
-        self.append_having(having)
-
-    @_generative
-    def distinct(self, *expr):
-        """Return a new select() construct which will apply DISTINCT to its
-        columns clause.
-
-        :param \*expr: optional column expressions.  When present,
-         the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
-         construct.
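A sketch of both forms of ``distinct()`` described above, using invented names; the expression form only renders as ``DISTINCT ON`` under the postgresql dialect::

    from sqlalchemy.dialects import postgresql
    from sqlalchemy.sql.expression import column, select, table

    t = table('t', column('a'), column('b'))    # hypothetical table

    plain = select([t.c.a]).distinct()          # SELECT DISTINCT t.a FROM t

    on_a = select([t.c.a, t.c.b]).distinct(t.c.a)
    print(on_a.compile(dialect=postgresql.dialect()))
    # SELECT DISTINCT ON (t.a) t.a, t.b FROM t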
- - """ - if expr: - expr = [_literal_as_label_reference(e) for e in expr] - if isinstance(self._distinct, list): - self._distinct = self._distinct + expr - else: - self._distinct = expr - else: - self._distinct = True - - @_generative - def select_from(self, fromclause): - """return a new :func:`.select` construct with the - given FROM expression - merged into its list of FROM objects. - - E.g.:: - - table1 = table('t1', column('a')) - table2 = table('t2', column('b')) - s = select([table1.c.a]).\\ - select_from( - table1.join(table2, table1.c.a==table2.c.b) - ) - - The "from" list is a unique set on the identity of each element, - so adding an already present :class:`.Table` or other selectable - will have no effect. Passing a :class:`.Join` that refers - to an already present :class:`.Table` or other selectable will have - the effect of concealing the presence of that selectable as - an individual element in the rendered FROM list, instead - rendering it into a JOIN clause. - - While the typical purpose of :meth:`.Select.select_from` is to - replace the default, derived FROM clause with a join, it can - also be called with individual table elements, multiple times - if desired, in the case that the FROM clause cannot be fully - derived from the columns clause:: - - select([func.count('*')]).select_from(table1) - - """ - self.append_from(fromclause) - - @_generative - def correlate(self, *fromclauses): - """return a new :class:`.Select` which will correlate the given FROM - clauses to that of an enclosing :class:`.Select`. - - Calling this method turns off the :class:`.Select` object's - default behavior of "auto-correlation". Normally, FROM elements - which appear in a :class:`.Select` that encloses this one via - its :term:`WHERE clause`, ORDER BY, HAVING or - :term:`columns clause` will be omitted from this :class:`.Select` - object's :term:`FROM clause`. - Setting an explicit correlation collection using the - :meth:`.Select.correlate` method provides a fixed list of FROM objects - that can potentially take place in this process. - - When :meth:`.Select.correlate` is used to apply specific FROM clauses - for correlation, the FROM elements become candidates for - correlation regardless of how deeply nested this :class:`.Select` - object is, relative to an enclosing :class:`.Select` which refers to - the same FROM object. This is in contrast to the behavior of - "auto-correlation" which only correlates to an immediate enclosing - :class:`.Select`. Multi-level correlation ensures that the link - between enclosed and enclosing :class:`.Select` is always via - at least one WHERE/ORDER BY/HAVING/columns clause in order for - correlation to take place. - - If ``None`` is passed, the :class:`.Select` object will correlate - none of its FROM entries, and all will render unconditionally - in the local FROM clause. - - :param \*fromclauses: a list of one or more :class:`.FromClause` - constructs, or other compatible constructs (i.e. ORM-mapped - classes) to become part of the correlate collection. - - .. versionchanged:: 0.8.0 ORM-mapped classes are accepted by - :meth:`.Select.correlate`. - - .. versionchanged:: 0.8.0 The :meth:`.Select.correlate` method no - longer unconditionally removes entries from the FROM clause; - instead, the candidate FROM entries must also be matched by a FROM - entry located in an enclosing :class:`.Select`, which ultimately - encloses this one as present in the WHERE clause, ORDER BY clause, - HAVING clause, or columns clause of an enclosing :meth:`.Select`. - - .. 
versionchanged:: 0.8.2 explicit correlation takes place - via any level of nesting of :class:`.Select` objects; in previous - 0.8 versions, correlation would only occur relative to the - immediate enclosing :class:`.Select` construct. - - .. seealso:: - - :meth:`.Select.correlate_except` - - :ref:`correlated_subqueries` - - """ - self._auto_correlate = False - if fromclauses and fromclauses[0] is None: - self._correlate = () - else: - self._correlate = set(self._correlate).union( - _interpret_as_from(f) for f in fromclauses) - - @_generative - def correlate_except(self, *fromclauses): - """return a new :class:`.Select` which will omit the given FROM - clauses from the auto-correlation process. - - Calling :meth:`.Select.correlate_except` turns off the - :class:`.Select` object's default behavior of - "auto-correlation" for the given FROM elements. An element - specified here will unconditionally appear in the FROM list, while - all other FROM elements remain subject to normal auto-correlation - behaviors. - - .. versionchanged:: 0.8.2 The :meth:`.Select.correlate_except` - method was improved to fully prevent FROM clauses specified here - from being omitted from the immediate FROM clause of this - :class:`.Select`. - - If ``None`` is passed, the :class:`.Select` object will correlate - all of its FROM entries. - - .. versionchanged:: 0.8.2 calling ``correlate_except(None)`` will - correctly auto-correlate all FROM clauses. - - :param \*fromclauses: a list of one or more :class:`.FromClause` - constructs, or other compatible constructs (i.e. ORM-mapped - classes) to become part of the correlate-exception collection. - - .. seealso:: - - :meth:`.Select.correlate` - - :ref:`correlated_subqueries` - - """ - - self._auto_correlate = False - if fromclauses and fromclauses[0] is None: - self._correlate_except = () - else: - self._correlate_except = set(self._correlate_except or ()).union( - _interpret_as_from(f) for f in fromclauses) - - def append_correlation(self, fromclause): - """append the given correlation expression to this select() - construct. - - This is an **in-place** mutation method; the - :meth:`~.Select.correlate` method is preferred, as it provides - standard :term:`method chaining`. - - """ - - self._auto_correlate = False - self._correlate = set(self._correlate).union( - _interpret_as_from(f) for f in fromclause) - - def append_column(self, column): - """append the given column expression to the columns clause of this - select() construct. - - This is an **in-place** mutation method; the - :meth:`~.Select.column` method is preferred, as it provides standard - :term:`method chaining`. - - """ - self._reset_exported() - column = _interpret_as_column_or_from(column) - - if isinstance(column, ScalarSelect): - column = column.self_group(against=operators.comma_op) - - self._raw_columns = self._raw_columns + [column] - - def append_prefix(self, clause): - """append the given columns clause prefix expression to this select() - construct. - - This is an **in-place** mutation method; the - :meth:`~.Select.prefix_with` method is preferred, as it provides - standard :term:`method chaining`. - - """ - clause = _literal_as_text(clause) - self._prefixes = self._prefixes + (clause,) - - def append_whereclause(self, whereclause): - """append the given expression to this select() construct's WHERE - criterion. - - The expression will be joined to existing WHERE criterion via AND. 
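A sketch of the auto-correlation behavior that ``correlate()`` and ``correlate_except()`` control, with invented tables::

    from sqlalchemy.sql.expression import column, exists, select, table

    users = table('users', column('id'))               # hypothetical tables
    addresses = table('addresses', column('user_id'))

    # used inside the WHERE clause of the enclosing SELECT, the inner
    # statement auto-correlates: `users` drops out of its FROM list
    inner = select([addresses.c.user_id]).\
        where(addresses.c.user_id == users.c.id)
    stmt = select([users.c.id]).where(exists(inner))
    print(stmt)
    # ... WHERE EXISTS (SELECT addresses.user_id FROM addresses
    #                   WHERE addresses.user_id = users.id)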
- - This is an **in-place** mutation method; the - :meth:`~.Select.where` method is preferred, as it provides standard - :term:`method chaining`. - - """ - - self._reset_exported() - self._whereclause = and_( - True_._ifnone(self._whereclause), whereclause) - - def append_having(self, having): - """append the given expression to this select() construct's HAVING - criterion. - - The expression will be joined to existing HAVING criterion via AND. - - This is an **in-place** mutation method; the - :meth:`~.Select.having` method is preferred, as it provides standard - :term:`method chaining`. - - """ - self._reset_exported() - self._having = and_(True_._ifnone(self._having), having) - - def append_from(self, fromclause): - """append the given FromClause expression to this select() construct's - FROM clause. - - This is an **in-place** mutation method; the - :meth:`~.Select.select_from` method is preferred, as it provides - standard :term:`method chaining`. - - """ - self._reset_exported() - fromclause = _interpret_as_from(fromclause) - self._from_obj = self._from_obj.union([fromclause]) - - @_memoized_property - def _columns_plus_names(self): - if self.use_labels: - names = set() - - def name_for_col(c): - if c._label is None or not c._render_label_in_columns_clause: - return (None, c) - - name = c._label - if name in names: - name = c.anon_label - else: - names.add(name) - return name, c - - return [ - name_for_col(c) - for c in util.unique_list( - _select_iterables(self._raw_columns)) - ] - else: - return [ - (None, c) - for c in util.unique_list( - _select_iterables(self._raw_columns)) - ] - - def _populate_column_collection(self): - for name, c in self._columns_plus_names: - if not hasattr(c, '_make_proxy'): - continue - if name is None: - key = None - elif self.use_labels: - key = c._key_label - if key is not None and key in self.c: - key = c.anon_label - else: - key = None - - c._make_proxy(self, key=key, - name=name, - name_is_truncatable=True) - - def _refresh_for_new_column(self, column): - for fromclause in self._froms: - col = fromclause._refresh_for_new_column(column) - if col is not None: - if col in self.inner_columns and self._cols_populated: - our_label = col._key_label if self.use_labels else col.key - if our_label not in self.c: - return col._make_proxy( - self, - name=col._label if self.use_labels else None, - key=col._key_label if self.use_labels else None, - name_is_truncatable=True) - return None - return None - - def self_group(self, against=None): - """return a 'grouping' construct as per the ClauseElement - specification. - - This produces an element that can be embedded in an expression. Note - that this method is called automatically as needed when constructing - expressions and should not require explicit use. - - """ - if isinstance(against, CompoundSelect): - return self - return FromGrouping(self) - - def union(self, other, **kwargs): - """return a SQL UNION of this select() construct against the given - selectable.""" - - return CompoundSelect._create_union(self, other, **kwargs) - - def union_all(self, other, **kwargs): - """return a SQL UNION ALL of this select() construct against the given - selectable. 
- - """ - return CompoundSelect._create_union_all(self, other, **kwargs) - - def except_(self, other, **kwargs): - """return a SQL EXCEPT of this select() construct against the given - selectable.""" - - return CompoundSelect._create_except(self, other, **kwargs) - - def except_all(self, other, **kwargs): - """return a SQL EXCEPT ALL of this select() construct against the - given selectable. - - """ - return CompoundSelect._create_except_all(self, other, **kwargs) - - def intersect(self, other, **kwargs): - """return a SQL INTERSECT of this select() construct against the given - selectable. - - """ - return CompoundSelect._create_intersect(self, other, **kwargs) - - def intersect_all(self, other, **kwargs): - """return a SQL INTERSECT ALL of this select() construct against the - given selectable. - - """ - return CompoundSelect._create_intersect_all(self, other, **kwargs) - - def bind(self): - if self._bind: - return self._bind - froms = self._froms - if not froms: - for c in self._raw_columns: - e = c.bind - if e: - self._bind = e - return e - else: - e = list(froms)[0].bind - if e: - self._bind = e - return e - - return None - - def _set_bind(self, bind): - self._bind = bind - bind = property(bind, _set_bind) - - -class ScalarSelect(Generative, Grouping): - _from_objects = [] - _is_from_container = True - - def __init__(self, element): - self.element = element - self.type = element._scalar_type() - - @property - def columns(self): - raise exc.InvalidRequestError('Scalar Select expression has no ' - 'columns; use this object directly ' - 'within a column-level expression.') - c = columns - - @_generative - def where(self, crit): - """Apply a WHERE clause to the SELECT statement referred to - by this :class:`.ScalarSelect`. - - """ - self.element = self.element.where(crit) - - def self_group(self, **kwargs): - return self - - -class Exists(UnaryExpression): - """Represent an ``EXISTS`` clause. - - """ - __visit_name__ = UnaryExpression.__visit_name__ - _from_objects = [] - - def __init__(self, *args, **kwargs): - """Construct a new :class:`.Exists` against an existing - :class:`.Select` object. - - Calling styles are of the following forms:: - - # use on an existing select() - s = select([table.c.col1]).where(table.c.col2==5) - s = exists(s) - - # construct a select() at once - exists(['*'], **select_arguments).where(criterion) - - # columns argument is optional, generates "EXISTS (SELECT *)" - # by default. - exists().where(table.c.col2==5) - - """ - if args and isinstance(args[0], (SelectBase, ScalarSelect)): - s = args[0] - else: - if not args: - args = ([literal_column('*')],) - s = Select(*args, **kwargs).as_scalar().self_group() - - UnaryExpression.__init__(self, s, operator=operators.exists, - type_=type_api.BOOLEANTYPE, - wraps_column_expression=True) - - def select(self, whereclause=None, **params): - return Select([self], whereclause, **params) - - def correlate(self, *fromclause): - e = self._clone() - e.element = self.element.correlate(*fromclause).self_group() - return e - - def correlate_except(self, *fromclause): - e = self._clone() - e.element = self.element.correlate_except(*fromclause).self_group() - return e - - def select_from(self, clause): - """return a new :class:`.Exists` construct, applying the given - expression to the :meth:`.Select.select_from` method of the select - statement contained. 
- - """ - e = self._clone() - e.element = self.element.select_from(clause).self_group() - return e - - def where(self, clause): - """return a new exists() construct with the given expression added to - its WHERE clause, joined to the existing clause via AND, if any. - - """ - e = self._clone() - e.element = self.element.where(clause).self_group() - return e - - -class TextAsFrom(SelectBase): - """Wrap a :class:`.TextClause` construct within a :class:`.SelectBase` - interface. - - This allows the :class:`.TextClause` object to gain a ``.c`` collection - and other FROM-like capabilities such as :meth:`.FromClause.alias`, - :meth:`.SelectBase.cte`, etc. - - The :class:`.TextAsFrom` construct is produced via the - :meth:`.TextClause.columns` method - see that method for details. - - .. versionadded:: 0.9.0 - - .. seealso:: - - :func:`.text` - - :meth:`.TextClause.columns` - - """ - __visit_name__ = "text_as_from" - - _textual = True - - def __init__(self, text, columns): - self.element = text - self.column_args = columns - - @property - def _bind(self): - return self.element._bind - - @_generative - def bindparams(self, *binds, **bind_as_values): - self.element = self.element.bindparams(*binds, **bind_as_values) - - def _populate_column_collection(self): - for c in self.column_args: - c._make_proxy(self) - - def _copy_internals(self, clone=_clone, **kw): - self._reset_exported() - self.element = clone(self.element, **kw) - - def _scalar_type(self): - return self.column_args[0].type - - -class AnnotatedFromClause(Annotated): - def __init__(self, element, values): - # force FromClause to generate their internal - # collections into __dict__ - element.c - Annotated.__init__(self, element, values) diff --git a/python/sqlalchemy/sql/sqltypes.py b/python/sqlalchemy/sql/sqltypes.py deleted file mode 100644 index 7bf15728..00000000 --- a/python/sqlalchemy/sql/sqltypes.py +++ /dev/null @@ -1,1714 +0,0 @@ -# sql/sqltypes.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""SQL specific types. - -""" - -import datetime as dt -import codecs - -from .type_api import TypeEngine, TypeDecorator, to_instance -from .elements import quoted_name, type_coerce, _defer_name -from .. import exc, util, processors -from .base import _bind_or_error, SchemaEventTarget -from . import operators -from .. import event -from ..util import pickle -import decimal - -if util.jython: - import array - - -class _DateAffinity(object): - - """Mixin date/time specific expression adaptations. - - Rules are implemented within Date,Time,Interval,DateTime, Numeric, - Integer. Based on http://www.postgresql.org/docs/current/static - /functions-datetime.html. - - """ - - @property - def _expression_adaptations(self): - raise NotImplementedError() - - class Comparator(TypeEngine.Comparator): - _blank_dict = util.immutabledict() - - def _adapt_expression(self, op, other_comparator): - othertype = other_comparator.type._type_affinity - return ( - op, to_instance( - self.type._expression_adaptations. - get(op, self._blank_dict). 
- get(othertype, NULLTYPE)) - ) - comparator_factory = Comparator - - -class Concatenable(object): - - """A mixin that marks a type as supporting 'concatenation', - typically strings.""" - - class Comparator(TypeEngine.Comparator): - - def _adapt_expression(self, op, other_comparator): - if (op is operators.add and - isinstance( - other_comparator, - (Concatenable.Comparator, NullType.Comparator) - )): - return operators.concat_op, self.expr.type - else: - return op, self.expr.type - - comparator_factory = Comparator - - -class String(Concatenable, TypeEngine): - - """The base for all string and character types. - - In SQL, corresponds to VARCHAR. Can also take Python unicode objects - and encode to the database's encoding in bind params (and the reverse for - result sets.) - - The `length` field is usually required when the `String` type is - used within a CREATE TABLE statement, as VARCHAR requires a length - on most databases. - - """ - - __visit_name__ = 'string' - - def __init__(self, length=None, collation=None, - convert_unicode=False, - unicode_error=None, - _warn_on_bytestring=False - ): - """ - Create a string-holding type. - - :param length: optional, a length for the column for use in - DDL and CAST expressions. May be safely omitted if no ``CREATE - TABLE`` will be issued. Certain databases may require a - ``length`` for use in DDL, and will raise an exception when - the ``CREATE TABLE`` DDL is issued if a ``VARCHAR`` - with no length is included. Whether the value is - interpreted as bytes or characters is database specific. - - :param collation: Optional, a column-level collation for - use in DDL and CAST expressions. Renders using the - COLLATE keyword supported by SQLite, MySQL, and Postgresql. - E.g.:: - - >>> from sqlalchemy import cast, select, String - >>> print select([cast('some string', String(collation='utf8'))]) - SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1 - - .. versionadded:: 0.8 Added support for COLLATE to all - string types. - - :param convert_unicode: When set to ``True``, the - :class:`.String` type will assume that - input is to be passed as Python ``unicode`` objects, - and results returned as Python ``unicode`` objects. - If the DBAPI in use does not support Python unicode - (which is fewer and fewer these days), SQLAlchemy - will encode/decode the value, using the - value of the ``encoding`` parameter passed to - :func:`.create_engine` as the encoding. - - When using a DBAPI that natively supports Python - unicode objects, this flag generally does not - need to be set. For columns that are explicitly - intended to store non-ASCII data, the :class:`.Unicode` - or :class:`.UnicodeText` - types should be used regardless, which feature - the same behavior of ``convert_unicode`` but - also indicate an underlying column type that - directly supports unicode, such as ``NVARCHAR``. - - For the extremely rare case that Python ``unicode`` - is to be encoded/decoded by SQLAlchemy on a backend - that does natively support Python ``unicode``, - the value ``force`` can be passed here which will - cause SQLAlchemy's encode/decode services to be - used unconditionally. - - :param unicode_error: Optional, a method to use to handle Unicode - conversion errors. Behaves like the ``errors`` keyword argument to - the standard library's ``string.decode()`` functions. This flag - requires that ``convert_unicode`` is set to ``force`` - otherwise, - SQLAlchemy is not guaranteed to handle the task of unicode - conversion. 
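A sketch of :class:`.String` in table metadata, per the parameters above; table, column and collation names are illustrative only::

    from sqlalchemy import Column, MetaData, String, Table

    metadata = MetaData()

    # length matters once CREATE TABLE is emitted; collation renders as
    # COLLATE on backends that support it
    documents = Table('documents', metadata,
                      Column('title', String(200)),
                      Column('body', String(collation='utf8_bin')))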
Note that this flag adds significant performance - overhead to row-fetching operations for backends that already - return unicode objects natively (which most DBAPIs do). This - flag should only be used as a last resort for reading - strings from a column with varied or corrupted encodings. - - """ - if unicode_error is not None and convert_unicode != 'force': - raise exc.ArgumentError("convert_unicode must be 'force' " - "when unicode_error is set.") - - self.length = length - self.collation = collation - self.convert_unicode = convert_unicode - self.unicode_error = unicode_error - self._warn_on_bytestring = _warn_on_bytestring - - def literal_processor(self, dialect): - def process(value): - value = value.replace("'", "''") - return "'%s'" % value - return process - - def bind_processor(self, dialect): - if self.convert_unicode or dialect.convert_unicode: - if dialect.supports_unicode_binds and \ - self.convert_unicode != 'force': - if self._warn_on_bytestring: - def process(value): - if isinstance(value, util.binary_type): - util.warn_limited( - "Unicode type received non-unicode " - "bind param value %r.", - (util.ellipses_string(value),)) - return value - return process - else: - return None - else: - encoder = codecs.getencoder(dialect.encoding) - warn_on_bytestring = self._warn_on_bytestring - - def process(value): - if isinstance(value, util.text_type): - return encoder(value, self.unicode_error)[0] - elif warn_on_bytestring and value is not None: - util.warn_limited( - "Unicode type received non-unicode bind " - "param value %r.", - (util.ellipses_string(value),)) - return value - return process - else: - return None - - def result_processor(self, dialect, coltype): - wants_unicode = self.convert_unicode or dialect.convert_unicode - needs_convert = wants_unicode and \ - (dialect.returns_unicode_strings is not True or - self.convert_unicode in ('force', 'force_nocheck')) - needs_isinstance = ( - needs_convert and - dialect.returns_unicode_strings and - self.convert_unicode != 'force_nocheck' - ) - if needs_convert: - if needs_isinstance: - return processors.to_conditional_unicode_processor_factory( - dialect.encoding, self.unicode_error) - else: - return processors.to_unicode_processor_factory( - dialect.encoding, self.unicode_error) - else: - return None - - @property - def python_type(self): - if self.convert_unicode: - return util.text_type - else: - return str - - def get_dbapi_type(self, dbapi): - return dbapi.STRING - - -class Text(String): - - """A variably sized string type. - - In SQL, usually corresponds to CLOB or TEXT. Can also take Python - unicode objects and encode to the database's encoding in bind - params (and the reverse for result sets.) In general, TEXT objects - do not have a length; while some databases will accept a length - argument here, it will be rejected by others. - - """ - __visit_name__ = 'text' - - -class Unicode(String): - - """A variable length Unicode string type. - - The :class:`.Unicode` type is a :class:`.String` subclass - that assumes input and output as Python ``unicode`` data, - and in that regard is equivalent to the usage of the - ``convert_unicode`` flag with the :class:`.String` type. - However, unlike plain :class:`.String`, it also implies an - underlying column type that is explicitly supporting of non-ASCII - data, such as ``NVARCHAR`` on Oracle and SQL Server. 
- This can impact the output of ``CREATE TABLE`` statements - and ``CAST`` functions at the dialect level, and can - also affect the handling of bound parameters in some - specific DBAPI scenarios. - - The encoding used by the :class:`.Unicode` type is usually - determined by the DBAPI itself; most modern DBAPIs - feature support for Python ``unicode`` objects as bound - values and result set values, and the encoding should - be configured as detailed in the notes for the target - DBAPI in the :ref:`dialect_toplevel` section. - - For those DBAPIs which do not support, or are not configured - to accommodate Python ``unicode`` objects - directly, SQLAlchemy does the encoding and decoding - outside of the DBAPI. The encoding in this scenario - is determined by the ``encoding`` flag passed to - :func:`.create_engine`. - - When using the :class:`.Unicode` type, it is only appropriate - to pass Python ``unicode`` objects, and not plain ``str``. - If a plain ``str`` is passed under Python 2, a warning - is emitted. If you notice your application emitting these warnings but - you're not sure of the source of them, the Python - ``warnings`` filter, documented at - http://docs.python.org/library/warnings.html, - can be used to turn these warnings into exceptions - which will illustrate a stack trace:: - - import warnings - warnings.simplefilter('error') - - For an application that wishes to pass plain bytestrings - and Python ``unicode`` objects to the ``Unicode`` type - equally, the bytestrings must first be decoded into - unicode. The recipe at :ref:`coerce_to_unicode` illustrates - how this is done. - - See also: - - :class:`.UnicodeText` - unlengthed textual counterpart - to :class:`.Unicode`. - - """ - - __visit_name__ = 'unicode' - - def __init__(self, length=None, **kwargs): - """ - Create a :class:`.Unicode` object. - - Parameters are the same as that of :class:`.String`, - with the exception that ``convert_unicode`` - defaults to ``True``. - - """ - kwargs.setdefault('convert_unicode', True) - kwargs.setdefault('_warn_on_bytestring', True) - super(Unicode, self).__init__(length=length, **kwargs) - - -class UnicodeText(Text): - - """An unbounded-length Unicode string type. - - See :class:`.Unicode` for details on the unicode - behavior of this object. - - Like :class:`.Unicode`, usage the :class:`.UnicodeText` type implies a - unicode-capable type being used on the backend, such as - ``NCLOB``, ``NTEXT``. - - """ - - __visit_name__ = 'unicode_text' - - def __init__(self, length=None, **kwargs): - """ - Create a Unicode-converting Text type. - - Parameters are the same as that of :class:`.Text`, - with the exception that ``convert_unicode`` - defaults to ``True``. 
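Usage of the two Unicode types described above mirrors :class:`.String` and :class:`.Text`; a sketch, names invented::

    from sqlalchemy import Column, MetaData, Table, Unicode, UnicodeText

    metadata = MetaData()

    # both imply convert_unicode=True and an explicitly unicode-capable
    # backend type (e.g. NVARCHAR / NCLOB) where the dialect provides one
    notes = Table('notes', metadata,
                  Column('title', Unicode(100)),
                  Column('body', UnicodeText()))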
- - """ - kwargs.setdefault('convert_unicode', True) - kwargs.setdefault('_warn_on_bytestring', True) - super(UnicodeText, self).__init__(length=length, **kwargs) - - -class Integer(_DateAffinity, TypeEngine): - - """A type for ``int`` integers.""" - - __visit_name__ = 'integer' - - def get_dbapi_type(self, dbapi): - return dbapi.NUMBER - - @property - def python_type(self): - return int - - def literal_processor(self, dialect): - def process(value): - return str(value) - return process - - @util.memoized_property - def _expression_adaptations(self): - # TODO: need a dictionary object that will - # handle operators generically here, this is incomplete - return { - operators.add: { - Date: Date, - Integer: self.__class__, - Numeric: Numeric, - }, - operators.mul: { - Interval: Interval, - Integer: self.__class__, - Numeric: Numeric, - }, - operators.div: { - Integer: self.__class__, - Numeric: Numeric, - }, - operators.truediv: { - Integer: self.__class__, - Numeric: Numeric, - }, - operators.sub: { - Integer: self.__class__, - Numeric: Numeric, - }, - } - - -class SmallInteger(Integer): - - """A type for smaller ``int`` integers. - - Typically generates a ``SMALLINT`` in DDL, and otherwise acts like - a normal :class:`.Integer` on the Python side. - - """ - - __visit_name__ = 'small_integer' - - -class BigInteger(Integer): - - """A type for bigger ``int`` integers. - - Typically generates a ``BIGINT`` in DDL, and otherwise acts like - a normal :class:`.Integer` on the Python side. - - """ - - __visit_name__ = 'big_integer' - - -class Numeric(_DateAffinity, TypeEngine): - - """A type for fixed precision numbers, such as ``NUMERIC`` or ``DECIMAL``. - - This type returns Python ``decimal.Decimal`` objects by default, unless - the :paramref:`.Numeric.asdecimal` flag is set to False, in which case - they are coerced to Python ``float`` objects. - - .. note:: - - The :class:`.Numeric` type is designed to receive data from a database - type that is explicitly known to be a decimal type - (e.g. ``DECIMAL``, ``NUMERIC``, others) and not a floating point - type (e.g. ``FLOAT``, ``REAL``, others). - If the database column on the server is in fact a floating-point type - type, such as ``FLOAT`` or ``REAL``, use the :class:`.Float` - type or a subclass, otherwise numeric coercion between - ``float``/``Decimal`` may or may not function as expected. - - .. note:: - - The Python ``decimal.Decimal`` class is generally slow - performing; cPython 3.3 has now switched to use the `cdecimal - `_ library natively. For - older Python versions, the ``cdecimal`` library can be patched - into any application where it will replace the ``decimal`` - library fully, however this needs to be applied globally and - before any other modules have been imported, as follows:: - - import sys - import cdecimal - sys.modules["decimal"] = cdecimal - - Note that the ``cdecimal`` and ``decimal`` libraries are **not - compatible with each other**, so patching ``cdecimal`` at the - global level is the only way it can be used effectively with - various DBAPIs that hardcode to import the ``decimal`` library. - - """ - - __visit_name__ = 'numeric' - - _default_decimal_return_scale = 10 - - def __init__(self, precision=None, scale=None, - decimal_return_scale=None, asdecimal=True): - """ - Construct a Numeric. - - :param precision: the numeric precision for use in DDL ``CREATE - TABLE``. - - :param scale: the numeric scale for use in DDL ``CREATE TABLE``. - - :param asdecimal: default True. 
Return whether or not - values should be sent as Python Decimal objects, or - as floats. Different DBAPIs send one or the other based on - datatypes - the Numeric type will ensure that return values - are one or the other across DBAPIs consistently. - - :param decimal_return_scale: Default scale to use when converting - from floats to Python decimals. Floating point values will typically - be much longer due to decimal inaccuracy, and most floating point - database types don't have a notion of "scale", so by default the - float type looks for the first ten decimal places when converting. - Specfiying this value will override that length. Types which - do include an explicit ".scale" value, such as the base - :class:`.Numeric` as well as the MySQL float types, will use the - value of ".scale" as the default for decimal_return_scale, if not - otherwise specified. - - .. versionadded:: 0.9.0 - - When using the ``Numeric`` type, care should be taken to ensure - that the asdecimal setting is apppropriate for the DBAPI in use - - when Numeric applies a conversion from Decimal->float or float-> - Decimal, this conversion incurs an additional performance overhead - for all result columns received. - - DBAPIs that return Decimal natively (e.g. psycopg2) will have - better accuracy and higher performance with a setting of ``True``, - as the native translation to Decimal reduces the amount of floating- - point issues at play, and the Numeric type itself doesn't need - to apply any further conversions. However, another DBAPI which - returns floats natively *will* incur an additional conversion - overhead, and is still subject to floating point data loss - in - which case ``asdecimal=False`` will at least remove the extra - conversion overhead. - - """ - self.precision = precision - self.scale = scale - self.decimal_return_scale = decimal_return_scale - self.asdecimal = asdecimal - - @property - def _effective_decimal_return_scale(self): - if self.decimal_return_scale is not None: - return self.decimal_return_scale - elif getattr(self, "scale", None) is not None: - return self.scale - else: - return self._default_decimal_return_scale - - def get_dbapi_type(self, dbapi): - return dbapi.NUMBER - - def literal_processor(self, dialect): - def process(value): - return str(value) - return process - - @property - def python_type(self): - if self.asdecimal: - return decimal.Decimal - else: - return float - - def bind_processor(self, dialect): - if dialect.supports_native_decimal: - return None - else: - return processors.to_float - - def result_processor(self, dialect, coltype): - if self.asdecimal: - if dialect.supports_native_decimal: - # we're a "numeric", DBAPI will give us Decimal directly - return None - else: - util.warn('Dialect %s+%s does *not* support Decimal ' - 'objects natively, and SQLAlchemy must ' - 'convert from floating point - rounding ' - 'errors and other issues may occur. Please ' - 'consider storing Decimal numbers as strings ' - 'or integers on this platform for lossless ' - 'storage.' % (dialect.name, dialect.driver)) - - # we're a "numeric", DBAPI returns floats, convert. 
- return processors.to_decimal_processor_factory( - decimal.Decimal, - self.scale if self.scale is not None - else self._default_decimal_return_scale) - else: - if dialect.supports_native_decimal: - return processors.to_float - else: - return None - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.mul: { - Interval: Interval, - Numeric: self.__class__, - Integer: self.__class__, - }, - operators.div: { - Numeric: self.__class__, - Integer: self.__class__, - }, - operators.truediv: { - Numeric: self.__class__, - Integer: self.__class__, - }, - operators.add: { - Numeric: self.__class__, - Integer: self.__class__, - }, - operators.sub: { - Numeric: self.__class__, - Integer: self.__class__, - } - } - - -class Float(Numeric): - - """Type representing floating point types, such as ``FLOAT`` or ``REAL``. - - This type returns Python ``float`` objects by default, unless the - :paramref:`.Float.asdecimal` flag is set to True, in which case they - are coerced to ``decimal.Decimal`` objects. - - .. note:: - - The :class:`.Float` type is designed to receive data from a database - type that is explicitly known to be a floating point type - (e.g. ``FLOAT``, ``REAL``, others) - and not a decimal type (e.g. ``DECIMAL``, ``NUMERIC``, others). - If the database column on the server is in fact a Numeric - type, such as ``DECIMAL`` or ``NUMERIC``, use the :class:`.Numeric` - type or a subclass, otherwise numeric coercion between - ``float``/``Decimal`` may or may not function as expected. - - """ - - __visit_name__ = 'float' - - scale = None - - def __init__(self, precision=None, asdecimal=False, - decimal_return_scale=None, **kwargs): - """ - Construct a Float. - - :param precision: the numeric precision for use in DDL ``CREATE - TABLE``. - - :param asdecimal: the same flag as that of :class:`.Numeric`, but - defaults to ``False``. Note that setting this flag to ``True`` - results in floating point conversion. - - :param decimal_return_scale: Default scale to use when converting - from floats to Python decimals. Floating point values will typically - be much longer due to decimal inaccuracy, and most floating point - database types don't have a notion of "scale", so by default the - float type looks for the first ten decimal places when converting. - Specfiying this value will override that length. Note that the - MySQL float types, which do include "scale", will use "scale" - as the default for decimal_return_scale, if not otherwise specified. - - .. versionadded:: 0.9.0 - - :param \**kwargs: deprecated. Additional arguments here are ignored - by the default :class:`.Float` type. For database specific - floats that support additional arguments, see that dialect's - documentation for details, such as - :class:`sqlalchemy.dialects.mysql.FLOAT`. 
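A sketch of the ``asdecimal`` symmetry between :class:`.Numeric` and :class:`.Float` described above::

    from sqlalchemy import Float, Numeric

    price = Numeric(precision=10, scale=2)   # results coerced to decimal.Decimal
    ratio = Numeric(asdecimal=False)         # results left as float

    weight = Float(precision=25)             # results left as float (the default)
    precise = Float(asdecimal=True)          # results coerced to Decimal, using
                                             # decimal_return_scale (default 10)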
- - """ - self.precision = precision - self.asdecimal = asdecimal - self.decimal_return_scale = decimal_return_scale - if kwargs: - util.warn_deprecated("Additional keyword arguments " - "passed to Float ignored.") - - def result_processor(self, dialect, coltype): - if self.asdecimal: - return processors.to_decimal_processor_factory( - decimal.Decimal, - self._effective_decimal_return_scale) - else: - return None - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.mul: { - Interval: Interval, - Numeric: self.__class__, - }, - operators.div: { - Numeric: self.__class__, - }, - operators.truediv: { - Numeric: self.__class__, - }, - operators.add: { - Numeric: self.__class__, - }, - operators.sub: { - Numeric: self.__class__, - } - } - - -class DateTime(_DateAffinity, TypeEngine): - - """A type for ``datetime.datetime()`` objects. - - Date and time types return objects from the Python ``datetime`` - module. Most DBAPIs have built in support for the datetime - module, with the noted exception of SQLite. In the case of - SQLite, date and time types are stored as strings which are then - converted back to datetime objects when rows are returned. - - """ - - __visit_name__ = 'datetime' - - def __init__(self, timezone=False): - """Construct a new :class:`.DateTime`. - - :param timezone: boolean. If True, and supported by the - backend, will produce 'TIMESTAMP WITH TIMEZONE'. For backends - that don't support timezone aware timestamps, has no - effect. - - """ - self.timezone = timezone - - def get_dbapi_type(self, dbapi): - return dbapi.DATETIME - - @property - def python_type(self): - return dt.datetime - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.add: { - Interval: self.__class__, - }, - operators.sub: { - Interval: self.__class__, - DateTime: Interval, - }, - } - - -class Date(_DateAffinity, TypeEngine): - - """A type for ``datetime.date()`` objects.""" - - __visit_name__ = 'date' - - def get_dbapi_type(self, dbapi): - return dbapi.DATETIME - - @property - def python_type(self): - return dt.date - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.add: { - Integer: self.__class__, - Interval: DateTime, - Time: DateTime, - }, - operators.sub: { - # date - integer = date - Integer: self.__class__, - - # date - date = integer. 
- Date: Integer, - - Interval: DateTime, - - # date - datetime = interval, - # this one is not in the PG docs - # but works - DateTime: Interval, - }, - } - - -class Time(_DateAffinity, TypeEngine): - - """A type for ``datetime.time()`` objects.""" - - __visit_name__ = 'time' - - def __init__(self, timezone=False): - self.timezone = timezone - - def get_dbapi_type(self, dbapi): - return dbapi.DATETIME - - @property - def python_type(self): - return dt.time - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.add: { - Date: DateTime, - Interval: self.__class__ - }, - operators.sub: { - Time: Interval, - Interval: self.__class__, - }, - } - - -class _Binary(TypeEngine): - - """Define base behavior for binary types.""" - - def __init__(self, length=None): - self.length = length - - def literal_processor(self, dialect): - def process(value): - value = value.decode(dialect.encoding).replace("'", "''") - return "'%s'" % value - return process - - @property - def python_type(self): - return util.binary_type - - # Python 3 - sqlite3 doesn't need the `Binary` conversion - # here, though pg8000 does to indicate "bytea" - def bind_processor(self, dialect): - if dialect.dbapi is None: - return None - - DBAPIBinary = dialect.dbapi.Binary - - def process(value): - if value is not None: - return DBAPIBinary(value) - else: - return None - return process - - # Python 3 has native bytes() type - # both sqlite3 and pg8000 seem to return it, - # psycopg2 as of 2.5 returns 'memoryview' - if util.py2k: - def result_processor(self, dialect, coltype): - if util.jython: - def process(value): - if value is not None: - if isinstance(value, array.array): - return value.tostring() - return str(value) - else: - return None - else: - process = processors.to_str - return process - else: - def result_processor(self, dialect, coltype): - def process(value): - if value is not None: - value = bytes(value) - return value - return process - - def coerce_compared_value(self, op, value): - """See :meth:`.TypeEngine.coerce_compared_value` for a description.""" - - if isinstance(value, util.string_types): - return self - else: - return super(_Binary, self).coerce_compared_value(op, value) - - def get_dbapi_type(self, dbapi): - return dbapi.BINARY - - -class LargeBinary(_Binary): - - """A type for large binary byte data. - - The Binary type generates BLOB or BYTEA when tables are created, - and also converts incoming values using the ``Binary`` callable - provided by each DB-API. - - """ - - __visit_name__ = 'large_binary' - - def __init__(self, length=None): - """ - Construct a LargeBinary type. - - :param length: optional, a length for the column for use in - DDL statements, for those BLOB types that accept a length - (i.e. MySQL). It does *not* produce a *lengthed* BINARY/VARBINARY - type - use the BINARY/VARBINARY types specifically for those. - May be safely omitted if no ``CREATE - TABLE`` will be issued. Certain databases may require a - *length* for use in DDL, and will raise an exception when - the ``CREATE TABLE`` DDL is issued. - - """ - _Binary.__init__(self, length=length) - - -class Binary(LargeBinary): - - """Deprecated. Renamed to LargeBinary.""" - - def __init__(self, *arg, **kw): - util.warn_deprecated('The Binary type has been renamed to ' - 'LargeBinary.') - LargeBinary.__init__(self, *arg, **kw) - - -class SchemaType(SchemaEventTarget): - - """Mark a type as possibly requiring schema-level DDL for usage. - - Supports types that must be explicitly created/dropped (i.e. 
PG ENUM type) - as well as types that are complimented by table or schema level - constraints, triggers, and other rules. - - :class:`.SchemaType` classes can also be targets for the - :meth:`.DDLEvents.before_parent_attach` and - :meth:`.DDLEvents.after_parent_attach` events, where the events fire off - surrounding the association of the type object with a parent - :class:`.Column`. - - .. seealso:: - - :class:`.Enum` - - :class:`.Boolean` - - - """ - - def __init__(self, name=None, schema=None, metadata=None, - inherit_schema=False, quote=None, _create_events=True): - if name is not None: - self.name = quoted_name(name, quote) - else: - self.name = None - self.schema = schema - self.metadata = metadata - self.inherit_schema = inherit_schema - self._create_events = _create_events - - if _create_events and self.metadata: - event.listen( - self.metadata, - "before_create", - util.portable_instancemethod(self._on_metadata_create) - ) - event.listen( - self.metadata, - "after_drop", - util.portable_instancemethod(self._on_metadata_drop) - ) - - def _set_parent(self, column): - column._on_table_attach(util.portable_instancemethod(self._set_table)) - - def _set_table(self, column, table): - if self.inherit_schema: - self.schema = table.schema - - if not self._create_events: - return - - event.listen( - table, - "before_create", - util.portable_instancemethod( - self._on_table_create) - ) - event.listen( - table, - "after_drop", - util.portable_instancemethod(self._on_table_drop) - ) - if self.metadata is None: - # TODO: what's the difference between self.metadata - # and table.metadata here ? - event.listen( - table.metadata, - "before_create", - util.portable_instancemethod(self._on_metadata_create) - ) - event.listen( - table.metadata, - "after_drop", - util.portable_instancemethod(self._on_metadata_drop) - ) - - def copy(self, **kw): - return self.adapt(self.__class__, _create_events=True) - - def adapt(self, impltype, **kw): - schema = kw.pop('schema', self.schema) - metadata = kw.pop('metadata', self.metadata) - _create_events = kw.pop('_create_events', False) - - return impltype(name=self.name, - schema=schema, - inherit_schema=self.inherit_schema, - metadata=metadata, - _create_events=_create_events, - **kw) - - @property - def bind(self): - return self.metadata and self.metadata.bind or None - - def create(self, bind=None, checkfirst=False): - """Issue CREATE ddl for this type, if applicable.""" - - if bind is None: - bind = _bind_or_error(self) - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t.create(bind=bind, checkfirst=checkfirst) - - def drop(self, bind=None, checkfirst=False): - """Issue DROP ddl for this type, if applicable.""" - - if bind is None: - bind = _bind_or_error(self) - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t.drop(bind=bind, checkfirst=checkfirst) - - def _on_table_create(self, target, bind, **kw): - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t._on_table_create(target, bind, **kw) - - def _on_table_drop(self, target, bind, **kw): - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t._on_table_drop(target, bind, **kw) - - def _on_metadata_create(self, target, bind, **kw): - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t._on_metadata_create(target, bind, **kw) 
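    # (editor's sketch, not part of the original source: explicit use of the
    #  create()/drop() methods above; the engine URL is hypothetical)
    #
    #      from sqlalchemy import Enum, create_engine
    #
    #      engine = create_engine("postgresql://scott:tiger@localhost/test")
    #      status = Enum("new", "open", "closed", name="status")
    #      status.create(bind=engine, checkfirst=True)  # CREATE TYPE on PG
    #      status.drop(bind=engine, checkfirst=True)    # DROP TYPE
    #
    #  On backends with no native ENUM the type renders as VARCHAR plus a
    #  CHECK constraint, and these calls fall through as no-ops.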
- - def _on_metadata_drop(self, target, bind, **kw): - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t._on_metadata_drop(target, bind, **kw) - - -class Enum(String, SchemaType): - - """Generic Enum Type. - - The Enum type provides a set of possible string values which the - column is constrained towards. - - By default, uses the backend's native ENUM type if available, - else uses VARCHAR + a CHECK constraint. - - .. seealso:: - - :class:`~.postgresql.ENUM` - PostgreSQL-specific type, - which has additional functionality. - - """ - - __visit_name__ = 'enum' - - def __init__(self, *enums, **kw): - """Construct an enum. - - Keyword arguments which don't apply to a specific backend are ignored - by that backend. - - :param \*enums: string or unicode enumeration labels. If unicode - labels are present, the `convert_unicode` flag is auto-enabled. - - :param convert_unicode: Enable unicode-aware bind parameter and - result-set processing for this Enum's data. This is set - automatically based on the presence of unicode label strings. - - :param metadata: Associate this type directly with a ``MetaData`` - object. For types that exist on the target database as an - independent schema construct (Postgresql), this type will be - created and dropped within ``create_all()`` and ``drop_all()`` - operations. If the type is not associated with any ``MetaData`` - object, it will associate itself with each ``Table`` in which it is - used, and will be created when any of those individual tables are - created, after a check is performed for its existence. The type is - only dropped when ``drop_all()`` is called for that ``Table`` - object's metadata, however. - - :param name: The name of this type. This is required for Postgresql - and any future supported database which requires an explicitly - named type, or an explicitly named constraint in order to generate - the type and/or a table that uses it. - - :param native_enum: Use the database's native ENUM type when - available. Defaults to True. When False, uses VARCHAR + check - constraint for all backends. - - :param schema: Schema name of this type. For types that exist on the - target database as an independent schema construct (Postgresql), - this parameter specifies the named schema in which the type is - present. - - .. note:: - - The ``schema`` of the :class:`.Enum` type does not - by default make use of the ``schema`` established on the - owning :class:`.Table`. If this behavior is desired, - set the ``inherit_schema`` flag to ``True``. - - :param quote: Set explicit quoting preferences for the type's name. - - :param inherit_schema: When ``True``, the "schema" from the owning - :class:`.Table` will be copied to the "schema" attribute of this - :class:`.Enum`, replacing whatever value was passed for the - ``schema`` attribute. This also takes effect when using the - :meth:`.Table.tometadata` operation. - - .. 
versionadded:: 0.8 - - """ - self.enums = enums - self.native_enum = kw.pop('native_enum', True) - convert_unicode = kw.pop('convert_unicode', None) - if convert_unicode is None: - for e in enums: - if isinstance(e, util.text_type): - convert_unicode = True - break - else: - convert_unicode = False - - if self.enums: - length = max(len(x) for x in self.enums) - else: - length = 0 - String.__init__(self, - length=length, - convert_unicode=convert_unicode, - ) - SchemaType.__init__(self, **kw) - - def __repr__(self): - return util.generic_repr(self, - additional_kw=[('native_enum', True)], - to_inspect=[Enum, SchemaType], - ) - - def _should_create_constraint(self, compiler): - return not self.native_enum or \ - not compiler.dialect.supports_native_enum - - @util.dependencies("sqlalchemy.sql.schema") - def _set_table(self, schema, column, table): - if self.native_enum: - SchemaType._set_table(self, column, table) - - e = schema.CheckConstraint( - type_coerce(column, self).in_(self.enums), - name=_defer_name(self.name), - _create_rule=util.portable_instancemethod( - self._should_create_constraint), - _type_bound=True - ) - assert e.table is table - - def adapt(self, impltype, **kw): - schema = kw.pop('schema', self.schema) - metadata = kw.pop('metadata', self.metadata) - _create_events = kw.pop('_create_events', False) - if issubclass(impltype, Enum): - return impltype(name=self.name, - schema=schema, - metadata=metadata, - convert_unicode=self.convert_unicode, - native_enum=self.native_enum, - inherit_schema=self.inherit_schema, - _create_events=_create_events, - *self.enums, - **kw) - else: - # TODO: why would we be here? - return super(Enum, self).adapt(impltype, **kw) - - -class PickleType(TypeDecorator): - - """Holds Python objects, which are serialized using pickle. - - PickleType builds upon the Binary type to apply Python's - ``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on - the way out, allowing any pickleable Python object to be stored as - a serialized binary field. - - To allow ORM change events to propagate for elements associated - with :class:`.PickleType`, see :ref:`mutable_toplevel`. - - """ - - impl = LargeBinary - - def __init__(self, protocol=pickle.HIGHEST_PROTOCOL, - pickler=None, comparator=None): - """ - Construct a PickleType. - - :param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``. - - :param pickler: defaults to cPickle.pickle or pickle.pickle if - cPickle is not available. May be any object with - pickle-compatible ``dumps` and ``loads`` methods. - - :param comparator: a 2-arg callable predicate used - to compare values of this type. If left as ``None``, - the Python "equals" operator is used to compare values. 
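        A usage sketch (the table and metadata names are hypothetical)::

            from sqlalchemy import (Column, Integer, MetaData, PickleType,
                                    Table)

            metadata = MetaData()
            cache = Table('cache', metadata,
                          Column('id', Integer, primary_key=True),
                          # any pickleable value round-trips through
                          # pickler.dumps()/loads()
                          Column('payload', PickleType()))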
- - """ - self.protocol = protocol - self.pickler = pickler or pickle - self.comparator = comparator - super(PickleType, self).__init__() - - def __reduce__(self): - return PickleType, (self.protocol, - None, - self.comparator) - - def bind_processor(self, dialect): - impl_processor = self.impl.bind_processor(dialect) - dumps = self.pickler.dumps - protocol = self.protocol - if impl_processor: - def process(value): - if value is not None: - value = dumps(value, protocol) - return impl_processor(value) - else: - def process(value): - if value is not None: - value = dumps(value, protocol) - return value - return process - - def result_processor(self, dialect, coltype): - impl_processor = self.impl.result_processor(dialect, coltype) - loads = self.pickler.loads - if impl_processor: - def process(value): - value = impl_processor(value) - if value is None: - return None - return loads(value) - else: - def process(value): - if value is None: - return None - return loads(value) - return process - - def compare_values(self, x, y): - if self.comparator: - return self.comparator(x, y) - else: - return x == y - - -class Boolean(TypeEngine, SchemaType): - - """A bool datatype. - - Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on - the Python side deals in ``True`` or ``False``. - - """ - - __visit_name__ = 'boolean' - - def __init__( - self, create_constraint=True, name=None, _create_events=True): - """Construct a Boolean. - - :param create_constraint: defaults to True. If the boolean - is generated as an int/smallint, also create a CHECK constraint - on the table that ensures 1 or 0 as a value. - - :param name: if a CHECK constraint is generated, specify - the name of the constraint. - - """ - self.create_constraint = create_constraint - self.name = name - self._create_events = _create_events - - def _should_create_constraint(self, compiler): - return not compiler.dialect.supports_native_boolean - - @util.dependencies("sqlalchemy.sql.schema") - def _set_table(self, schema, column, table): - if not self.create_constraint: - return - - e = schema.CheckConstraint( - type_coerce(column, self).in_([0, 1]), - name=_defer_name(self.name), - _create_rule=util.portable_instancemethod( - self._should_create_constraint), - _type_bound=True - ) - assert e.table is table - - @property - def python_type(self): - return bool - - def literal_processor(self, dialect): - if dialect.supports_native_boolean: - def process(value): - return "true" if value else "false" - else: - def process(value): - return str(1 if value else 0) - return process - - def bind_processor(self, dialect): - if dialect.supports_native_boolean: - return None - else: - return processors.boolean_to_int - - def result_processor(self, dialect, coltype): - if dialect.supports_native_boolean: - return None - else: - return processors.int_to_boolean - - -class Interval(_DateAffinity, TypeDecorator): - - """A type for ``datetime.timedelta()`` objects. - - The Interval type deals with ``datetime.timedelta`` objects. In - PostgreSQL, the native ``INTERVAL`` type is used; for others, the - value is stored as a date which is relative to the "epoch" - (Jan. 1, 1970). - - Note that the ``Interval`` type does not currently provide date arithmetic - operations on platforms which do not support interval types natively. 
Such - operations usually require transformation of both sides of the expression - (such as, conversion of both sides into integer epoch values first) which - currently is a manual procedure (such as via - :attr:`~sqlalchemy.sql.expression.func`). - - """ - - impl = DateTime - epoch = dt.datetime.utcfromtimestamp(0) - - def __init__(self, native=True, - second_precision=None, - day_precision=None): - """Construct an Interval object. - - :param native: when True, use the actual - INTERVAL type provided by the database, if - supported (currently Postgresql, Oracle). - Otherwise, represent the interval data as - an epoch value regardless. - - :param second_precision: For native interval types - which support a "fractional seconds precision" parameter, - i.e. Oracle and Postgresql - - :param day_precision: for native interval types which - support a "day precision" parameter, i.e. Oracle. - - """ - super(Interval, self).__init__() - self.native = native - self.second_precision = second_precision - self.day_precision = day_precision - - def adapt(self, cls, **kw): - if self.native and hasattr(cls, '_adapt_from_generic_interval'): - return cls._adapt_from_generic_interval(self, **kw) - else: - return self.__class__( - native=self.native, - second_precision=self.second_precision, - day_precision=self.day_precision, - **kw) - - @property - def python_type(self): - return dt.timedelta - - def bind_processor(self, dialect): - impl_processor = self.impl.bind_processor(dialect) - epoch = self.epoch - if impl_processor: - def process(value): - if value is not None: - value = epoch + value - return impl_processor(value) - else: - def process(value): - if value is not None: - value = epoch + value - return value - return process - - def result_processor(self, dialect, coltype): - impl_processor = self.impl.result_processor(dialect, coltype) - epoch = self.epoch - if impl_processor: - def process(value): - value = impl_processor(value) - if value is None: - return None - return value - epoch - else: - def process(value): - if value is None: - return None - return value - epoch - return process - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.add: { - Date: DateTime, - Interval: self.__class__, - DateTime: DateTime, - Time: Time, - }, - operators.sub: { - Interval: self.__class__ - }, - operators.mul: { - Numeric: self.__class__ - }, - operators.truediv: { - Numeric: self.__class__ - }, - operators.div: { - Numeric: self.__class__ - } - } - - @property - def _type_affinity(self): - return Interval - - def coerce_compared_value(self, op, value): - """See :meth:`.TypeEngine.coerce_compared_value` for a description.""" - - return self.impl.coerce_compared_value(op, value) - - -class REAL(Float): - - """The SQL REAL type.""" - - __visit_name__ = 'REAL' - - -class FLOAT(Float): - - """The SQL FLOAT type.""" - - __visit_name__ = 'FLOAT' - - -class NUMERIC(Numeric): - - """The SQL NUMERIC type.""" - - __visit_name__ = 'NUMERIC' - - -class DECIMAL(Numeric): - - """The SQL DECIMAL type.""" - - __visit_name__ = 'DECIMAL' - - -class INTEGER(Integer): - - """The SQL INT or INTEGER type.""" - - __visit_name__ = 'INTEGER' -INT = INTEGER - - -class SMALLINT(SmallInteger): - - """The SQL SMALLINT type.""" - - __visit_name__ = 'SMALLINT' - - -class BIGINT(BigInteger): - - """The SQL BIGINT type.""" - - __visit_name__ = 'BIGINT' - - -class TIMESTAMP(DateTime): - - """The SQL TIMESTAMP type.""" - - __visit_name__ = 'TIMESTAMP' - - def get_dbapi_type(self, dbapi): - return 
dbapi.TIMESTAMP - - -class DATETIME(DateTime): - - """The SQL DATETIME type.""" - - __visit_name__ = 'DATETIME' - - -class DATE(Date): - - """The SQL DATE type.""" - - __visit_name__ = 'DATE' - - -class TIME(Time): - - """The SQL TIME type.""" - - __visit_name__ = 'TIME' - - -class TEXT(Text): - - """The SQL TEXT type.""" - - __visit_name__ = 'TEXT' - - -class CLOB(Text): - - """The CLOB type. - - This type is found in Oracle and Informix. - """ - - __visit_name__ = 'CLOB' - - -class VARCHAR(String): - - """The SQL VARCHAR type.""" - - __visit_name__ = 'VARCHAR' - - -class NVARCHAR(Unicode): - - """The SQL NVARCHAR type.""" - - __visit_name__ = 'NVARCHAR' - - -class CHAR(String): - - """The SQL CHAR type.""" - - __visit_name__ = 'CHAR' - - -class NCHAR(Unicode): - - """The SQL NCHAR type.""" - - __visit_name__ = 'NCHAR' - - -class BLOB(LargeBinary): - - """The SQL BLOB type.""" - - __visit_name__ = 'BLOB' - - -class BINARY(_Binary): - - """The SQL BINARY type.""" - - __visit_name__ = 'BINARY' - - -class VARBINARY(_Binary): - - """The SQL VARBINARY type.""" - - __visit_name__ = 'VARBINARY' - - -class BOOLEAN(Boolean): - - """The SQL BOOLEAN type.""" - - __visit_name__ = 'BOOLEAN' - - -class NullType(TypeEngine): - - """An unknown type. - - :class:`.NullType` is used as a default type for those cases where - a type cannot be determined, including: - - * During table reflection, when the type of a column is not recognized - by the :class:`.Dialect` - * When constructing SQL expressions using plain Python objects of - unknown types (e.g. ``somecolumn == my_special_object``) - * When a new :class:`.Column` is created, and the given type is passed - as ``None`` or is not passed at all. - - The :class:`.NullType` can be used within SQL expression invocation - without issue, it just has no behavior either at the expression - construction level or at the bind-parameter/result processing level. - :class:`.NullType` will result in a :exc:`.CompileError` if the compiler - is asked to render the type itself, such as if it is used in a - :func:`.cast` operation or within a schema creation operation such as that - invoked by :meth:`.MetaData.create_all` or the :class:`.CreateTable` - construct. - - """ - __visit_name__ = 'null' - - _isnull = True - - def literal_processor(self, dialect): - def process(value): - return "NULL" - return process - - class Comparator(TypeEngine.Comparator): - - def _adapt_expression(self, op, other_comparator): - if isinstance(other_comparator, NullType.Comparator) or \ - not operators.is_commutative(op): - return op, self.expr.type - else: - return other_comparator._adapt_expression(op, self) - comparator_factory = Comparator - - -class MatchType(Boolean): - """Refers to the return type of the MATCH operator. - - As the :meth:`.ColumnOperators.match` is probably the most open-ended - operator in generic SQLAlchemy Core, we can't assume the return type - at SQL evaluation time, as MySQL returns a floating point, not a boolean, - and other backends might do something different. So this type - acts as a placeholder, currently subclassing :class:`.Boolean`. - The type allows dialects to inject result-processing functionality - if needed, and on MySQL will return floating-point values. - - .. 
versionadded:: 1.0.0 - - """ - -NULLTYPE = NullType() -BOOLEANTYPE = Boolean() -STRINGTYPE = String() -INTEGERTYPE = Integer() -MATCHTYPE = MatchType() - -_type_map = { - int: Integer(), - float: Numeric(), - bool: BOOLEANTYPE, - decimal.Decimal: Numeric(), - dt.date: Date(), - dt.datetime: DateTime(), - dt.time: Time(), - dt.timedelta: Interval(), - util.NoneType: NULLTYPE -} - -if util.py3k: - _type_map[bytes] = LargeBinary() - _type_map[str] = Unicode() -else: - _type_map[unicode] = Unicode() - _type_map[str] = String() - - -# back-assign to type_api -from . import type_api -type_api.BOOLEANTYPE = BOOLEANTYPE -type_api.STRINGTYPE = STRINGTYPE -type_api.INTEGERTYPE = INTEGERTYPE -type_api.NULLTYPE = NULLTYPE -type_api.MATCHTYPE = MATCHTYPE -type_api._type_map = _type_map - -TypeEngine.Comparator.BOOLEANTYPE = BOOLEANTYPE diff --git a/python/sqlalchemy/sql/type_api.py b/python/sqlalchemy/sql/type_api.py deleted file mode 100644 index a55eed98..00000000 --- a/python/sqlalchemy/sql/type_api.py +++ /dev/null @@ -1,1166 +0,0 @@ -# sql/types_api.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Base types API. - -""" - - -from .. import exc, util -from . import operators -from .visitors import Visitable, VisitableType - -# these are back-assigned by sqltypes. -BOOLEANTYPE = None -INTEGERTYPE = None -NULLTYPE = None -STRINGTYPE = None -MATCHTYPE = None - - -class TypeEngine(Visitable): - """The ultimate base class for all SQL datatypes. - - Common subclasses of :class:`.TypeEngine` include - :class:`.String`, :class:`.Integer`, and :class:`.Boolean`. - - For an overview of the SQLAlchemy typing system, see - :ref:`types_toplevel`. - - .. seealso:: - - :ref:`types_toplevel` - - """ - - _sqla_type = True - _isnull = False - - class Comparator(operators.ColumnOperators): - """Base class for custom comparison operations defined at the - type level. See :attr:`.TypeEngine.comparator_factory`. - - - """ - __slots__ = 'expr', 'type' - - default_comparator = None - - def __init__(self, expr): - self.expr = expr - self.type = expr.type - - @util.dependencies('sqlalchemy.sql.default_comparator') - def operate(self, default_comparator, op, *other, **kwargs): - o = default_comparator.operator_lookup[op.__name__] - return o[0](self.expr, op, *(other + o[1:]), **kwargs) - - @util.dependencies('sqlalchemy.sql.default_comparator') - def reverse_operate(self, default_comparator, op, other, **kwargs): - o = default_comparator.operator_lookup[op.__name__] - return o[0](self.expr, op, other, - reverse=True, *o[1:], **kwargs) - - def _adapt_expression(self, op, other_comparator): - """evaluate the return type of , - and apply any adaptations to the given operator. - - This method determines the type of a resulting binary expression - given two source types and an operator. For example, two - :class:`.Column` objects, both of the type :class:`.Integer`, will - produce a :class:`.BinaryExpression` that also has the type - :class:`.Integer` when compared via the addition (``+``) operator. - However, using the addition operator with an :class:`.Integer` - and a :class:`.Date` object will produce a :class:`.Date`, assuming - "days delta" behavior by the database (in reality, most databases - other than Postgresql don't accept this particular operation). - - The method returns a tuple of the form , . 
- The resulting operator and type will be those applied to the - resulting :class:`.BinaryExpression` as the final operator and the - right-hand side of the expression. - - Note that only a subset of operators make usage of - :meth:`._adapt_expression`, - including math operators and user-defined operators, but not - boolean comparison or special SQL keywords like MATCH or BETWEEN. - - """ - return op, other_comparator.type - - def __reduce__(self): - return _reconstitute_comparator, (self.expr, ) - - hashable = True - """Flag, if False, means values from this type aren't hashable. - - Used by the ORM when uniquing result lists. - - """ - - comparator_factory = Comparator - """A :class:`.TypeEngine.Comparator` class which will apply - to operations performed by owning :class:`.ColumnElement` objects. - - The :attr:`.comparator_factory` attribute is a hook consulted by - the core expression system when column and SQL expression operations - are performed. When a :class:`.TypeEngine.Comparator` class is - associated with this attribute, it allows custom re-definition of - all existing operators, as well as definition of new operators. - Existing operators include those provided by Python operator overloading - such as :meth:`.operators.ColumnOperators.__add__` and - :meth:`.operators.ColumnOperators.__eq__`, - those provided as standard - attributes of :class:`.operators.ColumnOperators` such as - :meth:`.operators.ColumnOperators.like` - and :meth:`.operators.ColumnOperators.in_`. - - Rudimentary usage of this hook is allowed through simple subclassing - of existing types, or alternatively by using :class:`.TypeDecorator`. - See the documentation section :ref:`types_operators` for examples. - - .. versionadded:: 0.8 The expression system was enhanced to support - customization of operators on a per-type level. - - """ - - def compare_against_backend(self, dialect, conn_type): - """Compare this type against the given backend type. - - This function is currently not implemented for SQLAlchemy - types, and for all built in types will return ``None``. However, - it can be implemented by a user-defined type - where it can be consumed by schema comparison tools such as - Alembic autogenerate. - - A future release of SQLAlchemy will potentially impement this method - for builtin types as well. - - The function should return True if this type is equivalent to the - given type; the type is typically reflected from the database - so should be database specific. The dialect in use is also - passed. It can also return False to assert that the type is - not equivalent. - - :param dialect: a :class:`.Dialect` that is involved in the comparison. - - :param conn_type: the type object reflected from the backend. - - .. versionadded:: 1.0.3 - - """ - return None - - def copy_value(self, value): - return value - - def literal_processor(self, dialect): - """Return a conversion function for processing literal values that are - to be rendered directly without using binds. - - This function is used when the compiler makes use of the - "literal_binds" flag, typically used in DDL generation as well - as in certain scenarios where backends don't accept bound parameters. - - .. versionadded:: 0.9.0 - - """ - return None - - def bind_processor(self, dialect): - """Return a conversion function for processing bind values. - - Returns a callable which will receive a bind parameter value - as the sole positional argument and will return a value to - send to the DB-API. 
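        A minimal sketch of the contract (``LowerString`` is a hypothetical
        name; for application-level conversion the :class:`.TypeDecorator`
        hooks described below are usually preferred)::

            class LowerString(String):
                def bind_processor(self, dialect):
                    def process(value):
                        # normalize values on their way to the DB-API
                        return value.lower() if value is not None else None
                    return process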
- - If processing is not necessary, the method should return ``None``. - - :param dialect: Dialect instance in use. - - """ - return None - - def result_processor(self, dialect, coltype): - """Return a conversion function for processing result row values. - - Returns a callable which will receive a result row column - value as the sole positional argument and will return a value - to return to the user. - - If processing is not necessary, the method should return ``None``. - - :param dialect: Dialect instance in use. - - :param coltype: DBAPI coltype argument received in cursor.description. - - """ - return None - - def column_expression(self, colexpr): - """Given a SELECT column expression, return a wrapping SQL expression. - - This is typically a SQL function that wraps a column expression - as rendered in the columns clause of a SELECT statement. - It is used for special data types that require - columns to be wrapped in some special database function in order - to coerce the value before being sent back to the application. - It is the SQL analogue of the :meth:`.TypeEngine.result_processor` - method. - - The method is evaluated at statement compile time, as opposed - to statement construction time. - - See also: - - :ref:`types_sql_value_processing` - - """ - - return None - - @util.memoized_property - def _has_column_expression(self): - """memoized boolean, check if column_expression is implemented. - - Allows the method to be skipped for the vast majority of expression - types that don't use this feature. - - """ - - return self.__class__.column_expression.__code__ \ - is not TypeEngine.column_expression.__code__ - - def bind_expression(self, bindvalue): - """"Given a bind value (i.e. a :class:`.BindParameter` instance), - return a SQL expression in its place. - - This is typically a SQL function that wraps the existing bound - parameter within the statement. It is used for special data types - that require literals being wrapped in some special database function - in order to coerce an application-level value into a database-specific - format. It is the SQL analogue of the - :meth:`.TypeEngine.bind_processor` method. - - The method is evaluated at statement compile time, as opposed - to statement construction time. - - Note that this method, when implemented, should always return - the exact same structure, without any conditional logic, as it - may be used in an executemany() call against an arbitrary number - of bound parameter sets. - - See also: - - :ref:`types_sql_value_processing` - - """ - return None - - @util.memoized_property - def _has_bind_expression(self): - """memoized boolean, check if bind_expression is implemented. - - Allows the method to be skipped for the vast majority of expression - types that don't use this feature. - - """ - - return self.__class__.bind_expression.__code__ \ - is not TypeEngine.bind_expression.__code__ - - def compare_values(self, x, y): - """Compare two values for equality.""" - - return x == y - - def get_dbapi_type(self, dbapi): - """Return the corresponding type object from the underlying DB-API, if - any. - - This can be useful for calling ``setinputsizes()``, for example. - - """ - return None - - @property - def python_type(self): - """Return the Python type object expected to be returned - by instances of this type, if known. - - Basically, for those types which enforce a return type, - or are known across the board to do such for all common - DBAPIs (like ``int`` for example), will return that type. 
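        For instance, under Python 3 (illustrative only)::

            >>> from sqlalchemy import DateTime, Integer
            >>> Integer().python_type
            <class 'int'>
            >>> DateTime().python_type
            <class 'datetime.datetime'>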
- - If a return type is not defined, raises - ``NotImplementedError``. - - Note that any type also accommodates NULL in SQL which - means you can also get back ``None`` from any type - in practice. - - """ - raise NotImplementedError() - - def with_variant(self, type_, dialect_name): - """Produce a new type object that will utilize the given - type when applied to the dialect of the given name. - - e.g.:: - - from sqlalchemy.types import String - from sqlalchemy.dialects import mysql - - s = String() - - s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql') - - The construction of :meth:`.TypeEngine.with_variant` is always - from the "fallback" type to that which is dialect specific. - The returned type is an instance of :class:`.Variant`, which - itself provides a :meth:`.Variant.with_variant` - that can be called repeatedly. - - :param type_: a :class:`.TypeEngine` that will be selected - as a variant from the originating type, when a dialect - of the given name is in use. - :param dialect_name: base name of the dialect which uses - this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) - - .. versionadded:: 0.7.2 - - """ - return Variant(self, {dialect_name: to_instance(type_)}) - - @util.memoized_property - def _type_affinity(self): - """Return a rudimental 'affinity' value expressing the general class - of type.""" - - typ = None - for t in self.__class__.__mro__: - if t in (TypeEngine, UserDefinedType): - return typ - elif issubclass(t, (TypeEngine, UserDefinedType)): - typ = t - else: - return self.__class__ - - def dialect_impl(self, dialect): - """Return a dialect-specific implementation for this - :class:`.TypeEngine`. - - """ - try: - return dialect._type_memos[self]['impl'] - except KeyError: - return self._dialect_info(dialect)['impl'] - - def _cached_literal_processor(self, dialect): - """Return a dialect-specific literal processor for this type.""" - try: - return dialect._type_memos[self]['literal'] - except KeyError: - d = self._dialect_info(dialect) - d['literal'] = lp = d['impl'].literal_processor(dialect) - return lp - - def _cached_bind_processor(self, dialect): - """Return a dialect-specific bind processor for this type.""" - - try: - return dialect._type_memos[self]['bind'] - except KeyError: - d = self._dialect_info(dialect) - d['bind'] = bp = d['impl'].bind_processor(dialect) - return bp - - def _cached_result_processor(self, dialect, coltype): - """Return a dialect-specific result processor for this type.""" - - try: - return dialect._type_memos[self][coltype] - except KeyError: - d = self._dialect_info(dialect) - # key assumption: DBAPI type codes are - # constants. Else this dictionary would - # grow unbounded. - d[coltype] = rp = d['impl'].result_processor(dialect, coltype) - return rp - - def _dialect_info(self, dialect): - """Return a dialect-specific registry which - caches a dialect-specific implementation, bind processing - function, and one or more result processing functions.""" - - if self in dialect._type_memos: - return dialect._type_memos[self] - else: - impl = self._gen_dialect_impl(dialect) - if impl is self: - impl = self.adapt(type(self)) - # this can't be self, else we create a cycle - assert impl is not self - dialect._type_memos[self] = d = {'impl': impl} - return d - - def _gen_dialect_impl(self, dialect): - return dialect.type_descriptor(self) - - def adapt(self, cls, **kw): - """Produce an "adapted" form of this type, given an "impl" class - to work with. 
- - This method is used internally to associate generic - types with "implementation" types that are specific to a particular - dialect. - """ - return util.constructor_copy(self, cls, **kw) - - def coerce_compared_value(self, op, value): - """Suggest a type for a 'coerced' Python value in an expression. - - Given an operator and value, gives the type a chance - to return a type which the value should be coerced into. - - The default behavior here is conservative; if the right-hand - side is already coerced into a SQL type based on its - Python type, it is usually left alone. - - End-user functionality extension here should generally be via - :class:`.TypeDecorator`, which provides more liberal behavior in that - it defaults to coercing the other side of the expression into this - type, thus applying special Python conversions above and beyond those - needed by the DBAPI to both ides. It also provides the public method - :meth:`.TypeDecorator.coerce_compared_value` which is intended for - end-user customization of this behavior. - - """ - _coerced_type = _type_map.get(type(value), NULLTYPE) - if _coerced_type is NULLTYPE or _coerced_type._type_affinity \ - is self._type_affinity: - return self - else: - return _coerced_type - - def _compare_type_affinity(self, other): - return self._type_affinity is other._type_affinity - - def compile(self, dialect=None): - """Produce a string-compiled form of this :class:`.TypeEngine`. - - When called with no arguments, uses a "default" dialect - to produce a string result. - - :param dialect: a :class:`.Dialect` instance. - - """ - # arg, return value is inconsistent with - # ClauseElement.compile()....this is a mistake. - - if not dialect: - dialect = self._default_dialect() - - return dialect.type_compiler.process(self) - - @util.dependencies("sqlalchemy.engine.default") - def _default_dialect(self, default): - if self.__class__.__module__.startswith("sqlalchemy.dialects"): - tokens = self.__class__.__module__.split(".")[0:3] - mod = ".".join(tokens) - return getattr(__import__(mod).dialects, tokens[-1]).dialect() - else: - return default.DefaultDialect() - - def __str__(self): - if util.py2k: - return unicode(self.compile()).\ - encode('ascii', 'backslashreplace') - else: - return str(self.compile()) - - def __repr__(self): - return util.generic_repr(self) - - -class VisitableCheckKWArg(util.EnsureKWArgType, VisitableType): - pass - - -class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)): - """Base for user defined types. - - This should be the base of new types. Note that - for most cases, :class:`.TypeDecorator` is probably - more appropriate:: - - import sqlalchemy.types as types - - class MyType(types.UserDefinedType): - def __init__(self, precision = 8): - self.precision = precision - - def get_col_spec(self, **kw): - return "MYTYPE(%s)" % self.precision - - def bind_processor(self, dialect): - def process(value): - return value - return process - - def result_processor(self, dialect, coltype): - def process(value): - return value - return process - - Once the type is made, it's immediately usable:: - - table = Table('foo', meta, - Column('id', Integer, primary_key=True), - Column('data', MyType(16)) - ) - - The ``get_col_spec()`` method will in most cases receive a keyword - argument ``type_expression`` which refers to the owning expression - of the type as being compiled, such as a :class:`.Column` or - :func:`.cast` construct. This keyword is only sent if the method - accepts keyword arguments (e.g. 
``**kw``) in its argument signature; - introspection is used to check for this in order to support legacy - forms of this function. - - .. versionadded:: 1.0.0 the owning expression is passed to - the ``get_col_spec()`` method via the keyword argument - ``type_expression``, if it receives ``**kw`` in its signature. - - """ - __visit_name__ = "user_defined" - - ensure_kwarg = 'get_col_spec' - - class Comparator(TypeEngine.Comparator): - __slots__ = () - - def _adapt_expression(self, op, other_comparator): - if hasattr(self.type, 'adapt_operator'): - util.warn_deprecated( - "UserDefinedType.adapt_operator is deprecated. Create " - "a UserDefinedType.Comparator subclass instead which " - "generates the desired expression constructs, given a " - "particular operator." - ) - return self.type.adapt_operator(op), self.type - else: - return op, self.type - - comparator_factory = Comparator - - def coerce_compared_value(self, op, value): - """Suggest a type for a 'coerced' Python value in an expression. - - Default behavior for :class:`.UserDefinedType` is the - same as that of :class:`.TypeDecorator`; by default it returns - ``self``, assuming the compared value should be coerced into - the same type as this one. See - :meth:`.TypeDecorator.coerce_compared_value` for more detail. - - .. versionchanged:: 0.8 :meth:`.UserDefinedType.coerce_compared_value` - now returns ``self`` by default, rather than falling onto the - more fundamental behavior of - :meth:`.TypeEngine.coerce_compared_value`. - - """ - - return self - - -class TypeDecorator(TypeEngine): - """Allows the creation of types which add additional functionality - to an existing type. - - This method is preferred to direct subclassing of SQLAlchemy's - built-in types as it ensures that all required functionality of - the underlying type is kept in place. - - Typical usage:: - - import sqlalchemy.types as types - - class MyType(types.TypeDecorator): - '''Prefixes Unicode values with "PREFIX:" on the way in and - strips it off on the way out. - ''' - - impl = types.Unicode - - def process_bind_param(self, value, dialect): - return "PREFIX:" + value - - def process_result_value(self, value, dialect): - return value[7:] - - def copy(self): - return MyType(self.impl.length) - - The class-level "impl" attribute is required, and can reference any - TypeEngine class. Alternatively, the load_dialect_impl() method - can be used to provide different type classes based on the dialect - given; in this case, the "impl" variable can reference - ``TypeEngine`` as a placeholder. - - Types that receive a Python type that isn't similar to the ultimate type - used may want to define the :meth:`TypeDecorator.coerce_compared_value` - method. This is used to give the expression system a hint when coercing - Python objects into bind parameters within expressions. Consider this - expression:: - - mytable.c.somecol + datetime.date(2009, 5, 15) - - Above, if "somecol" is an ``Integer`` variant, it makes sense that - we're doing date arithmetic, where above is usually interpreted - by databases as adding a number of days to the given date. - The expression system does the right thing by not attempting to - coerce the "date()" value into an integer-oriented bind parameter. - - However, in the case of ``TypeDecorator``, we are usually changing an - incoming Python type to something new - ``TypeDecorator`` by default will - "coerce" the non-typed side to be the same type as itself. 
Such as below, - we define an "epoch" type that stores a date value as an integer:: - - class MyEpochType(types.TypeDecorator): - impl = types.Integer - - epoch = datetime.date(1970, 1, 1) - - def process_bind_param(self, value, dialect): - return (value - self.epoch).days - - def process_result_value(self, value, dialect): - return self.epoch + timedelta(days=value) - - Our expression of ``somecol + date`` with the above type will coerce the - "date" on the right side to also be treated as ``MyEpochType``. - - This behavior can be overridden via the - :meth:`~TypeDecorator.coerce_compared_value` method, which returns a type - that should be used for the value of the expression. Below we set it such - that an integer value will be treated as an ``Integer``, and any other - value is assumed to be a date and will be treated as a ``MyEpochType``:: - - def coerce_compared_value(self, op, value): - if isinstance(value, int): - return Integer() - else: - return self - - """ - - __visit_name__ = "type_decorator" - - def __init__(self, *args, **kwargs): - """Construct a :class:`.TypeDecorator`. - - Arguments sent here are passed to the constructor - of the class assigned to the ``impl`` class level attribute, - assuming the ``impl`` is a callable, and the resulting - object is assigned to the ``self.impl`` instance attribute - (thus overriding the class attribute of the same name). - - If the class level ``impl`` is not a callable (the unusual case), - it will be assigned to the same instance attribute 'as-is', - ignoring those arguments passed to the constructor. - - Subclasses can override this to customize the generation - of ``self.impl`` entirely. - - """ - - if not hasattr(self.__class__, 'impl'): - raise AssertionError("TypeDecorator implementations " - "require a class-level variable " - "'impl' which refers to the class of " - "type being decorated") - self.impl = to_instance(self.__class__.impl, *args, **kwargs) - - coerce_to_is_types = (util.NoneType, ) - """Specify those Python types which should be coerced at the expression - level to "IS " when compared using ``==`` (and same for - ``IS NOT`` in conjunction with ``!=``. - - For most SQLAlchemy types, this includes ``NoneType``, as well as - ``bool``. - - :class:`.TypeDecorator` modifies this list to only include ``NoneType``, - as typedecorator implementations that deal with boolean types are common. - - Custom :class:`.TypeDecorator` classes can override this attribute to - return an empty tuple, in which case no values will be coerced to - constants. - - ..versionadded:: 0.8.2 - Added :attr:`.TypeDecorator.coerce_to_is_types` to allow for easier - control of ``__eq__()`` ``__ne__()`` operations. 
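    A hypothetical decorator that opts out entirely::

        class LiberalBoolean(TypeDecorator):
            impl = Boolean

            # with an empty tuple, even ``somecol == None`` is rendered
            # with ``=`` rather than being coerced to ``IS NULL``
            coerce_to_is_types = ()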
- - """ - - class Comparator(TypeEngine.Comparator): - __slots__ = () - - def operate(self, op, *other, **kwargs): - kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types - return super(TypeDecorator.Comparator, self).operate( - op, *other, **kwargs) - - def reverse_operate(self, op, other, **kwargs): - kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types - return super(TypeDecorator.Comparator, self).reverse_operate( - op, other, **kwargs) - - @property - def comparator_factory(self): - if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__: - return self.impl.comparator_factory - else: - return type("TDComparator", - (TypeDecorator.Comparator, - self.impl.comparator_factory), - {}) - - def _gen_dialect_impl(self, dialect): - """ - #todo - """ - adapted = dialect.type_descriptor(self) - if adapted is not self: - return adapted - - # otherwise adapt the impl type, link - # to a copy of this TypeDecorator and return - # that. - typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect) - tt = self.copy() - if not isinstance(tt, self.__class__): - raise AssertionError('Type object %s does not properly ' - 'implement the copy() method, it must ' - 'return an object of type %s' % - (self, self.__class__)) - tt.impl = typedesc - return tt - - @property - def _type_affinity(self): - """ - #todo - """ - return self.impl._type_affinity - - def type_engine(self, dialect): - """Return a dialect-specific :class:`.TypeEngine` instance - for this :class:`.TypeDecorator`. - - In most cases this returns a dialect-adapted form of - the :class:`.TypeEngine` type represented by ``self.impl``. - Makes usage of :meth:`dialect_impl` but also traverses - into wrapped :class:`.TypeDecorator` instances. - Behavior can be customized here by overriding - :meth:`load_dialect_impl`. - - """ - adapted = dialect.type_descriptor(self) - if not isinstance(adapted, type(self)): - return adapted - elif isinstance(self.impl, TypeDecorator): - return self.impl.type_engine(dialect) - else: - return self.load_dialect_impl(dialect) - - def load_dialect_impl(self, dialect): - """Return a :class:`.TypeEngine` object corresponding to a dialect. - - This is an end-user override hook that can be used to provide - differing types depending on the given dialect. It is used - by the :class:`.TypeDecorator` implementation of :meth:`type_engine` - to help determine what type should ultimately be returned - for a given :class:`.TypeDecorator`. - - By default returns ``self.impl``. - - """ - return self.impl - - def __getattr__(self, key): - """Proxy all other undefined accessors to the underlying - implementation.""" - return getattr(self.impl, key) - - def process_literal_param(self, value, dialect): - """Receive a literal parameter value to be rendered inline within - a statement. - - This method is used when the compiler renders a - literal value without using binds, typically within DDL - such as in the "server default" of a column or an expression - within a CHECK constraint. - - The returned string will be rendered into the output string. - - .. versionadded:: 0.9.0 - - """ - raise NotImplementedError() - - def process_bind_param(self, value, dialect): - """Receive a bound parameter value to be converted. - - Subclasses override this method to return the - value that should be passed along to the underlying - :class:`.TypeEngine` object, and from there to the - DBAPI ``execute()`` method. 
- - The operation could be anything desired to perform custom - behavior, such as transforming or serializing data. - This could also be used as a hook for validating logic. - - This operation should be designed with the reverse operation - in mind, which would be the process_result_value method of - this class. - - :param value: Data to operate upon, of any type expected by - this method in the subclass. Can be ``None``. - :param dialect: the :class:`.Dialect` in use. - - """ - - raise NotImplementedError() - - def process_result_value(self, value, dialect): - """Receive a result-row column value to be converted. - - Subclasses should implement this method to operate on data - fetched from the database. - - Subclasses override this method to return the - value that should be passed back to the application, - given a value that is already processed by - the underlying :class:`.TypeEngine` object, originally - from the DBAPI cursor method ``fetchone()`` or similar. - - The operation could be anything desired to perform custom - behavior, such as transforming or serializing data. - This could also be used as a hook for validating logic. - - :param value: Data to operate upon, of any type expected by - this method in the subclass. Can be ``None``. - :param dialect: the :class:`.Dialect` in use. - - This operation should be designed to be reversible by - the "process_bind_param" method of this class. - - """ - - raise NotImplementedError() - - @util.memoized_property - def _has_bind_processor(self): - """memoized boolean, check if process_bind_param is implemented. - - Allows the base process_bind_param to raise - NotImplementedError without needing to test an expensive - exception throw. - - """ - - return self.__class__.process_bind_param.__code__ \ - is not TypeDecorator.process_bind_param.__code__ - - @util.memoized_property - def _has_literal_processor(self): - """memoized boolean, check if process_literal_param is implemented. - - - """ - - return self.__class__.process_literal_param.__code__ \ - is not TypeDecorator.process_literal_param.__code__ - - def literal_processor(self, dialect): - """Provide a literal processing function for the given - :class:`.Dialect`. - - Subclasses here will typically override - :meth:`.TypeDecorator.process_literal_param` instead of this method - directly. - - By default, this method makes use of - :meth:`.TypeDecorator.process_bind_param` if that method is - implemented, where :meth:`.TypeDecorator.process_literal_param` is - not. The rationale here is that :class:`.TypeDecorator` typically - deals with Python conversions of data that are above the layer of - database presentation. With the value converted by - :meth:`.TypeDecorator.process_bind_param`, the underlying type will - then handle whether it needs to be presented to the DBAPI as a bound - parameter or to the database as an inline SQL value. - - .. versionadded:: 0.9.0 - - """ - if self._has_literal_processor: - process_param = self.process_literal_param - elif self._has_bind_processor: - # the bind processor should normally be OK - # for TypeDecorator since it isn't doing DB-level - # handling, the handling here won't be different for bound vs. - # literals. 
- process_param = self.process_bind_param - else: - process_param = None - - if process_param: - impl_processor = self.impl.literal_processor(dialect) - if impl_processor: - def process(value): - return impl_processor(process_param(value, dialect)) - else: - def process(value): - return process_param(value, dialect) - - return process - else: - return self.impl.literal_processor(dialect) - - def bind_processor(self, dialect): - """Provide a bound value processing function for the - given :class:`.Dialect`. - - This is the method that fulfills the :class:`.TypeEngine` - contract for bound value conversion. :class:`.TypeDecorator` - will wrap a user-defined implementation of - :meth:`process_bind_param` here. - - User-defined code can override this method directly, - though its likely best to use :meth:`process_bind_param` so that - the processing provided by ``self.impl`` is maintained. - - :param dialect: Dialect instance in use. - - This method is the reverse counterpart to the - :meth:`result_processor` method of this class. - - """ - if self._has_bind_processor: - process_param = self.process_bind_param - impl_processor = self.impl.bind_processor(dialect) - if impl_processor: - def process(value): - return impl_processor(process_param(value, dialect)) - - else: - def process(value): - return process_param(value, dialect) - - return process - else: - return self.impl.bind_processor(dialect) - - @util.memoized_property - def _has_result_processor(self): - """memoized boolean, check if process_result_value is implemented. - - Allows the base process_result_value to raise - NotImplementedError without needing to test an expensive - exception throw. - - """ - return self.__class__.process_result_value.__code__ \ - is not TypeDecorator.process_result_value.__code__ - - def result_processor(self, dialect, coltype): - """Provide a result value processing function for the given - :class:`.Dialect`. - - This is the method that fulfills the :class:`.TypeEngine` - contract for result value conversion. :class:`.TypeDecorator` - will wrap a user-defined implementation of - :meth:`process_result_value` here. - - User-defined code can override this method directly, - though its likely best to use :meth:`process_result_value` so that - the processing provided by ``self.impl`` is maintained. - - :param dialect: Dialect instance in use. - :param coltype: An SQLAlchemy data type - - This method is the reverse counterpart to the - :meth:`bind_processor` method of this class. - - """ - if self._has_result_processor: - process_value = self.process_result_value - impl_processor = self.impl.result_processor(dialect, - coltype) - if impl_processor: - def process(value): - return process_value(impl_processor(value), dialect) - - else: - def process(value): - return process_value(value, dialect) - - return process - else: - return self.impl.result_processor(dialect, coltype) - - def coerce_compared_value(self, op, value): - """Suggest a type for a 'coerced' Python value in an expression. - - By default, returns self. This method is called by - the expression system when an object using this type is - on the left or right side of an expression against a plain Python - object which does not yet have a SQLAlchemy type assigned:: - - expr = table.c.somecolumn + 35 - - Where above, if ``somecolumn`` uses this type, this method will - be called with the value ``operator.add`` - and ``35``. The return value is whatever SQLAlchemy type should - be used for ``35`` for this particular operation. 
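        With that default in place, a column using a hypothetical decorator
        such as the ``JSONEncoded`` sketch above coerces the plain Python
        value on the other side of a comparison into the decorator type::

            # the dict on the right is bound through process_bind_param()
            criteria = mytable.c.payload == {"status": "open"}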
- - """ - return self - - def copy(self): - """Produce a copy of this :class:`.TypeDecorator` instance. - - This is a shallow copy and is provided to fulfill part of - the :class:`.TypeEngine` contract. It usually does not - need to be overridden unless the user-defined :class:`.TypeDecorator` - has local state that should be deep-copied. - - """ - - instance = self.__class__.__new__(self.__class__) - instance.__dict__.update(self.__dict__) - return instance - - def get_dbapi_type(self, dbapi): - """Return the DBAPI type object represented by this - :class:`.TypeDecorator`. - - By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the - underlying "impl". - """ - return self.impl.get_dbapi_type(dbapi) - - def compare_values(self, x, y): - """Given two values, compare them for equality. - - By default this calls upon :meth:`.TypeEngine.compare_values` - of the underlying "impl", which in turn usually - uses the Python equals operator ``==``. - - This function is used by the ORM to compare - an original-loaded value with an intercepted - "changed" value, to determine if a net change - has occurred. - - """ - return self.impl.compare_values(x, y) - - def __repr__(self): - return util.generic_repr(self, to_inspect=self.impl) - - -class Variant(TypeDecorator): - """A wrapping type that selects among a variety of - implementations based on dialect in use. - - The :class:`.Variant` type is typically constructed - using the :meth:`.TypeEngine.with_variant` method. - - .. versionadded:: 0.7.2 - - .. seealso:: :meth:`.TypeEngine.with_variant` for an example of use. - - """ - - def __init__(self, base, mapping): - """Construct a new :class:`.Variant`. - - :param base: the base 'fallback' type - :param mapping: dictionary of string dialect names to - :class:`.TypeEngine` instances. - - """ - self.impl = base - self.mapping = mapping - - def load_dialect_impl(self, dialect): - if dialect.name in self.mapping: - return self.mapping[dialect.name] - else: - return self.impl - - def with_variant(self, type_, dialect_name): - """Return a new :class:`.Variant` which adds the given - type + dialect name to the mapping, in addition to the - mapping present in this :class:`.Variant`. - - :param type_: a :class:`.TypeEngine` that will be selected - as a variant from the originating type, when a dialect - of the given name is in use. - :param dialect_name: base name of the dialect which uses - this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) 
- - """ - - if dialect_name in self.mapping: - raise exc.ArgumentError( - "Dialect '%s' is already present in " - "the mapping for this Variant" % dialect_name) - mapping = self.mapping.copy() - mapping[dialect_name] = type_ - return Variant(self.impl, mapping) - - @property - def comparator_factory(self): - """express comparison behavior in terms of the base type""" - return self.impl.comparator_factory - - -def _reconstitute_comparator(expression): - return expression.comparator - - -def to_instance(typeobj, *arg, **kw): - if typeobj is None: - return NULLTYPE - - if util.callable(typeobj): - return typeobj(*arg, **kw) - else: - return typeobj - - -def adapt_type(typeobj, colspecs): - if isinstance(typeobj, type): - typeobj = typeobj() - for t in typeobj.__class__.__mro__[0:-1]: - try: - impltype = colspecs[t] - break - except KeyError: - pass - else: - # couldn't adapt - so just return the type itself - # (it may be a user-defined type) - return typeobj - # if we adapted the given generic type to a database-specific type, - # but it turns out the originally given "generic" type - # is actually a subclass of our resulting type, then we were already - # given a more specific type than that required; so use that. - if (issubclass(typeobj.__class__, impltype)): - return typeobj - return typeobj.adapt(impltype) diff --git a/python/sqlalchemy/sql/util.py b/python/sqlalchemy/sql/util.py deleted file mode 100644 index 8f502fc8..00000000 --- a/python/sqlalchemy/sql/util.py +++ /dev/null @@ -1,613 +0,0 @@ -# sql/util.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""High level utilities which build upon other modules here. - -""" - -from .. import exc, util -from .base import _from_objects, ColumnSet -from . import operators, visitors -from itertools import chain -from collections import deque - -from .elements import BindParameter, ColumnClause, ColumnElement, \ - Null, UnaryExpression, literal_column, Label, _label_reference, \ - _textual_label_reference -from .selectable import ScalarSelect, Join, FromClause, FromGrouping -from .schema import Column - -join_condition = util.langhelpers.public_factory( - Join._join_condition, - ".sql.util.join_condition") - -# names that are still being imported from the outside -from .annotation import _shallow_annotate, _deep_annotate, _deep_deannotate -from .elements import _find_columns -from .ddl import sort_tables - - -def find_join_source(clauses, join_to): - """Given a list of FROM clauses and a selectable, - return the first index and element from the list of - clauses which can be joined against the selectable. returns - None, None if no match is found. - - e.g.:: - - clause1 = table1.join(table2) - clause2 = table4.join(table5) - - join_to = table2.join(table3) - - find_join_source([clause1, clause2], join_to) == clause1 - - """ - - selectables = list(_from_objects(join_to)) - for i, f in enumerate(clauses): - for s in selectables: - if f.is_derived_from(s): - return i, f - else: - return None, None - - -def visit_binary_product(fn, expr): - """Produce a traversal of the given expression, delivering - column comparisons to the given function. - - The function is of the form:: - - def my_fn(binary, left, right) - - For each binary expression located which has a - comparison operator, the product of "left" and - "right" will be delivered to that function, - in terms of that binary. 
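# [editor's note -- not part of the deleted file] The docstring's fuller
# walk-through of the "left"/"right" product continues below; as a runnable
# sketch, a collector callback looks like this (table and column names
# illustrative):
from sqlalchemy import Table, Column, Integer, MetaData, and_
from sqlalchemy.sql.util import visit_binary_product

metadata = MetaData()
t = Table("t", metadata, Column("a", Integer), Column("b", Integer))

pairs = []

def collect(binary, left, right):
    # called once per left/right column combination of each comparison
    pairs.append((left.name, right.name))

visit_binary_product(collect, and_(t.c.a == t.c.b, t.c.b == t.c.a))
# pairs == [("a", "b"), ("b", "a")]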
- - Hence an expression like:: - - and_( - (a + b) == q + func.sum(e + f), - j == r - ) - - would have the traversal:: - - a q - a e - a f - b q - b e - b f - j r - - That is, every combination of "left" and - "right" that doesn't further contain - a binary comparison is passed as pairs. - - """ - stack = [] - - def visit(element): - if isinstance(element, ScalarSelect): - # we don't want to dig into correlated subqueries, - # those are just column elements by themselves - yield element - elif element.__visit_name__ == 'binary' and \ - operators.is_comparison(element.operator): - stack.insert(0, element) - for l in visit(element.left): - for r in visit(element.right): - fn(stack[0], l, r) - stack.pop(0) - for elem in element.get_children(): - visit(elem) - else: - if isinstance(element, ColumnClause): - yield element - for elem in element.get_children(): - for e in visit(elem): - yield e - list(visit(expr)) - - -def find_tables(clause, check_columns=False, - include_aliases=False, include_joins=False, - include_selects=False, include_crud=False): - """locate Table objects within the given expression.""" - - tables = [] - _visitors = {} - - if include_selects: - _visitors['select'] = _visitors['compound_select'] = tables.append - - if include_joins: - _visitors['join'] = tables.append - - if include_aliases: - _visitors['alias'] = tables.append - - if include_crud: - _visitors['insert'] = _visitors['update'] = \ - _visitors['delete'] = lambda ent: tables.append(ent.table) - - if check_columns: - def visit_column(column): - tables.append(column.table) - _visitors['column'] = visit_column - - _visitors['table'] = tables.append - - visitors.traverse(clause, {'column_collections': False}, _visitors) - return tables - - -def unwrap_order_by(clause): - """Break up an 'order by' expression into individual column-expressions, - without DESC/ASC/NULLS FIRST/NULLS LAST""" - - cols = util.column_set() - stack = deque([clause]) - while stack: - t = stack.popleft() - if isinstance(t, ColumnElement) and \ - ( - not isinstance(t, UnaryExpression) or - not operators.is_ordering_modifier(t.modifier) - ): - if isinstance(t, _label_reference): - t = t.element - if isinstance(t, (_textual_label_reference)): - continue - cols.add(t) - else: - for c in t.get_children(): - stack.append(c) - return cols - - -def clause_is_present(clause, search): - """Given a target clause and a second to search within, return True - if the target is plainly present in the search without any - subqueries or aliases involved. - - Basically descends through Joins. - - """ - - for elem in surface_selectables(search): - if clause == elem: # use == here so that Annotated's compare - return True - else: - return False - - -def surface_selectables(clause): - stack = [clause] - while stack: - elem = stack.pop() - yield elem - if isinstance(elem, Join): - stack.extend((elem.left, elem.right)) - elif isinstance(elem, FromGrouping): - stack.append(elem.element) - - -def selectables_overlap(left, right): - """Return True if left/right have some overlapping selectable""" - - return bool( - set(surface_selectables(left)).intersection( - surface_selectables(right) - ) - ) - - -def bind_values(clause): - """Return an ordered list of "bound" values in the given clause. - - E.g.:: - - >>> expr = and_( - ... table.c.foo==5, table.c.foo==7 - ... 
) - >>> bind_values(expr) - [5, 7] - """ - - v = [] - - def visit_bindparam(bind): - v.append(bind.effective_value) - - visitors.traverse(clause, {}, {'bindparam': visit_bindparam}) - return v - - -def _quote_ddl_expr(element): - if isinstance(element, util.string_types): - element = element.replace("'", "''") - return "'%s'" % element - else: - return repr(element) - - -class _repr_params(object): - """A string view of bound parameters, truncating - display to the given number of 'multi' parameter sets. - - """ - - def __init__(self, params, batches): - self.params = params - self.batches = batches - - def __repr__(self): - if isinstance(self.params, (list, tuple)) and \ - len(self.params) > self.batches and \ - isinstance(self.params[0], (list, dict, tuple)): - msg = " ... displaying %i of %i total bound parameter sets ... " - return ' '.join(( - repr(self.params[:self.batches - 2])[0:-1], - msg % (self.batches, len(self.params)), - repr(self.params[-2:])[1:] - )) - else: - return repr(self.params) - - -def adapt_criterion_to_null(crit, nulls): - """given criterion containing bind params, convert selected elements - to IS NULL. - - """ - - def visit_binary(binary): - if isinstance(binary.left, BindParameter) \ - and binary.left._identifying_key in nulls: - # reverse order if the NULL is on the left side - binary.left = binary.right - binary.right = Null() - binary.operator = operators.is_ - binary.negate = operators.isnot - elif isinstance(binary.right, BindParameter) \ - and binary.right._identifying_key in nulls: - binary.right = Null() - binary.operator = operators.is_ - binary.negate = operators.isnot - - return visitors.cloned_traverse(crit, {}, {'binary': visit_binary}) - - -def splice_joins(left, right, stop_on=None): - if left is None: - return right - - stack = [(right, None)] - - adapter = ClauseAdapter(left) - ret = None - while stack: - (right, prevright) = stack.pop() - if isinstance(right, Join) and right is not stop_on: - right = right._clone() - right._reset_exported() - right.onclause = adapter.traverse(right.onclause) - stack.append((right.left, right)) - else: - right = adapter.traverse(right) - if prevright is not None: - prevright.left = right - if ret is None: - ret = right - - return ret - - -def reduce_columns(columns, *clauses, **kw): - """given a list of columns, return a 'reduced' set based on natural - equivalents. - - the set is reduced to the smallest list of columns which have no natural - equivalent present in the list. A "natural equivalent" means that two - columns will ultimately represent the same value because they are related - by a foreign key. - - \*clauses is an optional list of join clauses which will be traversed - to further identify columns that are "equivalent". - - \**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys - whose tables are not yet configured, or columns that aren't yet present. - - This function is primarily used to determine the most minimal "primary - key" from a selectable, by reducing the set of primary key columns present - in the selectable to just those that are not repeated. 
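# [editor's note -- not part of the deleted file] A minimal sketch of the
# foreign-key reduction described above (schema illustrative): the child
# primary key is dropped because it is FK-equivalent to the parent's.
from sqlalchemy import Table, Column, Integer, ForeignKey, MetaData
from sqlalchemy.sql.util import reduce_columns

metadata = MetaData()
parent = Table("parent", metadata,
               Column("id", Integer, primary_key=True))
child = Table("child", metadata,
              Column("id", Integer, ForeignKey("parent.id"),
                     primary_key=True))

reduced = reduce_columns([parent.c.id, child.c.id])
# list(reduced) == [parent.c.id]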
- - """ - ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False) - only_synonyms = kw.pop('only_synonyms', False) - - columns = util.ordered_column_set(columns) - - omit = util.column_set() - for col in columns: - for fk in chain(*[c.foreign_keys for c in col.proxy_set]): - for c in columns: - if c is col: - continue - try: - fk_col = fk.column - except exc.NoReferencedColumnError: - # TODO: add specific coverage here - # to test/sql/test_selectable ReduceTest - if ignore_nonexistent_tables: - continue - else: - raise - except exc.NoReferencedTableError: - # TODO: add specific coverage here - # to test/sql/test_selectable ReduceTest - if ignore_nonexistent_tables: - continue - else: - raise - if fk_col.shares_lineage(c) and \ - (not only_synonyms or - c.name == col.name): - omit.add(col) - break - - if clauses: - def visit_binary(binary): - if binary.operator == operators.eq: - cols = util.column_set( - chain(*[c.proxy_set for c in columns.difference(omit)])) - if binary.left in cols and binary.right in cols: - for c in reversed(columns): - if c.shares_lineage(binary.right) and \ - (not only_synonyms or - c.name == binary.left.name): - omit.add(c) - break - for clause in clauses: - if clause is not None: - visitors.traverse(clause, {}, {'binary': visit_binary}) - - return ColumnSet(columns.difference(omit)) - - -def criterion_as_pairs(expression, consider_as_foreign_keys=None, - consider_as_referenced_keys=None, any_operator=False): - """traverse an expression and locate binary criterion pairs.""" - - if consider_as_foreign_keys and consider_as_referenced_keys: - raise exc.ArgumentError("Can only specify one of " - "'consider_as_foreign_keys' or " - "'consider_as_referenced_keys'") - - def col_is(a, b): - # return a is b - return a.compare(b) - - def visit_binary(binary): - if not any_operator and binary.operator is not operators.eq: - return - if not isinstance(binary.left, ColumnElement) or \ - not isinstance(binary.right, ColumnElement): - return - - if consider_as_foreign_keys: - if binary.left in consider_as_foreign_keys and \ - (col_is(binary.right, binary.left) or - binary.right not in consider_as_foreign_keys): - pairs.append((binary.right, binary.left)) - elif binary.right in consider_as_foreign_keys and \ - (col_is(binary.left, binary.right) or - binary.left not in consider_as_foreign_keys): - pairs.append((binary.left, binary.right)) - elif consider_as_referenced_keys: - if binary.left in consider_as_referenced_keys and \ - (col_is(binary.right, binary.left) or - binary.right not in consider_as_referenced_keys): - pairs.append((binary.left, binary.right)) - elif binary.right in consider_as_referenced_keys and \ - (col_is(binary.left, binary.right) or - binary.left not in consider_as_referenced_keys): - pairs.append((binary.right, binary.left)) - else: - if isinstance(binary.left, Column) and \ - isinstance(binary.right, Column): - if binary.left.references(binary.right): - pairs.append((binary.right, binary.left)) - elif binary.right.references(binary.left): - pairs.append((binary.left, binary.right)) - pairs = [] - visitors.traverse(expression, {}, {'binary': visit_binary}) - return pairs - - - -class ClauseAdapter(visitors.ReplacingCloningVisitor): - """Clones and modifies clauses based on column correspondence. 
- - E.g.:: - - table1 = Table('sometable', metadata, - Column('col1', Integer), - Column('col2', Integer) - ) - table2 = Table('someothertable', metadata, - Column('col1', Integer), - Column('col2', Integer) - ) - - condition = table1.c.col1 == table2.c.col1 - - make an alias of table1:: - - s = table1.alias('foo') - - calling ``ClauseAdapter(s).traverse(condition)`` converts - condition to read:: - - s.c.col1 == table2.c.col1 - - """ - - def __init__(self, selectable, equivalents=None, - include_fn=None, exclude_fn=None, - adapt_on_names=False, anonymize_labels=False): - self.__traverse_options__ = { - 'stop_on': [selectable], - 'anonymize_labels': anonymize_labels} - self.selectable = selectable - self.include_fn = include_fn - self.exclude_fn = exclude_fn - self.equivalents = util.column_dict(equivalents or {}) - self.adapt_on_names = adapt_on_names - - def _corresponding_column(self, col, require_embedded, - _seen=util.EMPTY_SET): - newcol = self.selectable.corresponding_column( - col, - require_embedded=require_embedded) - if newcol is None and col in self.equivalents and col not in _seen: - for equiv in self.equivalents[col]: - newcol = self._corresponding_column( - equiv, require_embedded=require_embedded, - _seen=_seen.union([col])) - if newcol is not None: - return newcol - if self.adapt_on_names and newcol is None: - newcol = self.selectable.c.get(col.name) - return newcol - - def replace(self, col): - if isinstance(col, FromClause) and \ - self.selectable.is_derived_from(col): - return self.selectable - elif not isinstance(col, ColumnElement): - return None - elif self.include_fn and not self.include_fn(col): - return None - elif self.exclude_fn and self.exclude_fn(col): - return None - else: - return self._corresponding_column(col, True) - - -class ColumnAdapter(ClauseAdapter): - """Extends ClauseAdapter with extra utility functions. - - Key aspects of ColumnAdapter include: - - * Expressions that are adapted are stored in a persistent - .columns collection; so that an expression E adapted into - an expression E1, will return the same object E1 when adapted - a second time. This is important in particular for things like - Label objects that are anonymized, so that the ColumnAdapter can - be used to present a consistent "adapted" view of things. - - * Exclusion of items from the persistent collection based on - include/exclude rules, but also independent of hash identity. - This because "annotated" items all have the same hash identity as their - parent. - - * "wrapping" capability is added, so that the replacement of an expression - E can proceed through a series of adapters. This differs from the - visitor's "chaining" feature in that the resulting object is passed - through all replacing functions unconditionally, rather than stopping - at the first one that returns non-None. - - * An adapt_required option, used by eager loading to indicate that - We don't trust a result row column that is not translated. 
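# [editor's note -- not part of the deleted file] The ClauseAdapter example
# shown in the docstring above, filled out as a runnable sketch:
from sqlalchemy import Table, Column, Integer, MetaData
from sqlalchemy.sql.util import ClauseAdapter

metadata = MetaData()
table1 = Table("sometable", metadata,
               Column("col1", Integer), Column("col2", Integer))
table2 = Table("someothertable", metadata,
               Column("col1", Integer), Column("col2", Integer))

condition = table1.c.col1 == table2.c.col1
s = table1.alias("foo")

# references to "sometable" are rewritten to the alias "foo"
adapted = ClauseAdapter(s).traverse(condition)
# str(adapted) == "foo.col1 = someothertable.col1"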
- This is to prevent a column from being interpreted as that - of the child row in a self-referential scenario, see - inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency - - """ - - def __init__(self, selectable, equivalents=None, - chain_to=None, adapt_required=False, - include_fn=None, exclude_fn=None, - adapt_on_names=False, - allow_label_resolve=True, - anonymize_labels=False): - ClauseAdapter.__init__(self, selectable, equivalents, - include_fn=include_fn, exclude_fn=exclude_fn, - adapt_on_names=adapt_on_names, - anonymize_labels=anonymize_labels) - - if chain_to: - self.chain(chain_to) - self.columns = util.populate_column_dict(self._locate_col) - if self.include_fn or self.exclude_fn: - self.columns = self._IncludeExcludeMapping(self, self.columns) - self.adapt_required = adapt_required - self.allow_label_resolve = allow_label_resolve - self._wrap = None - - class _IncludeExcludeMapping(object): - def __init__(self, parent, columns): - self.parent = parent - self.columns = columns - - def __getitem__(self, key): - if ( - self.parent.include_fn and not self.parent.include_fn(key) - ) or ( - self.parent.exclude_fn and self.parent.exclude_fn(key) - ): - if self.parent._wrap: - return self.parent._wrap.columns[key] - else: - return key - return self.columns[key] - - def wrap(self, adapter): - ac = self.__class__.__new__(self.__class__) - ac.__dict__.update(self.__dict__) - ac._wrap = adapter - ac.columns = util.populate_column_dict(ac._locate_col) - if ac.include_fn or ac.exclude_fn: - ac.columns = self._IncludeExcludeMapping(ac, ac.columns) - - return ac - - def traverse(self, obj): - return self.columns[obj] - - adapt_clause = traverse - adapt_list = ClauseAdapter.copy_and_process - - def _locate_col(self, col): - - c = ClauseAdapter.traverse(self, col) - - if self._wrap: - c2 = self._wrap._locate_col(c) - if c2 is not None: - c = c2 - - if self.adapt_required and c is col: - return None - - c._allow_label_resolve = self.allow_label_resolve - - return c - - def __getstate__(self): - d = self.__dict__.copy() - del d['columns'] - return d - - def __setstate__(self, state): - self.__dict__.update(state) - self.columns = util.PopulateDict(self._locate_col) diff --git a/python/sqlalchemy/sql/visitors.py b/python/sqlalchemy/sql/visitors.py deleted file mode 100644 index 0540ac5d..00000000 --- a/python/sqlalchemy/sql/visitors.py +++ /dev/null @@ -1,328 +0,0 @@ -# sql/visitors.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Visitor/traversal interface and library functions. - -SQLAlchemy schema and expression constructs rely on a Python-centric -version of the classic "visitor" pattern as the primary way in which -they apply functionality. The most common use of this pattern -is statement compilation, where individual expression classes match -up to rendering methods that produce a string result. Beyond this, -the visitor system is also used to inspect expressions for various -information and patterns, as well as for usage in -some kinds of expression transformation. Other kinds of transformation -use a non-visitor traversal system. - -For many examples of how the visit system is used, see the -sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules. -For an introduction to clause adaption, see -http://techspot.zzzeek.org/2008/01/23/expression-transformations/ - -""" - -from collections import deque -from .. 
import util -import operator -from .. import exc - -__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor', - 'CloningVisitor', 'ReplacingCloningVisitor', 'iterate', - 'iterate_depthfirst', 'traverse_using', 'traverse', - 'traverse_depthfirst', - 'cloned_traverse', 'replacement_traverse'] - - -class VisitableType(type): - """Metaclass which assigns a `_compiler_dispatch` method to classes - having a `__visit_name__` attribute. - - The _compiler_dispatch attribute becomes an instance method which - looks approximately like the following:: - - def _compiler_dispatch (self, visitor, **kw): - '''Look for an attribute named "visit_" + self.__visit_name__ - on the visitor, and call it with the same kw params.''' - visit_attr = 'visit_%s' % self.__visit_name__ - return getattr(visitor, visit_attr)(self, **kw) - - Classes having no __visit_name__ attribute will remain unaffected. - """ - - def __init__(cls, clsname, bases, clsdict): - if clsname != 'Visitable' and \ - hasattr(cls, '__visit_name__'): - _generate_dispatch(cls) - - super(VisitableType, cls).__init__(clsname, bases, clsdict) - - -def _generate_dispatch(cls): - """Return an optimized visit dispatch function for the cls - for use by the compiler. - """ - if '__visit_name__' in cls.__dict__: - visit_name = cls.__visit_name__ - if isinstance(visit_name, str): - # There is an optimization opportunity here because the - # the string name of the class's __visit_name__ is known at - # this early stage (import time) so it can be pre-constructed. - getter = operator.attrgetter("visit_%s" % visit_name) - - def _compiler_dispatch(self, visitor, **kw): - try: - meth = getter(visitor) - except AttributeError: - raise exc.UnsupportedCompilationError(visitor, cls) - else: - return meth(self, **kw) - else: - # The optimization opportunity is lost for this case because the - # __visit_name__ is not yet a string. As a result, the visit - # string has to be recalculated with each compilation. - def _compiler_dispatch(self, visitor, **kw): - visit_attr = 'visit_%s' % self.__visit_name__ - try: - meth = getattr(visitor, visit_attr) - except AttributeError: - raise exc.UnsupportedCompilationError(visitor, cls) - else: - return meth(self, **kw) - - _compiler_dispatch.__doc__ = \ - """Look for an attribute named "visit_" + self.__visit_name__ - on the visitor, and call it with the same kw params. - """ - cls._compiler_dispatch = _compiler_dispatch - - -class Visitable(util.with_metaclass(VisitableType, object)): - """Base class for visitable objects, applies the - ``VisitableType`` metaclass. - - """ - - -class ClauseVisitor(object): - """Base class for visitor objects which can traverse using - the traverse() function. - - """ - - __traverse_options__ = {} - - def traverse_single(self, obj, **kw): - for v in self._visitor_iterator: - meth = getattr(v, "visit_%s" % obj.__visit_name__, None) - if meth: - return meth(obj, **kw) - - def iterate(self, obj): - """traverse the given expression structure, returning an iterator - of all elements. 
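# [editor's note -- not part of the deleted file] How the generated
# _compiler_dispatch described above ties an element to a visitor method,
# as a self-contained sketch (Thing/MyVisitor are illustrative names):
from sqlalchemy.sql.visitors import Visitable

class Thing(Visitable):
    __visit_name__ = "thing"

class MyVisitor(object):
    def visit_thing(self, element, **kw):
        return "saw a thing"

assert Thing()._compiler_dispatch(MyVisitor()) == "saw a thing"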
- - """ - return iterate(obj, self.__traverse_options__) - - def traverse(self, obj): - """traverse and visit the given expression structure.""" - - return traverse(obj, self.__traverse_options__, self._visitor_dict) - - @util.memoized_property - def _visitor_dict(self): - visitors = {} - - for name in dir(self): - if name.startswith('visit_'): - visitors[name[6:]] = getattr(self, name) - return visitors - - @property - def _visitor_iterator(self): - """iterate through this visitor and each 'chained' visitor.""" - - v = self - while v: - yield v - v = getattr(v, '_next', None) - - def chain(self, visitor): - """'chain' an additional ClauseVisitor onto this ClauseVisitor. - - the chained visitor will receive all visit events after this one. - - """ - tail = list(self._visitor_iterator)[-1] - tail._next = visitor - return self - - -class CloningVisitor(ClauseVisitor): - """Base class for visitor objects which can traverse using - the cloned_traverse() function. - - """ - - def copy_and_process(self, list_): - """Apply cloned traversal to the given list of elements, and return - the new list. - - """ - return [self.traverse(x) for x in list_] - - def traverse(self, obj): - """traverse and visit the given expression structure.""" - - return cloned_traverse( - obj, self.__traverse_options__, self._visitor_dict) - - -class ReplacingCloningVisitor(CloningVisitor): - """Base class for visitor objects which can traverse using - the replacement_traverse() function. - - """ - - def replace(self, elem): - """receive pre-copied elements during a cloning traversal. - - If the method returns a new element, the element is used - instead of creating a simple copy of the element. Traversal - will halt on the newly returned element if it is re-encountered. - """ - return None - - def traverse(self, obj): - """traverse and visit the given expression structure.""" - - def replace(elem): - for v in self._visitor_iterator: - e = v.replace(elem) - if e is not None: - return e - return replacement_traverse(obj, self.__traverse_options__, replace) - - -def iterate(obj, opts): - """traverse the given expression structure, returning an iterator. - - traversal is configured to be breadth-first. - - """ - # fasttrack for atomic elements like columns - children = obj.get_children(**opts) - if not children: - return [obj] - - traversal = deque() - stack = deque([obj]) - while stack: - t = stack.popleft() - traversal.append(t) - for c in t.get_children(**opts): - stack.append(c) - return iter(traversal) - - -def iterate_depthfirst(obj, opts): - """traverse the given expression structure, returning an iterator. - - traversal is configured to be depth-first. - - """ - # fasttrack for atomic elements like columns - children = obj.get_children(**opts) - if not children: - return [obj] - - stack = deque([obj]) - traversal = deque() - while stack: - t = stack.pop() - traversal.appendleft(t) - for c in t.get_children(**opts): - stack.append(c) - return iter(traversal) - - -def traverse_using(iterator, obj, visitors): - """visit the given expression structure using the given iterator of - objects. - - """ - for target in iterator: - meth = visitors.get(target.__visit_name__, None) - if meth: - meth(target) - return obj - - -def traverse(obj, opts, visitors): - """traverse and visit the given expression structure using the default - iterator. 
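# [editor's note -- not part of the deleted file] The module-level traverse()
# wrapped here is typically driven with a plain visitor dict keyed by
# __visit_name__, e.g. collecting column names (schema illustrative):
from sqlalchemy import Table, Column, Integer, MetaData
from sqlalchemy.sql.visitors import traverse

metadata = MetaData()
t = Table("t", metadata, Column("a", Integer), Column("b", Integer))

names = []
traverse(t.c.a == t.c.b, {}, {"column": lambda col: names.append(col.name)})
# names == ["a", "b"]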
- - """ - return traverse_using(iterate(obj, opts), obj, visitors) - - -def traverse_depthfirst(obj, opts, visitors): - """traverse and visit the given expression structure using the - depth-first iterator. - - """ - return traverse_using(iterate_depthfirst(obj, opts), obj, visitors) - - -def cloned_traverse(obj, opts, visitors): - """clone the given expression structure, allowing - modifications by visitors.""" - - cloned = {} - stop_on = set(opts.get('stop_on', [])) - - def clone(elem): - if elem in stop_on: - return elem - else: - if id(elem) not in cloned: - cloned[id(elem)] = newelem = elem._clone() - newelem._copy_internals(clone=clone) - meth = visitors.get(newelem.__visit_name__, None) - if meth: - meth(newelem) - return cloned[id(elem)] - - if obj is not None: - obj = clone(obj) - return obj - - -def replacement_traverse(obj, opts, replace): - """clone the given expression structure, allowing element - replacement by a given replacement function.""" - - cloned = {} - stop_on = set([id(x) for x in opts.get('stop_on', [])]) - - def clone(elem, **kw): - if id(elem) in stop_on or \ - 'no_replacement_traverse' in elem._annotations: - return elem - else: - newelem = replace(elem) - if newelem is not None: - stop_on.add(id(newelem)) - return newelem - else: - if elem not in cloned: - cloned[elem] = newelem = elem._clone() - newelem._copy_internals(clone=clone, **kw) - return cloned[elem] - - if obj is not None: - obj = clone(obj, **opts) - return obj diff --git a/python/sqlalchemy/testing/__init__.py b/python/sqlalchemy/testing/__init__.py deleted file mode 100644 index bd6377eb..00000000 --- a/python/sqlalchemy/testing/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# testing/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -from .warnings import assert_warnings - -from . import config - -from .exclusions import db_spec, _is_excluded, fails_if, skip_if, future,\ - fails_on, fails_on_everything_except, skip, only_on, exclude, \ - against as _against, _server_version, only_if, fails - - -def against(*queries): - return _against(config._current, *queries) - -from .assertions import emits_warning, emits_warning_on, uses_deprecated, \ - eq_, ne_, le_, is_, is_not_, startswith_, assert_raises, \ - assert_raises_message, AssertsCompiledSQL, ComparesTables, \ - AssertsExecutionResults, expect_deprecated, expect_warnings, \ - in_, not_in_ - -from .util import run_as_contextmanager, rowset, fail, \ - provide_metadata, adict, force_drop_names, \ - teardown_events - -crashes = skip - -from .config import db -from .config import requirements as requires - -from . import mock diff --git a/python/sqlalchemy/testing/assertions.py b/python/sqlalchemy/testing/assertions.py deleted file mode 100644 index 21dc3e71..00000000 --- a/python/sqlalchemy/testing/assertions.py +++ /dev/null @@ -1,491 +0,0 @@ -# testing/assertions.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from __future__ import absolute_import - -from . 
import util as testutil -from sqlalchemy import pool, orm, util -from sqlalchemy.engine import default, url -from sqlalchemy.util import decorator -from sqlalchemy import types as sqltypes, schema, exc as sa_exc -import warnings -import re -from .exclusions import db_spec, _is_excluded -from . import assertsql -from . import config -from .util import fail -import contextlib -from . import mock - - -def expect_warnings(*messages, **kw): - """Context manager which expects one or more warnings. - - With no arguments, squelches all SAWarnings emitted via - sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise - pass string expressions that will match selected warnings via regex; - all non-matching warnings are sent through. - - The expect version **asserts** that the warnings were in fact seen. - - Note that the test suite sets SAWarning warnings to raise exceptions. - - """ - return _expect_warnings(sa_exc.SAWarning, messages, **kw) - - -@contextlib.contextmanager -def expect_warnings_on(db, *messages, **kw): - """Context manager which expects one or more warnings on specific - dialects. - - The expect version **asserts** that the warnings were in fact seen. - - """ - spec = db_spec(db) - - if isinstance(db, util.string_types) and not spec(config._current): - yield - else: - with expect_warnings(*messages, **kw): - yield - - -def emits_warning(*messages): - """Decorator form of expect_warnings(). - - Note that emits_warning does **not** assert that the warnings - were in fact seen. - - """ - - @decorator - def decorate(fn, *args, **kw): - with expect_warnings(assert_=False, *messages): - return fn(*args, **kw) - - return decorate - - -def expect_deprecated(*messages, **kw): - return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw) - - -def emits_warning_on(db, *messages): - """Mark a test as emitting a warning on a specific dialect. - - With no arguments, squelches all SAWarning failures. Or pass one or more - strings; these will be matched to the root of the warning description by - warnings.filterwarnings(). - - Note that emits_warning_on does **not** assert that the warnings - were in fact seen. - - """ - @decorator - def decorate(fn, *args, **kw): - with expect_warnings_on(db, assert_=False, *messages): - return fn(*args, **kw) - - return decorate - - -def uses_deprecated(*messages): - """Mark a test as immune from fatal deprecation warnings. - - With no arguments, squelches all SADeprecationWarning failures. - Or pass one or more strings; these will be matched to the root - of the warning description by warnings.filterwarnings(). - - As a special case, you may pass a function name prefixed with // - and it will be re-written as needed to match the standard warning - verbiage emitted by the sqlalchemy.util.deprecated decorator. - - Note that uses_deprecated does **not** assert that the warnings - were in fact seen. 
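# [editor's note -- not part of the deleted file] A hedged sketch of the
# expect_warnings context manager documented above; the warning text is
# illustrative, and sqlalchemy.util.warn emits a SAWarning:
from sqlalchemy import util
from sqlalchemy.testing.assertions import expect_warnings

with expect_warnings("something deprecated-ish happened"):
    util.warn("something deprecated-ish happened in the pool")
# exiting the block asserts the warning was actually seen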
- - """ - - @decorator - def decorate(fn, *args, **kw): - with expect_deprecated(*messages, assert_=False): - return fn(*args, **kw) - return decorate - - -@contextlib.contextmanager -def _expect_warnings(exc_cls, messages, regex=True, assert_=True): - - if regex: - filters = [re.compile(msg, re.I) for msg in messages] - else: - filters = messages - - seen = set(filters) - - real_warn = warnings.warn - - def our_warn(msg, exception, *arg, **kw): - if not issubclass(exception, exc_cls): - return real_warn(msg, exception, *arg, **kw) - - if not filters: - return - - for filter_ in filters: - if (regex and filter_.match(msg)) or \ - (not regex and filter_ == msg): - seen.discard(filter_) - break - else: - real_warn(msg, exception, *arg, **kw) - - with mock.patch("warnings.warn", our_warn): - yield - - if assert_: - assert not seen, "Warnings were not seen: %s" % \ - ", ".join("%r" % (s.pattern if regex else s) for s in seen) - - -def global_cleanup_assertions(): - """Check things that have to be finalized at the end of a test suite. - - Hardcoded at the moment, a modular system can be built here - to support things like PG prepared transactions, tables all - dropped, etc. - - """ - _assert_no_stray_pool_connections() - -_STRAY_CONNECTION_FAILURES = 0 - - -def _assert_no_stray_pool_connections(): - global _STRAY_CONNECTION_FAILURES - - # lazy gc on cPython means "do nothing." pool connections - # shouldn't be in cycles, should go away. - testutil.lazy_gc() - - # however, once in awhile, on an EC2 machine usually, - # there's a ref in there. usually just one. - if pool._refs: - - # OK, let's be somewhat forgiving. - _STRAY_CONNECTION_FAILURES += 1 - - print("Encountered a stray connection in test cleanup: %s" - % str(pool._refs)) - # then do a real GC sweep. We shouldn't even be here - # so a single sweep should really be doing it, otherwise - # there's probably a real unreachable cycle somewhere. - testutil.gc_collect() - - # if we've already had two of these occurrences, or - # after a hard gc sweep we still have pool._refs?! - # now we have to raise. - if pool._refs: - err = str(pool._refs) - - # but clean out the pool refs collection directly, - # reset the counter, - # so the error doesn't at least keep happening. 
- pool._refs.clear() - _STRAY_CONNECTION_FAILURES = 0 - assert False, "Stray connection refused to leave "\ - "after gc.collect(): %s" % err - elif _STRAY_CONNECTION_FAILURES > 10: - assert False, "Encountered more than 10 stray connections" - _STRAY_CONNECTION_FAILURES = 0 - - -def eq_(a, b, msg=None): - """Assert a == b, with repr messaging on failure.""" - assert a == b, msg or "%r != %r" % (a, b) - - -def ne_(a, b, msg=None): - """Assert a != b, with repr messaging on failure.""" - assert a != b, msg or "%r == %r" % (a, b) - - -def le_(a, b, msg=None): - """Assert a <= b, with repr messaging on failure.""" - assert a <= b, msg or "%r != %r" % (a, b) - - -def is_(a, b, msg=None): - """Assert a is b, with repr messaging on failure.""" - assert a is b, msg or "%r is not %r" % (a, b) - - -def is_not_(a, b, msg=None): - """Assert a is not b, with repr messaging on failure.""" - assert a is not b, msg or "%r is %r" % (a, b) - - -def in_(a, b, msg=None): - """Assert a in b, with repr messaging on failure.""" - assert a in b, msg or "%r not in %r" % (a, b) - - -def not_in_(a, b, msg=None): - """Assert a in not b, with repr messaging on failure.""" - assert a not in b, msg or "%r is in %r" % (a, b) - - -def startswith_(a, fragment, msg=None): - """Assert a.startswith(fragment), with repr messaging on failure.""" - assert a.startswith(fragment), msg or "%r does not start with %r" % ( - a, fragment) - - -def assert_raises(except_cls, callable_, *args, **kw): - try: - callable_(*args, **kw) - success = False - except except_cls: - success = True - - # assert outside the block so it works for AssertionError too ! - assert success, "Callable did not raise an exception" - - -def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): - try: - callable_(*args, **kwargs) - assert False, "Callable did not raise an exception" - except except_cls as e: - assert re.search( - msg, util.text_type(e), re.UNICODE), "%r !~ %s" % (msg, e) - print(util.text_type(e).encode('utf-8')) - - -class AssertsCompiledSQL(object): - def assert_compile(self, clause, result, params=None, - checkparams=None, dialect=None, - checkpositional=None, - check_prefetch=None, - use_default_dialect=False, - allow_dialect_select=False, - literal_binds=False): - if use_default_dialect: - dialect = default.DefaultDialect() - elif allow_dialect_select: - dialect = None - else: - if dialect is None: - dialect = getattr(self, '__dialect__', None) - - if dialect is None: - dialect = config.db.dialect - elif dialect == 'default': - dialect = default.DefaultDialect() - elif isinstance(dialect, util.string_types): - dialect = url.URL(dialect).get_dialect()() - - kw = {} - compile_kwargs = {} - - if params is not None: - kw['column_keys'] = list(params) - - if literal_binds: - compile_kwargs['literal_binds'] = True - - if isinstance(clause, orm.Query): - context = clause._compile_context() - context.statement.use_labels = True - clause = context.statement - - if compile_kwargs: - kw['compile_kwargs'] = compile_kwargs - - c = clause.compile(dialect=dialect, **kw) - - param_str = repr(getattr(c, 'params', {})) - - if util.py3k: - param_str = param_str.encode('utf-8').decode('ascii', 'ignore') - print( - ("\nSQL String:\n" + - util.text_type(c) + - param_str).encode('utf-8')) - else: - print( - "\nSQL String:\n" + - util.text_type(c).encode('utf-8') + - param_str) - - cc = re.sub(r'[\n\t]', '', util.text_type(c)) - - eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect)) - - if checkparams is not None: - 
eq_(c.construct_params(params), checkparams) - if checkpositional is not None: - p = c.construct_params(params) - eq_(tuple([p[x] for x in c.positiontup]), checkpositional) - if check_prefetch is not None: - eq_(c.prefetch, check_prefetch) - - -class ComparesTables(object): - - def assert_tables_equal(self, table, reflected_table, strict_types=False): - assert len(table.c) == len(reflected_table.c) - for c, reflected_c in zip(table.c, reflected_table.c): - eq_(c.name, reflected_c.name) - assert reflected_c is reflected_table.c[c.name] - eq_(c.primary_key, reflected_c.primary_key) - eq_(c.nullable, reflected_c.nullable) - - if strict_types: - msg = "Type '%s' doesn't correspond to type '%s'" - assert isinstance(reflected_c.type, type(c.type)), \ - msg % (reflected_c.type, c.type) - else: - self.assert_types_base(reflected_c, c) - - if isinstance(c.type, sqltypes.String): - eq_(c.type.length, reflected_c.type.length) - - eq_( - set([f.column.name for f in c.foreign_keys]), - set([f.column.name for f in reflected_c.foreign_keys]) - ) - if c.server_default: - assert isinstance(reflected_c.server_default, - schema.FetchedValue) - - assert len(table.primary_key) == len(reflected_table.primary_key) - for c in table.primary_key: - assert reflected_table.primary_key.columns[c.name] is not None - - def assert_types_base(self, c1, c2): - assert c1.type._compare_type_affinity(c2.type),\ - "On column %r, type '%s' doesn't correspond to type '%s'" % \ - (c1.name, c1.type, c2.type) - - -class AssertsExecutionResults(object): - def assert_result(self, result, class_, *objects): - result = list(result) - print(repr(result)) - self.assert_list(result, class_, objects) - - def assert_list(self, result, class_, list): - self.assert_(len(result) == len(list), - "result list is not the same size as test list, " + - "for class " + class_.__name__) - for i in range(0, len(list)): - self.assert_row(class_, result[i], list[i]) - - def assert_row(self, class_, rowobj, desc): - self.assert_(rowobj.__class__ is class_, - "item class is not " + repr(class_)) - for key, value in desc.items(): - if isinstance(value, tuple): - if isinstance(value[1], list): - self.assert_list(getattr(rowobj, key), value[0], value[1]) - else: - self.assert_row(value[0], getattr(rowobj, key), value[1]) - else: - self.assert_(getattr(rowobj, key) == value, - "attribute %s value %s does not match %s" % ( - key, getattr(rowobj, key), value)) - - def assert_unordered_result(self, result, cls, *expected): - """As assert_result, but the order of objects is not considered. - - The algorithm is very expensive but not a big deal for the small - numbers of rows that the test suite manipulates. 
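# [editor's note -- not part of the deleted file] The assert_compile helper
# above is normally used from a test class, roughly like this (1.0-era
# select([...]) calling convention; names illustrative):
from sqlalchemy import Table, Column, Integer, MetaData, select
from sqlalchemy.testing.assertions import AssertsCompiledSQL

metadata = MetaData()
t = Table("t", metadata, Column("a", Integer))

class SelectTest(AssertsCompiledSQL):
    __dialect__ = "default"

    def test_simple_select(self):
        self.assert_compile(select([t.c.a]), "SELECT t.a FROM t")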
- """ - - class immutabledict(dict): - def __hash__(self): - return id(self) - - found = util.IdentitySet(result) - expected = set([immutabledict(e) for e in expected]) - - for wrong in util.itertools_filterfalse(lambda o: - isinstance(o, cls), found): - fail('Unexpected type "%s", expected "%s"' % ( - type(wrong).__name__, cls.__name__)) - - if len(found) != len(expected): - fail('Unexpected object count "%s", expected "%s"' % ( - len(found), len(expected))) - - NOVALUE = object() - - def _compare_item(obj, spec): - for key, value in spec.items(): - if isinstance(value, tuple): - try: - self.assert_unordered_result( - getattr(obj, key), value[0], *value[1]) - except AssertionError: - return False - else: - if getattr(obj, key, NOVALUE) != value: - return False - return True - - for expected_item in expected: - for found_item in found: - if _compare_item(found_item, expected_item): - found.remove(found_item) - break - else: - fail( - "Expected %s instance with attributes %s not found." % ( - cls.__name__, repr(expected_item))) - return True - - def sql_execution_asserter(self, db=None): - if db is None: - from . import db as db - - return assertsql.assert_engine(db) - - def assert_sql_execution(self, db, callable_, *rules): - with self.sql_execution_asserter(db) as asserter: - callable_() - asserter.assert_(*rules) - - def assert_sql(self, db, callable_, rules): - - newrules = [] - for rule in rules: - if isinstance(rule, dict): - newrule = assertsql.AllOf(*[ - assertsql.CompiledSQL(k, v) for k, v in rule.items() - ]) - else: - newrule = assertsql.CompiledSQL(*rule) - newrules.append(newrule) - - self.assert_sql_execution(db, callable_, *newrules) - - def assert_sql_count(self, db, callable_, count): - self.assert_sql_execution( - db, callable_, assertsql.CountStatements(count)) - - @contextlib.contextmanager - def assert_execution(self, *rules): - assertsql.asserter.add_rules(rules) - try: - yield - assertsql.asserter.statement_complete() - finally: - assertsql.asserter.clear_rules() - - def assert_statement_count(self, count): - return self.assert_execution(assertsql.CountStatements(count)) diff --git a/python/sqlalchemy/testing/assertsql.py b/python/sqlalchemy/testing/assertsql.py deleted file mode 100644 index 24349360..00000000 --- a/python/sqlalchemy/testing/assertsql.py +++ /dev/null @@ -1,361 +0,0 @@ -# testing/assertsql.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from ..engine.default import DefaultDialect -from .. import util -import re -import collections -import contextlib -from .. 
import event -from sqlalchemy.schema import _DDLCompiles -from sqlalchemy.engine.util import _distill_params - - -class AssertRule(object): - - is_consumed = False - errormessage = None - consume_statement = True - - def process_statement(self, execute_observed): - pass - - def no_more_statements(self): - assert False, 'All statements are complete, but pending '\ - 'assertion rules remain' - - -class SQLMatchRule(AssertRule): - pass - - -class CursorSQL(SQLMatchRule): - consume_statement = False - - def __init__(self, statement, params=None): - self.statement = statement - self.params = params - - def process_statement(self, execute_observed): - stmt = execute_observed.statements[0] - if self.statement != stmt.statement or ( - self.params is not None and self.params != stmt.parameters): - self.errormessage = \ - "Testing for exact SQL %s parameters %s received %s %s" % ( - self.statement, self.params, - stmt.statement, stmt.parameters - ) - else: - execute_observed.statements.pop(0) - self.is_consumed = True - if not execute_observed.statements: - self.consume_statement = True - - -class CompiledSQL(SQLMatchRule): - - def __init__(self, statement, params=None): - self.statement = statement - self.params = params - - def _compare_sql(self, execute_observed, received_statement): - stmt = re.sub(r'[\n\t]', '', self.statement) - return received_statement == stmt - - def _compile_dialect(self, execute_observed): - return DefaultDialect() - - def _received_statement(self, execute_observed): - """reconstruct the statement and params in terms - of a target dialect, which for CompiledSQL is just DefaultDialect.""" - - context = execute_observed.context - compare_dialect = self._compile_dialect(execute_observed) - if isinstance(context.compiled.statement, _DDLCompiles): - compiled = \ - context.compiled.statement.compile(dialect=compare_dialect) - else: - compiled = ( - context.compiled.statement.compile( - dialect=compare_dialect, - column_keys=context.compiled.column_keys, - inline=context.compiled.inline) - ) - _received_statement = re.sub(r'[\n\t]', '', util.text_type(compiled)) - parameters = execute_observed.parameters - - if not parameters: - _received_parameters = [compiled.construct_params()] - else: - _received_parameters = [ - compiled.construct_params(m) for m in parameters] - - return _received_statement, _received_parameters - - def process_statement(self, execute_observed): - context = execute_observed.context - - _received_statement, _received_parameters = \ - self._received_statement(execute_observed) - params = self._all_params(context) - - equivalent = self._compare_sql(execute_observed, _received_statement) - - if equivalent: - if params is not None: - all_params = list(params) - all_received = list(_received_parameters) - while all_params and all_received: - param = dict(all_params.pop(0)) - - for idx, received in enumerate(list(all_received)): - # do a positive compare only - for param_key in param: - # a key in param did not match current - # 'received' - if param_key not in received or \ - received[param_key] != param[param_key]: - break - else: - # all keys in param matched 'received'; - # onto next param - del all_received[idx] - break - else: - # param did not match any entry - # in all_received - equivalent = False - break - if all_params or all_received: - equivalent = False - - if equivalent: - self.is_consumed = True - self.errormessage = None - else: - self.errormessage = self._failure_message(params) % { - 'received_statement': _received_statement, - 
'received_parameters': _received_parameters - } - - def _all_params(self, context): - if self.params: - if util.callable(self.params): - params = self.params(context) - else: - params = self.params - if not isinstance(params, list): - params = [params] - return params - else: - return None - - def _failure_message(self, expected_params): - return ( - 'Testing for compiled statement %r partial params %r, ' - 'received %%(received_statement)r with params ' - '%%(received_parameters)r' % ( - self.statement, expected_params - ) - ) - - -class RegexSQL(CompiledSQL): - def __init__(self, regex, params=None): - SQLMatchRule.__init__(self) - self.regex = re.compile(regex) - self.orig_regex = regex - self.params = params - - def _failure_message(self, expected_params): - return ( - 'Testing for compiled statement ~%r partial params %r, ' - 'received %%(received_statement)r with params ' - '%%(received_parameters)r' % ( - self.orig_regex, expected_params - ) - ) - - def _compare_sql(self, execute_observed, received_statement): - return bool(self.regex.match(received_statement)) - - -class DialectSQL(CompiledSQL): - def _compile_dialect(self, execute_observed): - return execute_observed.context.dialect - - def _compare_no_space(self, real_stmt, received_stmt): - stmt = re.sub(r'[\n\t]', '', real_stmt) - return received_stmt == stmt - - def _received_statement(self, execute_observed): - received_stmt, received_params = super(DialectSQL, self).\ - _received_statement(execute_observed) - - # TODO: why do we need this part? - for real_stmt in execute_observed.statements: - if self._compare_no_space(real_stmt.statement, received_stmt): - break - else: - raise AssertionError( - "Can't locate compiled statement %r in list of " - "statements actually invoked" % received_stmt) - - return received_stmt, execute_observed.context.compiled_parameters - - def _compare_sql(self, execute_observed, received_statement): - stmt = re.sub(r'[\n\t]', '', self.statement) - # convert our comparison statement to have the - # paramstyle of the received - paramstyle = execute_observed.context.dialect.paramstyle - if paramstyle == 'pyformat': - stmt = re.sub( - r':([\w_]+)', r"%(\1)s", stmt) - else: - # positional params - repl = None - if paramstyle == 'qmark': - repl = "?" 
- elif paramstyle == 'format': - repl = r"%s" - elif paramstyle == 'numeric': - repl = None - stmt = re.sub(r':([\w_]+)', repl, stmt) - - return received_statement == stmt - - -class CountStatements(AssertRule): - - def __init__(self, count): - self.count = count - self._statement_count = 0 - - def process_statement(self, execute_observed): - self._statement_count += 1 - - def no_more_statements(self): - if self.count != self._statement_count: - assert False, 'desired statement count %d does not match %d' \ - % (self.count, self._statement_count) - - -class AllOf(AssertRule): - - def __init__(self, *rules): - self.rules = set(rules) - - def process_statement(self, execute_observed): - for rule in list(self.rules): - rule.errormessage = None - rule.process_statement(execute_observed) - if rule.is_consumed: - self.rules.discard(rule) - if not self.rules: - self.is_consumed = True - break - elif not rule.errormessage: - # rule is not done yet - self.errormessage = None - break - else: - self.errormessage = list(self.rules)[0].errormessage - - -class Or(AllOf): - - def process_statement(self, execute_observed): - for rule in self.rules: - rule.process_statement(execute_observed) - if rule.is_consumed: - self.is_consumed = True - break - else: - self.errormessage = list(self.rules)[0].errormessage - - -class SQLExecuteObserved(object): - def __init__(self, context, clauseelement, multiparams, params): - self.context = context - self.clauseelement = clauseelement - self.parameters = _distill_params(multiparams, params) - self.statements = [] - - -class SQLCursorExecuteObserved( - collections.namedtuple( - "SQLCursorExecuteObserved", - ["statement", "parameters", "context", "executemany"]) -): - pass - - -class SQLAsserter(object): - def __init__(self): - self.accumulated = [] - - def _close(self): - self._final = self.accumulated - del self.accumulated - - def assert_(self, *rules): - rules = list(rules) - observed = list(self._final) - - while observed and rules: - rule = rules[0] - rule.process_statement(observed[0]) - if rule.is_consumed: - rules.pop(0) - elif rule.errormessage: - assert False, rule.errormessage - - if rule.consume_statement: - observed.pop(0) - - if not observed and rules: - rules[0].no_more_statements() - elif not rules and observed: - assert False, "Additional SQL statements remain" - - -@contextlib.contextmanager -def assert_engine(engine): - asserter = SQLAsserter() - - orig = [] - - @event.listens_for(engine, "before_execute") - def connection_execute(conn, clauseelement, multiparams, params): - # grab the original statement + params before any cursor - # execution - orig[:] = clauseelement, multiparams, params - - @event.listens_for(engine, "after_cursor_execute") - def cursor_execute(conn, cursor, statement, parameters, - context, executemany): - if not context: - return - # then grab real cursor statements and associate them all - # around a single context - if asserter.accumulated and \ - asserter.accumulated[-1].context is context: - obs = asserter.accumulated[-1] - else: - obs = SQLExecuteObserved(context, orig[0], orig[1], orig[2]) - asserter.accumulated.append(obs) - obs.statements.append( - SQLCursorExecuteObserved( - statement, parameters, context, executemany) - ) - - try: - yield asserter - finally: - event.remove(engine, "after_cursor_execute", cursor_execute) - event.remove(engine, "before_execute", connection_execute) - asserter._close() diff --git a/python/sqlalchemy/testing/config.py b/python/sqlalchemy/testing/config.py deleted file mode 100644 index 
a3d6e169..00000000 --- a/python/sqlalchemy/testing/config.py +++ /dev/null @@ -1,92 +0,0 @@ -# testing/config.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import collections - -requirements = None -db = None -db_url = None -db_opts = None -file_config = None -test_schema = None -test_schema_2 = None -_current = None -_skip_test_exception = None - - -class Config(object): - def __init__(self, db, db_opts, options, file_config): - self.db = db - self.db_opts = db_opts - self.options = options - self.file_config = file_config - self.test_schema = "test_schema" - self.test_schema_2 = "test_schema_2" - - _stack = collections.deque() - _configs = {} - - @classmethod - def register(cls, db, db_opts, options, file_config): - """add a config as one of the global configs. - - If there are no configs set up yet, this config also - gets set as the "_current". - """ - cfg = Config(db, db_opts, options, file_config) - - cls._configs[cfg.db.name] = cfg - cls._configs[(cfg.db.name, cfg.db.dialect)] = cfg - cls._configs[cfg.db] = cfg - return cfg - - @classmethod - def set_as_current(cls, config, namespace): - global db, _current, db_url, test_schema, test_schema_2, db_opts - _current = config - db_url = config.db.url - db_opts = config.db_opts - test_schema = config.test_schema - test_schema_2 = config.test_schema_2 - namespace.db = db = config.db - - @classmethod - def push_engine(cls, db, namespace): - assert _current, "Can't push without a default Config set up" - cls.push( - Config( - db, _current.db_opts, _current.options, _current.file_config), - namespace - ) - - @classmethod - def push(cls, config, namespace): - cls._stack.append(_current) - cls.set_as_current(config, namespace) - - @classmethod - def reset(cls, namespace): - if cls._stack: - cls.set_as_current(cls._stack[0], namespace) - cls._stack.clear() - - @classmethod - def all_configs(cls): - for cfg in set(cls._configs.values()): - yield cfg - - @classmethod - def all_dbs(cls): - for cfg in cls.all_configs(): - yield cfg.db - - def skip_test(self, msg): - skip_test(msg) - - -def skip_test(msg): - raise _skip_test_exception(msg) diff --git a/python/sqlalchemy/testing/distutils_run.py b/python/sqlalchemy/testing/distutils_run.py deleted file mode 100644 index 38de8872..00000000 --- a/python/sqlalchemy/testing/distutils_run.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Quick and easy way to get setup.py test to run py.test without any -custom setuptools/distutils code. - -""" -import unittest -import pytest - - -class TestSuite(unittest.TestCase): - def test_sqlalchemy(self): - pytest.main(["-n", "4", "-q"]) diff --git a/python/sqlalchemy/testing/engines.py b/python/sqlalchemy/testing/engines.py deleted file mode 100644 index 1eaf6296..00000000 --- a/python/sqlalchemy/testing/engines.py +++ /dev/null @@ -1,346 +0,0 @@ -# testing/engines.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from __future__ import absolute_import - -import weakref -from . import config -from .util import decorator -from .. 
import event, pool -import re -import warnings - - -class ConnectionKiller(object): - - def __init__(self): - self.proxy_refs = weakref.WeakKeyDictionary() - self.testing_engines = weakref.WeakKeyDictionary() - self.conns = set() - - def add_engine(self, engine): - self.testing_engines[engine] = True - - def connect(self, dbapi_conn, con_record): - self.conns.add((dbapi_conn, con_record)) - - def checkout(self, dbapi_con, con_record, con_proxy): - self.proxy_refs[con_proxy] = True - - def invalidate(self, dbapi_con, con_record, exception): - self.conns.discard((dbapi_con, con_record)) - - def _safe(self, fn): - try: - fn() - except Exception as e: - warnings.warn( - "testing_reaper couldn't " - "rollback/close connection: %s" % e) - - def rollback_all(self): - for rec in list(self.proxy_refs): - if rec is not None and rec.is_valid: - self._safe(rec.rollback) - - def close_all(self): - for rec in list(self.proxy_refs): - if rec is not None and rec.is_valid: - self._safe(rec._close) - - def _after_test_ctx(self): - # this can cause a deadlock with pg8000 - pg8000 acquires - # prepared statement lock inside of rollback() - if async gc - # is collecting in finalize_fairy, deadlock. - # not sure if this should be if pypy/jython only. - # note that firebird/fdb definitely needs this though - for conn, rec in list(self.conns): - self._safe(conn.rollback) - - def _stop_test_ctx(self): - if config.options.low_connections: - self._stop_test_ctx_minimal() - else: - self._stop_test_ctx_aggressive() - - def _stop_test_ctx_minimal(self): - self.close_all() - - self.conns = set() - - for rec in list(self.testing_engines): - if rec is not config.db: - rec.dispose() - - def _stop_test_ctx_aggressive(self): - self.close_all() - for conn, rec in list(self.conns): - self._safe(conn.close) - rec.connection = None - - self.conns = set() - for rec in list(self.testing_engines): - rec.dispose() - - def assert_all_closed(self): - for rec in self.proxy_refs: - if rec.is_valid: - assert False - -testing_reaper = ConnectionKiller() - - -def drop_all_tables(metadata, bind): - testing_reaper.close_all() - if hasattr(bind, 'close'): - bind.close() - - if not config.db.dialect.supports_alter: - from . 
import assertions - with assertions.expect_warnings( - "Can't sort tables", assert_=False): - metadata.drop_all(bind) - else: - metadata.drop_all(bind) - - -@decorator -def assert_conns_closed(fn, *args, **kw): - try: - fn(*args, **kw) - finally: - testing_reaper.assert_all_closed() - - -@decorator -def rollback_open_connections(fn, *args, **kw): - """Decorator that rolls back all open connections after fn execution.""" - - try: - fn(*args, **kw) - finally: - testing_reaper.rollback_all() - - -@decorator -def close_first(fn, *args, **kw): - """Decorator that closes all connections before fn execution.""" - - testing_reaper.close_all() - fn(*args, **kw) - - -@decorator -def close_open_connections(fn, *args, **kw): - """Decorator that closes all connections after fn execution.""" - try: - fn(*args, **kw) - finally: - testing_reaper.close_all() - - -def all_dialects(exclude=None): - import sqlalchemy.databases as d - for name in d.__all__: - # TEMPORARY - if exclude and name in exclude: - continue - mod = getattr(d, name, None) - if not mod: - mod = getattr(__import__( - 'sqlalchemy.databases.%s' % name).databases, name) - yield mod.dialect() - - -class ReconnectFixture(object): - - def __init__(self, dbapi): - self.dbapi = dbapi - self.connections = [] - - def __getattr__(self, key): - return getattr(self.dbapi, key) - - def connect(self, *args, **kwargs): - conn = self.dbapi.connect(*args, **kwargs) - self.connections.append(conn) - return conn - - def _safe(self, fn): - try: - fn() - except Exception as e: - warnings.warn( - "ReconnectFixture couldn't " - "close connection: %s" % e) - - def shutdown(self): - # TODO: this doesn't cover all cases - # as nicely as we'd like, namely MySQLdb. - # would need to implement R. Brewer's - # proxy server idea to get better - # coverage. - for c in list(self.connections): - self._safe(c.close) - self.connections = [] - - -def reconnecting_engine(url=None, options=None): - url = url or config.db.url - dbapi = config.db.dialect.dbapi - if not options: - options = {} - options['module'] = ReconnectFixture(dbapi) - engine = testing_engine(url, options) - _dispose = engine.dispose - - def dispose(): - engine.dialect.dbapi.shutdown() - _dispose() - - engine.test_shutdown = engine.dialect.dbapi.shutdown - engine.dispose = dispose - return engine - - -def testing_engine(url=None, options=None): - """Produce an engine configured by --options with optional overrides.""" - - from sqlalchemy import create_engine - from sqlalchemy.engine.url import make_url - - if not options: - use_reaper = True - else: - use_reaper = options.pop('use_reaper', True) - - url = url or config.db.url - - url = make_url(url) - if options is None: - if config.db is None or url.drivername == config.db.url.drivername: - options = config.db_opts - else: - options = {} - - engine = create_engine(url, **options) - engine._has_events = True # enable event blocks, helps with profiling - - if isinstance(engine.pool, pool.QueuePool): - engine.pool._timeout = 0 - engine.pool._max_overflow = 0 - if use_reaper: - event.listen(engine.pool, 'connect', testing_reaper.connect) - event.listen(engine.pool, 'checkout', testing_reaper.checkout) - event.listen(engine.pool, 'invalidate', testing_reaper.invalidate) - testing_reaper.add_engine(engine) - - return engine - - -def mock_engine(dialect_name=None): - """Provides a mocking engine based on the current testing.db. - - This is normally used to test DDL generation flow as emitted - by an Engine. 
- - It should not be used in other cases, as assert_compile() and - assert_sql_execution() are much better choices with fewer - moving parts. - - """ - - from sqlalchemy import create_engine - - if not dialect_name: - dialect_name = config.db.name - - buffer = [] - - def executor(sql, *a, **kw): - buffer.append(sql) - - def assert_sql(stmts): - recv = [re.sub(r'[\n\t]', '', str(s)) for s in buffer] - assert recv == stmts, recv - - def print_sql(): - d = engine.dialect - return "\n".join( - str(s.compile(dialect=d)) - for s in engine.mock - ) - - engine = create_engine(dialect_name + '://', - strategy='mock', executor=executor) - assert not hasattr(engine, 'mock') - engine.mock = buffer - engine.assert_sql = assert_sql - engine.print_sql = print_sql - return engine - - -class DBAPIProxyCursor(object): - """Proxy a DBAPI cursor. - - Tests can provide subclasses of this to intercept - DBAPI-level cursor operations. - - """ - - def __init__(self, engine, conn, *args, **kwargs): - self.engine = engine - self.connection = conn - self.cursor = conn.cursor(*args, **kwargs) - - def execute(self, stmt, parameters=None, **kw): - if parameters: - return self.cursor.execute(stmt, parameters, **kw) - else: - return self.cursor.execute(stmt, **kw) - - def executemany(self, stmt, params, **kw): - return self.cursor.executemany(stmt, params, **kw) - - def __getattr__(self, key): - return getattr(self.cursor, key) - - -class DBAPIProxyConnection(object): - """Proxy a DBAPI connection. - - Tests can provide subclasses of this to intercept - DBAPI-level connection operations. - - """ - - def __init__(self, engine, cursor_cls): - self.conn = self._sqla_unwrap = engine.pool._creator() - self.engine = engine - self.cursor_cls = cursor_cls - - def cursor(self, *args, **kwargs): - return self.cursor_cls(self.engine, self.conn, *args, **kwargs) - - def close(self): - self.conn.close() - - def __getattr__(self, key): - return getattr(self.conn, key) - - -def proxying_engine(conn_cls=DBAPIProxyConnection, - cursor_cls=DBAPIProxyCursor): - """Produce an engine that provides proxy hooks for - common methods. - - """ - def mock_conn(): - return conn_cls(config.db, cursor_cls) - return testing_engine(options={'creator': mock_conn}) - - diff --git a/python/sqlalchemy/testing/entities.py b/python/sqlalchemy/testing/entities.py deleted file mode 100644 index 65178ea5..00000000 --- a/python/sqlalchemy/testing/entities.py +++ /dev/null @@ -1,101 +0,0 @@ -# testing/entities.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import sqlalchemy as sa -from sqlalchemy import exc as sa_exc - -_repr_stack = set() - - -class BasicEntity(object): - - def __init__(self, **kw): - for key, value in kw.items(): - setattr(self, key, value) - - def __repr__(self): - if id(self) in _repr_stack: - return object.__repr__(self) - _repr_stack.add(id(self)) - try: - return "%s(%s)" % ( - (self.__class__.__name__), - ', '.join(["%s=%r" % (key, getattr(self, key)) - for key in sorted(self.__dict__.keys()) - if not key.startswith('_')])) - finally: - _repr_stack.remove(id(self)) - -_recursion_stack = set() - - -class ComparableEntity(BasicEntity): - - def __hash__(self): - return hash(self.__class__) - - def __ne__(self, other): - return not self.__eq__(other) - - def __eq__(self, other): - """'Deep, sparse compare. 
- - Deeply compare two entities, following the non-None attributes of the - non-persisted object, if possible. - - """ - if other is self: - return True - elif not self.__class__ == other.__class__: - return False - - if id(self) in _recursion_stack: - return True - _recursion_stack.add(id(self)) - - try: - # pick the entity that's not SA persisted as the source - try: - self_key = sa.orm.attributes.instance_state(self).key - except sa.orm.exc.NO_STATE: - self_key = None - - if other is None: - a = self - b = other - elif self_key is not None: - a = other - b = self - else: - a = self - b = other - - for attr in list(a.__dict__): - if attr.startswith('_'): - continue - value = getattr(a, attr) - - try: - # handle lazy loader errors - battr = getattr(b, attr) - except (AttributeError, sa_exc.UnboundExecutionError): - return False - - if hasattr(value, '__iter__'): - if hasattr(value, '__getitem__') and not hasattr( - value, 'keys'): - if list(value) != list(battr): - return False - else: - if set(value) != set(battr): - return False - else: - if value is not None and value != battr: - return False - return True - finally: - _recursion_stack.remove(id(self)) diff --git a/python/sqlalchemy/testing/exclusions.py b/python/sqlalchemy/testing/exclusions.py deleted file mode 100644 index 972dec3a..00000000 --- a/python/sqlalchemy/testing/exclusions.py +++ /dev/null @@ -1,440 +0,0 @@ -# testing/exclusions.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -import operator -from ..util import decorator -from . import config -from .. import util -import inspect -import contextlib - - -def skip_if(predicate, reason=None): - rule = compound() - pred = _as_predicate(predicate, reason) - rule.skips.add(pred) - return rule - - -def fails_if(predicate, reason=None): - rule = compound() - pred = _as_predicate(predicate, reason) - rule.fails.add(pred) - return rule - - -class compound(object): - def __init__(self): - self.fails = set() - self.skips = set() - self.tags = set() - - def __add__(self, other): - return self.add(other) - - def add(self, *others): - copy = compound() - copy.fails.update(self.fails) - copy.skips.update(self.skips) - copy.tags.update(self.tags) - for other in others: - copy.fails.update(other.fails) - copy.skips.update(other.skips) - copy.tags.update(other.tags) - return copy - - def not_(self): - copy = compound() - copy.fails.update(NotPredicate(fail) for fail in self.fails) - copy.skips.update(NotPredicate(skip) for skip in self.skips) - copy.tags.update(self.tags) - return copy - - @property - def enabled(self): - return self.enabled_for_config(config._current) - - def enabled_for_config(self, config): - for predicate in self.skips.union(self.fails): - if predicate(config): - return False - else: - return True - - def matching_config_reasons(self, config): - return [ - predicate._as_string(config) for predicate - in self.skips.union(self.fails) - if predicate(config) - ] - - def include_test(self, include_tags, exclude_tags): - return bool( - not self.tags.intersection(exclude_tags) and - (not include_tags or self.tags.intersection(include_tags)) - ) - - def _extend(self, other): - self.skips.update(other.skips) - self.fails.update(other.fails) - self.tags.update(other.tags) - - def __call__(self, fn): - if hasattr(fn, '_sa_exclusion_extend'): - fn._sa_exclusion_extend._extend(self) - return fn - - @decorator - def 
decorate(fn, *args, **kw): - return self._do(config._current, fn, *args, **kw) - decorated = decorate(fn) - decorated._sa_exclusion_extend = self - return decorated - - @contextlib.contextmanager - def fail_if(self): - all_fails = compound() - all_fails.fails.update(self.skips.union(self.fails)) - - try: - yield - except Exception as ex: - all_fails._expect_failure(config._current, ex) - else: - all_fails._expect_success(config._current) - - def _do(self, config, fn, *args, **kw): - for skip in self.skips: - if skip(config): - msg = "'%s' : %s" % ( - fn.__name__, - skip._as_string(config) - ) - config.skip_test(msg) - - try: - return_value = fn(*args, **kw) - except Exception as ex: - self._expect_failure(config, ex, name=fn.__name__) - else: - self._expect_success(config, name=fn.__name__) - return return_value - - def _expect_failure(self, config, ex, name='block'): - for fail in self.fails: - if fail(config): - print(("%s failed as expected (%s): %s " % ( - name, fail._as_string(config), str(ex)))) - break - else: - util.raise_from_cause(ex) - - def _expect_success(self, config, name='block'): - if not self.fails: - return - for fail in self.fails: - if not fail(config): - break - else: - raise AssertionError( - "Unexpected success for '%s' (%s)" % - ( - name, - " and ".join( - fail._as_string(config) - for fail in self.fails - ) - ) - ) - - -def requires_tag(tagname): - return tags([tagname]) - - -def tags(tagnames): - comp = compound() - comp.tags.update(tagnames) - return comp - - -def only_if(predicate, reason=None): - predicate = _as_predicate(predicate) - return skip_if(NotPredicate(predicate), reason) - - -def succeeds_if(predicate, reason=None): - predicate = _as_predicate(predicate) - return fails_if(NotPredicate(predicate), reason) - - -class Predicate(object): - @classmethod - def as_predicate(cls, predicate, description=None): - if isinstance(predicate, compound): - return cls.as_predicate(predicate.enabled_for_config, description) - elif isinstance(predicate, Predicate): - if description and predicate.description is None: - predicate.description = description - return predicate - elif isinstance(predicate, (list, set)): - return OrPredicate( - [cls.as_predicate(pred) for pred in predicate], - description) - elif isinstance(predicate, tuple): - return SpecPredicate(*predicate) - elif isinstance(predicate, util.string_types): - tokens = predicate.split(" ", 2) - op = spec = None - db = tokens.pop(0) - if tokens: - op = tokens.pop(0) - if tokens: - spec = tuple(int(d) for d in tokens.pop(0).split(".")) - return SpecPredicate(db, op, spec, description=description) - elif util.callable(predicate): - return LambdaPredicate(predicate, description) - else: - assert False, "unknown predicate type: %s" % predicate - - def _format_description(self, config, negate=False): - bool_ = self(config) - if negate: - bool_ = not negate - return self.description % { - "driver": config.db.url.get_driver_name(), - "database": config.db.url.get_backend_name(), - "doesnt_support": "doesn't support" if bool_ else "does support", - "does_support": "does support" if bool_ else "doesn't support" - } - - def _as_string(self, config=None, negate=False): - raise NotImplementedError() - - -class BooleanPredicate(Predicate): - def __init__(self, value, description=None): - self.value = value - self.description = description or "boolean %s" % value - - def __call__(self, config): - return self.value - - def _as_string(self, config, negate=False): - return self._format_description(config, negate=negate) - - 
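# [Editor's note -- illustration only, not part of the patch hunks above or
# below: a minimal sketch of the predicate grammar accepted by
# Predicate.as_predicate() earlier in this removed module. A string of the
# form "db op version" is split into database, operator and version spec and
# becomes a SpecPredicate (defined next); a bare callable becomes a
# LambdaPredicate. Both lines assume only names defined in this file:
#
#     rule = skip_if("mysql >= 5.5", "flaky on newer MySQL")
#     # -> a compound() whose skips hold SpecPredicate("mysql", ">=", (5, 5))
#
#     rule = skip_if(lambda config: config.db.name == "sqlite")
#     # -> a compound() whose skips hold a LambdaPredicate
#
# Applying `rule` as a decorator then wraps the test via compound.__call__.]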
-class SpecPredicate(Predicate): - def __init__(self, db, op=None, spec=None, description=None): - self.db = db - self.op = op - self.spec = spec - self.description = description - - _ops = { - '<': operator.lt, - '>': operator.gt, - '==': operator.eq, - '!=': operator.ne, - '<=': operator.le, - '>=': operator.ge, - 'in': operator.contains, - 'between': lambda val, pair: val >= pair[0] and val <= pair[1], - } - - def __call__(self, config): - engine = config.db - - if "+" in self.db: - dialect, driver = self.db.split('+') - else: - dialect, driver = self.db, None - - if dialect and engine.name != dialect: - return False - if driver is not None and engine.driver != driver: - return False - - if self.op is not None: - assert driver is None, "DBAPI version specs not supported yet" - - version = _server_version(engine) - oper = hasattr(self.op, '__call__') and self.op \ - or self._ops[self.op] - return oper(version, self.spec) - else: - return True - - def _as_string(self, config, negate=False): - if self.description is not None: - return self._format_description(config) - elif self.op is None: - if negate: - return "not %s" % self.db - else: - return "%s" % self.db - else: - if negate: - return "not %s %s %s" % ( - self.db, - self.op, - self.spec - ) - else: - return "%s %s %s" % ( - self.db, - self.op, - self.spec - ) - - -class LambdaPredicate(Predicate): - def __init__(self, lambda_, description=None, args=None, kw=None): - spec = inspect.getargspec(lambda_) - if not spec[0]: - self.lambda_ = lambda db: lambda_() - else: - self.lambda_ = lambda_ - self.args = args or () - self.kw = kw or {} - if description: - self.description = description - elif lambda_.__doc__: - self.description = lambda_.__doc__ - else: - self.description = "custom function" - - def __call__(self, config): - return self.lambda_(config) - - def _as_string(self, config, negate=False): - return self._format_description(config) - - -class NotPredicate(Predicate): - def __init__(self, predicate, description=None): - self.predicate = predicate - self.description = description - - def __call__(self, config): - return not self.predicate(config) - - def _as_string(self, config, negate=False): - if self.description: - return self._format_description(config, not negate) - else: - return self.predicate._as_string(config, not negate) - - -class OrPredicate(Predicate): - def __init__(self, predicates, description=None): - self.predicates = predicates - self.description = description - - def __call__(self, config): - for pred in self.predicates: - if pred(config): - return True - return False - - def _eval_str(self, config, negate=False): - if negate: - conjunction = " and " - else: - conjunction = " or " - return conjunction.join(p._as_string(config, negate=negate) - for p in self.predicates) - - def _negation_str(self, config): - if self.description is not None: - return "Not " + self._format_description(config) - else: - return self._eval_str(config, negate=True) - - def _as_string(self, config, negate=False): - if negate: - return self._negation_str(config) - else: - if self.description is not None: - return self._format_description(config) - else: - return self._eval_str(config) - - -_as_predicate = Predicate.as_predicate - - -def _is_excluded(db, op, spec): - return SpecPredicate(db, op, spec)(config._current) - - -def _server_version(engine): - """Return a server_version_info tuple.""" - - # force metadata to be retrieved - conn = engine.connect() - version = getattr(engine.dialect, 'server_version_info', ()) - conn.close() - 
return version - - -def db_spec(*dbs): - return OrPredicate( - [Predicate.as_predicate(db) for db in dbs] - ) - - -def open(): - return skip_if(BooleanPredicate(False, "mark as execute")) - - -def closed(): - return skip_if(BooleanPredicate(True, "marked as skip")) - - -def fails(): - return fails_if(BooleanPredicate(True, "expected to fail")) - - -@decorator -def future(fn, *arg): - return fails_if(LambdaPredicate(fn), "Future feature") - - -def fails_on(db, reason=None): - return fails_if(SpecPredicate(db), reason) - - -def fails_on_everything_except(*dbs): - return succeeds_if( - OrPredicate([ - SpecPredicate(db) for db in dbs - ]) - ) - - -def skip(db, reason=None): - return skip_if(SpecPredicate(db), reason) - - -def only_on(dbs, reason=None): - return only_if( - OrPredicate([Predicate.as_predicate(db) for db in util.to_list(dbs)]) - ) - - -def exclude(db, op, spec, reason=None): - return skip_if(SpecPredicate(db, op, spec), reason) - - -def against(config, *queries): - assert queries, "no queries sent!" - return OrPredicate([ - Predicate.as_predicate(query) - for query in queries - ])(config) diff --git a/python/sqlalchemy/testing/fixtures.py b/python/sqlalchemy/testing/fixtures.py deleted file mode 100644 index e16bc77c..00000000 --- a/python/sqlalchemy/testing/fixtures.py +++ /dev/null @@ -1,376 +0,0 @@ -# testing/fixtures.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from . import config -from . import assertions, schema -from .util import adict -from .. import util -from .engines import drop_all_tables -from .entities import BasicEntity, ComparableEntity -import sys -import sqlalchemy as sa -from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta - -# whether or not we use unittest changes things dramatically, -# as far as how py.test collection works. - - -class TestBase(object): - # A sequence of database names to always run, regardless of the - # constraints below. - __whitelist__ = () - - # A sequence of requirement names matching testing.requires decorators - __requires__ = () - - # A sequence of dialect names to exclude from the test class. - __unsupported_on__ = () - - # If present, test class is only runnable for the *single* specified - # dialect. If you need multiple, use __unsupported_on__ and invert. - __only_on__ = None - - # A sequence of no-arg callables. If any are True, the entire testcase is - # skipped. 
- __skip_if__ = None - - def assert_(self, val, msg=None): - assert val, msg - - # apparently a handful of tests are doing this....OK - def setup(self): - if hasattr(self, "setUp"): - self.setUp() - - def teardown(self): - if hasattr(self, "tearDown"): - self.tearDown() - - -class TablesTest(TestBase): - - # 'once', None - run_setup_bind = 'once' - - # 'once', 'each', None - run_define_tables = 'once' - - # 'once', 'each', None - run_create_tables = 'once' - - # 'once', 'each', None - run_inserts = 'each' - - # 'each', None - run_deletes = 'each' - - # 'once', None - run_dispose_bind = None - - bind = None - metadata = None - tables = None - other = None - - @classmethod - def setup_class(cls): - cls._init_class() - - cls._setup_once_tables() - - cls._setup_once_inserts() - - @classmethod - def _init_class(cls): - if cls.run_define_tables == 'each': - if cls.run_create_tables == 'once': - cls.run_create_tables = 'each' - assert cls.run_inserts in ('each', None) - - cls.other = adict() - cls.tables = adict() - - cls.bind = cls.setup_bind() - cls.metadata = sa.MetaData() - cls.metadata.bind = cls.bind - - @classmethod - def _setup_once_inserts(cls): - if cls.run_inserts == 'once': - cls._load_fixtures() - cls.insert_data() - - @classmethod - def _setup_once_tables(cls): - if cls.run_define_tables == 'once': - cls.define_tables(cls.metadata) - if cls.run_create_tables == 'once': - cls.metadata.create_all(cls.bind) - cls.tables.update(cls.metadata.tables) - - def _setup_each_tables(self): - if self.run_define_tables == 'each': - self.tables.clear() - if self.run_create_tables == 'each': - drop_all_tables(self.metadata, self.bind) - self.metadata.clear() - self.define_tables(self.metadata) - if self.run_create_tables == 'each': - self.metadata.create_all(self.bind) - self.tables.update(self.metadata.tables) - elif self.run_create_tables == 'each': - drop_all_tables(self.metadata, self.bind) - self.metadata.create_all(self.bind) - - def _setup_each_inserts(self): - if self.run_inserts == 'each': - self._load_fixtures() - self.insert_data() - - def _teardown_each_tables(self): - # no need to run deletes if tables are recreated on setup - if self.run_define_tables != 'each' and self.run_deletes == 'each': - with self.bind.connect() as conn: - for table in reversed(self.metadata.sorted_tables): - try: - conn.execute(table.delete()) - except sa.exc.DBAPIError as ex: - util.print_( - ("Error emptying table %s: %r" % (table, ex)), - file=sys.stderr) - - def setup(self): - self._setup_each_tables() - self._setup_each_inserts() - - def teardown(self): - self._teardown_each_tables() - - @classmethod - def _teardown_once_metadata_bind(cls): - if cls.run_create_tables: - drop_all_tables(cls.metadata, cls.bind) - - if cls.run_dispose_bind == 'once': - cls.dispose_bind(cls.bind) - - cls.metadata.bind = None - - if cls.run_setup_bind is not None: - cls.bind = None - - @classmethod - def teardown_class(cls): - cls._teardown_once_metadata_bind() - - @classmethod - def setup_bind(cls): - return config.db - - @classmethod - def dispose_bind(cls, bind): - if hasattr(bind, 'dispose'): - bind.dispose() - elif hasattr(bind, 'close'): - bind.close() - - @classmethod - def define_tables(cls, metadata): - pass - - @classmethod - def fixtures(cls): - return {} - - @classmethod - def insert_data(cls): - pass - - def sql_count_(self, count, fn): - self.assert_sql_count(self.bind, fn, count) - - def sql_eq_(self, callable_, statements): - self.assert_sql(self.bind, callable_, statements) - - @classmethod - def 
_load_fixtures(cls): - """Insert rows as represented by the fixtures() method.""" - headers, rows = {}, {} - for table, data in cls.fixtures().items(): - if len(data) < 2: - continue - if isinstance(table, util.string_types): - table = cls.tables[table] - headers[table] = data[0] - rows[table] = data[1:] - for table in cls.metadata.sorted_tables: - if table not in headers: - continue - cls.bind.execute( - table.insert(), - [dict(zip(headers[table], column_values)) - for column_values in rows[table]]) - -from sqlalchemy import event - - -class RemovesEvents(object): - @util.memoized_property - def _event_fns(self): - return set() - - def event_listen(self, target, name, fn): - self._event_fns.add((target, name, fn)) - event.listen(target, name, fn) - - def teardown(self): - for key in self._event_fns: - event.remove(*key) - super_ = super(RemovesEvents, self) - if hasattr(super_, "teardown"): - super_.teardown() - - -class _ORMTest(object): - - @classmethod - def teardown_class(cls): - sa.orm.session.Session.close_all() - sa.orm.clear_mappers() - - -class ORMTest(_ORMTest, TestBase): - pass - - -class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults): - # 'once', 'each', None - run_setup_classes = 'once' - - # 'once', 'each', None - run_setup_mappers = 'each' - - classes = None - - @classmethod - def setup_class(cls): - cls._init_class() - - if cls.classes is None: - cls.classes = adict() - - cls._setup_once_tables() - cls._setup_once_classes() - cls._setup_once_mappers() - cls._setup_once_inserts() - - @classmethod - def teardown_class(cls): - cls._teardown_once_class() - cls._teardown_once_metadata_bind() - - def setup(self): - self._setup_each_tables() - self._setup_each_mappers() - self._setup_each_inserts() - - def teardown(self): - sa.orm.session.Session.close_all() - self._teardown_each_mappers() - self._teardown_each_tables() - - @classmethod - def _teardown_once_class(cls): - cls.classes.clear() - _ORMTest.teardown_class() - - @classmethod - def _setup_once_classes(cls): - if cls.run_setup_classes == 'once': - cls._with_register_classes(cls.setup_classes) - - @classmethod - def _setup_once_mappers(cls): - if cls.run_setup_mappers == 'once': - cls._with_register_classes(cls.setup_mappers) - - def _setup_each_mappers(self): - if self.run_setup_mappers == 'each': - self._with_register_classes(self.setup_mappers) - - @classmethod - def _with_register_classes(cls, fn): - """Run a setup method, framing the operation with a Base class - that will catch new subclasses to be established within - the "classes" registry. 
- - """ - cls_registry = cls.classes - - class FindFixture(type): - def __init__(cls, classname, bases, dict_): - cls_registry[classname] = cls - return type.__init__(cls, classname, bases, dict_) - - class _Base(util.with_metaclass(FindFixture, object)): - pass - - class Basic(BasicEntity, _Base): - pass - - class Comparable(ComparableEntity, _Base): - pass - - cls.Basic = Basic - cls.Comparable = Comparable - fn() - - def _teardown_each_mappers(self): - # some tests create mappers in the test bodies - # and will define setup_mappers as None - - # clear mappers in any case - if self.run_setup_mappers != 'once': - sa.orm.clear_mappers() - - @classmethod - def setup_classes(cls): - pass - - @classmethod - def setup_mappers(cls): - pass - - -class DeclarativeMappedTest(MappedTest): - run_setup_classes = 'once' - run_setup_mappers = 'once' - - @classmethod - def _setup_once_tables(cls): - pass - - @classmethod - def _with_register_classes(cls, fn): - cls_registry = cls.classes - - class FindFixtureDeclarative(DeclarativeMeta): - def __init__(cls, classname, bases, dict_): - cls_registry[classname] = cls - return DeclarativeMeta.__init__( - cls, classname, bases, dict_) - - class DeclarativeBasic(object): - __table_cls__ = schema.Table - - _DeclBase = declarative_base(metadata=cls.metadata, - metaclass=FindFixtureDeclarative, - cls=DeclarativeBasic) - cls.DeclarativeBasic = _DeclBase - fn() - - if cls.metadata.tables and cls.run_create_tables: - cls.metadata.create_all(config.db) diff --git a/python/sqlalchemy/testing/mock.py b/python/sqlalchemy/testing/mock.py deleted file mode 100644 index c836bb40..00000000 --- a/python/sqlalchemy/testing/mock.py +++ /dev/null @@ -1,21 +0,0 @@ -# testing/mock.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Import stub for mock library. -""" -from __future__ import absolute_import -from ..util import py33 - -if py33: - from unittest.mock import MagicMock, Mock, call, patch, ANY -else: - try: - from mock import MagicMock, Mock, call, patch, ANY - except ImportError: - raise ImportError( - "SQLAlchemy's test suite requires the " - "'mock' library as of 0.8.2.") diff --git a/python/sqlalchemy/testing/pickleable.py b/python/sqlalchemy/testing/pickleable.py deleted file mode 100644 index 7b696ad6..00000000 --- a/python/sqlalchemy/testing/pickleable.py +++ /dev/null @@ -1,143 +0,0 @@ -# testing/pickleable.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Classes used in pickling tests, need to be at the module level for -unpickling. -""" - -from . import fixtures - - -class User(fixtures.ComparableEntity): - pass - - -class Order(fixtures.ComparableEntity): - pass - - -class Dingaling(fixtures.ComparableEntity): - pass - - -class EmailUser(User): - pass - - -class Address(fixtures.ComparableEntity): - pass - - -# TODO: these are kind of arbitrary.... 
-class Child1(fixtures.ComparableEntity): - pass - - -class Child2(fixtures.ComparableEntity): - pass - - -class Parent(fixtures.ComparableEntity): - pass - - -class Screen(object): - - def __init__(self, obj, parent=None): - self.obj = obj - self.parent = parent - - -class Foo(object): - - def __init__(self, moredata): - self.data = 'im data' - self.stuff = 'im stuff' - self.moredata = moredata - - __hash__ = object.__hash__ - - def __eq__(self, other): - return other.data == self.data and \ - other.stuff == self.stuff and \ - other.moredata == self.moredata - - -class Bar(object): - - def __init__(self, x, y): - self.x = x - self.y = y - - __hash__ = object.__hash__ - - def __eq__(self, other): - return other.__class__ is self.__class__ and \ - other.x == self.x and \ - other.y == self.y - - def __str__(self): - return "Bar(%d, %d)" % (self.x, self.y) - - -class OldSchool: - - def __init__(self, x, y): - self.x = x - self.y = y - - def __eq__(self, other): - return other.__class__ is self.__class__ and \ - other.x == self.x and \ - other.y == self.y - - -class OldSchoolWithoutCompare: - - def __init__(self, x, y): - self.x = x - self.y = y - - -class BarWithoutCompare(object): - - def __init__(self, x, y): - self.x = x - self.y = y - - def __str__(self): - return "Bar(%d, %d)" % (self.x, self.y) - - -class NotComparable(object): - - def __init__(self, data): - self.data = data - - def __hash__(self): - return id(self) - - def __eq__(self, other): - return NotImplemented - - def __ne__(self, other): - return NotImplemented - - -class BrokenComparable(object): - - def __init__(self, data): - self.data = data - - def __hash__(self): - return id(self) - - def __eq__(self, other): - raise NotImplementedError - - def __ne__(self, other): - raise NotImplementedError diff --git a/python/sqlalchemy/testing/plugin/__init__.py b/python/sqlalchemy/testing/plugin/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/python/sqlalchemy/testing/plugin/bootstrap.py b/python/sqlalchemy/testing/plugin/bootstrap.py deleted file mode 100644 index 497fcb7e..00000000 --- a/python/sqlalchemy/testing/plugin/bootstrap.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Bootstrapper for nose/pytest plugins. - -The entire rationale for this system is to get the modules in plugin/ -imported without importing all of the supporting library, so that we can -set up things for testing before coverage starts. - -The rationale for all of plugin/ being *in* the supporting library in the -first place is so that the testing and plugin suite is available to other -libraries, mainly external SQLAlchemy and Alembic dialects, to make use -of the same test environment and standard suites available to -SQLAlchemy/Alembic themselves without the need to ship/install a separate -package outside of SQLAlchemy. - -NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; -this should be removable when Alembic targets SQLAlchemy 1.0.0. 
- -""" - -import os -import sys - -bootstrap_file = locals()['bootstrap_file'] -to_bootstrap = locals()['to_bootstrap'] - - -def load_file_as_module(name): - path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name) - if sys.version_info >= (3, 3): - from importlib import machinery - mod = machinery.SourceFileLoader(name, path).load_module() - else: - import imp - mod = imp.load_source(name, path) - return mod - -if to_bootstrap == "pytest": - sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base") - sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin") -elif to_bootstrap == "nose": - sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base") - sys.modules["sqla_noseplugin"] = load_file_as_module("noseplugin") -else: - raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa diff --git a/python/sqlalchemy/testing/plugin/noseplugin.py b/python/sqlalchemy/testing/plugin/noseplugin.py deleted file mode 100644 index 4c390d40..00000000 --- a/python/sqlalchemy/testing/plugin/noseplugin.py +++ /dev/null @@ -1,107 +0,0 @@ -# plugin/noseplugin.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Enhance nose with extra options and behaviors for running SQLAlchemy tests. - -Must be run via ./sqla_nose.py so that it is imported in the expected -way (e.g. as a package-less import). - -""" - -try: - # installed by bootstrap.py - import sqla_plugin_base as plugin_base -except ImportError: - # assume we're a package, use traditional import - from . import plugin_base - - -import os -import sys - -from nose.plugins import Plugin -import nose -fixtures = None - -py3k = sys.version_info >= (3, 0) - - -class NoseSQLAlchemy(Plugin): - enabled = True - - name = 'sqla_testing' - score = 100 - - def options(self, parser, env=os.environ): - Plugin.options(self, parser, env) - opt = parser.add_option - - def make_option(name, **kw): - callback_ = kw.pop("callback", None) - if callback_: - def wrap_(option, opt_str, value, parser): - callback_(opt_str, value, parser) - kw["callback"] = wrap_ - opt(name, **kw) - - plugin_base.setup_options(make_option) - plugin_base.read_config() - - def configure(self, options, conf): - super(NoseSQLAlchemy, self).configure(options, conf) - plugin_base.pre_begin(options) - - plugin_base.set_coverage_flag(options.enable_plugin_coverage) - - plugin_base.set_skip_test(nose.SkipTest) - - def begin(self): - global fixtures - from sqlalchemy.testing import fixtures # noqa - - plugin_base.post_begin() - - def describeTest(self, test): - return "" - - def wantFunction(self, fn): - return False - - def wantMethod(self, fn): - if py3k: - if not hasattr(fn.__self__, 'cls'): - return False - cls = fn.__self__.cls - else: - cls = fn.im_class - return plugin_base.want_method(cls, fn) - - def wantClass(self, cls): - return plugin_base.want_class(cls) - - def beforeTest(self, test): - if not hasattr(test.test, 'cls'): - return - plugin_base.before_test( - test, - test.test.cls.__module__, - test.test.cls, test.test.method.__name__) - - def afterTest(self, test): - plugin_base.after_test(test) - - def startContext(self, ctx): - if not isinstance(ctx, type) \ - or not issubclass(ctx, fixtures.TestBase): - return - plugin_base.start_test_class(ctx) - - def stopContext(self, ctx): - if not isinstance(ctx, type) \ - or not issubclass(ctx, fixtures.TestBase): - return - 
plugin_base.stop_test_class(ctx) diff --git a/python/sqlalchemy/testing/plugin/plugin_base.py b/python/sqlalchemy/testing/plugin/plugin_base.py deleted file mode 100644 index 6cdec05a..00000000 --- a/python/sqlalchemy/testing/plugin/plugin_base.py +++ /dev/null @@ -1,552 +0,0 @@ -# plugin/plugin_base.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Testing extensions. - -this module is designed to work as a testing-framework-agnostic library, -so that we can continue to support nose and also begin adding new -functionality via py.test. - -""" - -from __future__ import absolute_import - -import sys -import re - -py3k = sys.version_info >= (3, 0) - -if py3k: - import configparser -else: - import ConfigParser as configparser - -# late imports -fixtures = None -engines = None -exclusions = None -warnings = None -profiling = None -assertions = None -requirements = None -config = None -testing = None -util = None -file_config = None - - -logging = None -include_tags = set() -exclude_tags = set() -options = None - - -def setup_options(make_option): - make_option("--log-info", action="callback", type="string", callback=_log, - help="turn on info logging for (multiple OK)") - make_option("--log-debug", action="callback", - type="string", callback=_log, - help="turn on debug logging for (multiple OK)") - make_option("--db", action="append", type="string", dest="db", - help="Use prefab database uri. Multiple OK, " - "first one is run by default.") - make_option('--dbs', action='callback', callback=_list_dbs, - help="List available prefab dbs") - make_option("--dburi", action="append", type="string", dest="dburi", - help="Database uri. Multiple OK, " - "first one is run by default.") - make_option("--dropfirst", action="store_true", dest="dropfirst", - help="Drop all tables in the target database first") - make_option("--backend-only", action="store_true", dest="backend_only", - help="Run only tests marked with __backend__") - make_option("--low-connections", action="store_true", - dest="low_connections", - help="Use a low number of distinct connections - " - "i.e. for Oracle TNS") - make_option("--reversetop", action="store_true", - dest="reversetop", default=False, - help="Use a random-ordering set implementation in the ORM " - "(helps reveal dependency issues)") - make_option("--requirements", action="callback", type="string", - callback=_requirements_opt, - help="requirements class for testing, overrides setup.cfg") - make_option("--with-cdecimal", action="store_true", - dest="cdecimal", default=False, - help="Monkeypatch the cdecimal library into Python 'decimal' " - "for all tests") - make_option("--include-tag", action="callback", callback=_include_tag, - type="string", - help="Include tests with tag ") - make_option("--exclude-tag", action="callback", callback=_exclude_tag, - type="string", - help="Exclude tests with tag ") - make_option("--write-profiles", action="store_true", - dest="write_profiles", default=False, - help="Write/update failing profiling data.") - make_option("--force-write-profiles", action="store_true", - dest="force_write_profiles", default=False, - help="Unconditionally write/update profiling data.") - - -def configure_follower(follower_ident): - """Configure required state for a follower. - - This invokes in the parent process and typically includes - database creation. 
- - """ - from sqlalchemy.testing import provision - provision.FOLLOWER_IDENT = follower_ident - - -def memoize_important_follower_config(dict_): - """Store important configuration we will need to send to a follower. - - This invokes in the parent process after normal config is set up. - - This is necessary as py.test seems to not be using forking, so we - start with nothing in memory, *but* it isn't running our argparse - callables, so we have to just copy all of that over. - - """ - dict_['memoized_config'] = { - 'include_tags': include_tags, - 'exclude_tags': exclude_tags - } - - -def restore_important_follower_config(dict_): - """Restore important configuration needed by a follower. - - This invokes in the follower process. - - """ - global include_tags, exclude_tags - include_tags.update(dict_['memoized_config']['include_tags']) - exclude_tags.update(dict_['memoized_config']['exclude_tags']) - - -def read_config(): - global file_config - file_config = configparser.ConfigParser() - file_config.read(['setup.cfg', 'test.cfg']) - - -def pre_begin(opt): - """things to set up early, before coverage might be setup.""" - global options - options = opt - for fn in pre_configure: - fn(options, file_config) - - -def set_coverage_flag(value): - options.has_coverage = value - -_skip_test_exception = None - - -def set_skip_test(exc): - global _skip_test_exception - _skip_test_exception = exc - - -def post_begin(): - """things to set up later, once we know coverage is running.""" - # Lazy setup of other options (post coverage) - for fn in post_configure: - fn(options, file_config) - - # late imports, has to happen after config as well - # as nose plugins like coverage - global util, fixtures, engines, exclusions, \ - assertions, warnings, profiling,\ - config, testing - from sqlalchemy import testing # noqa - from sqlalchemy.testing import fixtures, engines, exclusions # noqa - from sqlalchemy.testing import assertions, warnings, profiling # noqa - from sqlalchemy.testing import config # noqa - from sqlalchemy import util # noqa - warnings.setup_filters() - - -def _log(opt_str, value, parser): - global logging - if not logging: - import logging - logging.basicConfig() - - if opt_str.endswith('-info'): - logging.getLogger(value).setLevel(logging.INFO) - elif opt_str.endswith('-debug'): - logging.getLogger(value).setLevel(logging.DEBUG) - - -def _list_dbs(*args): - print("Available --db options (use --dburi to override)") - for macro in sorted(file_config.options('db')): - print("%20s\t%s" % (macro, file_config.get('db', macro))) - sys.exit(0) - - -def _requirements_opt(opt_str, value, parser): - _setup_requirements(value) - - -def _exclude_tag(opt_str, value, parser): - exclude_tags.add(value.replace('-', '_')) - - -def _include_tag(opt_str, value, parser): - include_tags.add(value.replace('-', '_')) - -pre_configure = [] -post_configure = [] - - -def pre(fn): - pre_configure.append(fn) - return fn - - -def post(fn): - post_configure.append(fn) - return fn - - -@pre -def _setup_options(opt, file_config): - global options - options = opt - - -@pre -def _monkeypatch_cdecimal(options, file_config): - if options.cdecimal: - import cdecimal - sys.modules['decimal'] = cdecimal - - -@post -def _init_skiptest(options, file_config): - from sqlalchemy.testing import config - - config._skip_test_exception = _skip_test_exception - - -@post -def _engine_uri(options, file_config): - from sqlalchemy.testing import config - from sqlalchemy import testing - from sqlalchemy.testing import provision - - if 
options.dburi: - db_urls = list(options.dburi) - else: - db_urls = [] - - if options.db: - for db_token in options.db: - for db in re.split(r'[,\s]+', db_token): - if db not in file_config.options('db'): - raise RuntimeError( - "Unknown URI specifier '%s'. " - "Specify --dbs for known uris." - % db) - else: - db_urls.append(file_config.get('db', db)) - - if not db_urls: - db_urls.append(file_config.get('db', 'default')) - - for db_url in db_urls: - cfg = provision.setup_config( - db_url, options, file_config, provision.FOLLOWER_IDENT) - - if not config._current: - cfg.set_as_current(cfg, testing) - - -@post -def _requirements(options, file_config): - - requirement_cls = file_config.get('sqla_testing', "requirement_cls") - _setup_requirements(requirement_cls) - - -def _setup_requirements(argument): - from sqlalchemy.testing import config - from sqlalchemy import testing - - if config.requirements is not None: - return - - modname, clsname = argument.split(":") - - # importlib.import_module() only introduced in 2.7, a little - # late - mod = __import__(modname) - for component in modname.split(".")[1:]: - mod = getattr(mod, component) - req_cls = getattr(mod, clsname) - - config.requirements = testing.requires = req_cls() - - -@post -def _prep_testing_database(options, file_config): - from sqlalchemy.testing import config, util - from sqlalchemy.testing.exclusions import against - from sqlalchemy import schema, inspect - - if options.dropfirst: - for cfg in config.Config.all_configs(): - e = cfg.db - inspector = inspect(e) - try: - view_names = inspector.get_view_names() - except NotImplementedError: - pass - else: - for vname in view_names: - e.execute(schema._DropView( - schema.Table(vname, schema.MetaData()) - )) - - if config.requirements.schemas.enabled_for_config(cfg): - try: - view_names = inspector.get_view_names( - schema="test_schema") - except NotImplementedError: - pass - else: - for vname in view_names: - e.execute(schema._DropView( - schema.Table(vname, schema.MetaData(), - schema="test_schema") - )) - - util.drop_all_tables(e, inspector) - - if config.requirements.schemas.enabled_for_config(cfg): - util.drop_all_tables(e, inspector, schema=cfg.test_schema) - - if against(cfg, "postgresql"): - from sqlalchemy.dialects import postgresql - for enum in inspector.get_enums("*"): - e.execute(postgresql.DropEnumType( - postgresql.ENUM( - name=enum['name'], - schema=enum['schema']))) - - -@post -def _reverse_topological(options, file_config): - if options.reversetop: - from sqlalchemy.orm.util import randomize_unitofwork - randomize_unitofwork() - - -@post -def _post_setup_options(opt, file_config): - from sqlalchemy.testing import config - config.options = options - config.file_config = file_config - - -@post -def _setup_profiling(options, file_config): - from sqlalchemy.testing import profiling - profiling._profile_stats = profiling.ProfileStatsFile( - file_config.get('sqla_testing', 'profile_file')) - - -def want_class(cls): - if not issubclass(cls, fixtures.TestBase): - return False - elif cls.__name__.startswith('_'): - return False - elif config.options.backend_only and not getattr(cls, '__backend__', - False): - return False - else: - return True - - -def want_method(cls, fn): - if not fn.__name__.startswith("test_"): - return False - elif fn.__module__ is None: - return False - elif include_tags: - return ( - hasattr(cls, '__tags__') and - exclusions.tags(cls.__tags__).include_test( - include_tags, exclude_tags) - ) or ( - hasattr(fn, '_sa_exclusion_extend') and - 
fn._sa_exclusion_extend.include_test( - include_tags, exclude_tags) - ) - elif exclude_tags and hasattr(cls, '__tags__'): - return exclusions.tags(cls.__tags__).include_test( - include_tags, exclude_tags) - elif exclude_tags and hasattr(fn, '_sa_exclusion_extend'): - return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags) - else: - return True - - -def generate_sub_tests(cls, module): - if getattr(cls, '__backend__', False): - for cfg in _possible_configs_for_cls(cls): - name = "%s_%s_%s" % (cls.__name__, cfg.db.name, cfg.db.driver) - subcls = type( - name, - (cls, ), - { - "__only_on__": ("%s+%s" % (cfg.db.name, cfg.db.driver)), - } - ) - setattr(module, name, subcls) - yield subcls - else: - yield cls - - -def start_test_class(cls): - _do_skips(cls) - _setup_engine(cls) - - -def stop_test_class(cls): - #from sqlalchemy import inspect - #assert not inspect(testing.db).get_table_names() - engines.testing_reaper._stop_test_ctx() - if not options.low_connections: - assertions.global_cleanup_assertions() - _restore_engine() - - -def _restore_engine(): - config._current.reset(testing) - - -def _setup_engine(cls): - if getattr(cls, '__engine_options__', None): - eng = engines.testing_engine(options=cls.__engine_options__) - config._current.push_engine(eng, testing) - - -def before_test(test, test_module_name, test_class, test_name): - - # like a nose id, e.g.: - # "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause" - name = test_class.__name__ - - suffix = "_%s_%s" % (config.db.name, config.db.driver) - if name.endswith(suffix): - name = name[0:-(len(suffix))] - - id_ = "%s.%s.%s" % (test_module_name, name, test_name) - - profiling._current_test = id_ - - -def after_test(test): - engines.testing_reaper._after_test_ctx() - - -def _possible_configs_for_cls(cls, reasons=None): - all_configs = set(config.Config.all_configs()) - - if cls.__unsupported_on__: - spec = exclusions.db_spec(*cls.__unsupported_on__) - for config_obj in list(all_configs): - if spec(config_obj): - all_configs.remove(config_obj) - - if getattr(cls, '__only_on__', None): - spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) - for config_obj in list(all_configs): - if not spec(config_obj): - all_configs.remove(config_obj) - - if hasattr(cls, '__requires__'): - requirements = config.requirements - for config_obj in list(all_configs): - for requirement in cls.__requires__: - check = getattr(requirements, requirement) - - skip_reasons = check.matching_config_reasons(config_obj) - if skip_reasons: - all_configs.remove(config_obj) - if reasons is not None: - reasons.extend(skip_reasons) - break - - if hasattr(cls, '__prefer_requires__'): - non_preferred = set() - requirements = config.requirements - for config_obj in list(all_configs): - for requirement in cls.__prefer_requires__: - check = getattr(requirements, requirement) - - if not check.enabled_for_config(config_obj): - non_preferred.add(config_obj) - if all_configs.difference(non_preferred): - all_configs.difference_update(non_preferred) - - return all_configs - - -def _do_skips(cls): - reasons = [] - all_configs = _possible_configs_for_cls(cls, reasons) - - if getattr(cls, '__skip_if__', False): - for c in getattr(cls, '__skip_if__'): - if c(): - config.skip_test("'%s' skipped by %s" % ( - cls.__name__, c.__name__) - ) - - if not all_configs: - if getattr(cls, '__backend__', False): - msg = "'%s' unsupported for implementation '%s'" % ( - cls.__name__, cls.__only_on__) - else: - msg = "'%s' unsupported on any DB implementation %s%s" % 
( - cls.__name__, - ", ".join( - "'%s(%s)+%s'" % ( - config_obj.db.name, - ".".join( - str(dig) for dig in - config_obj.db.dialect.server_version_info), - config_obj.db.driver - ) - for config_obj in config.Config.all_configs() - ), - ", ".join(reasons) - ) - config.skip_test(msg) - elif hasattr(cls, '__prefer_backends__'): - non_preferred = set() - spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__)) - for config_obj in all_configs: - if not spec(config_obj): - non_preferred.add(config_obj) - if all_configs.difference(non_preferred): - all_configs.difference_update(non_preferred) - - if config._current not in all_configs: - _setup_config(all_configs.pop(), cls) - - -def _setup_config(config_obj, ctx): - config._current.push(config_obj, testing) diff --git a/python/sqlalchemy/testing/plugin/pytestplugin.py b/python/sqlalchemy/testing/plugin/pytestplugin.py deleted file mode 100644 index 30d7aa73..00000000 --- a/python/sqlalchemy/testing/plugin/pytestplugin.py +++ /dev/null @@ -1,181 +0,0 @@ -try: - # installed by bootstrap.py - import sqla_plugin_base as plugin_base -except ImportError: - # assume we're a package, use traditional import - from . import plugin_base - -import pytest -import argparse -import inspect -import collections -import itertools - -try: - import xdist # noqa - has_xdist = True -except ImportError: - has_xdist = False - - -def pytest_addoption(parser): - group = parser.getgroup("sqlalchemy") - - def make_option(name, **kw): - callback_ = kw.pop("callback", None) - if callback_: - class CallableAction(argparse.Action): - def __call__(self, parser, namespace, - values, option_string=None): - callback_(option_string, values, parser) - kw["action"] = CallableAction - - group.addoption(name, **kw) - - plugin_base.setup_options(make_option) - plugin_base.read_config() - - -def pytest_configure(config): - if hasattr(config, "slaveinput"): - plugin_base.restore_important_follower_config(config.slaveinput) - plugin_base.configure_follower( - config.slaveinput["follower_ident"] - ) - - plugin_base.pre_begin(config.option) - - plugin_base.set_coverage_flag(bool(getattr(config.option, - "cov_source", False))) - - plugin_base.set_skip_test(pytest.skip.Exception) - - -def pytest_sessionstart(session): - plugin_base.post_begin() - -if has_xdist: - _follower_count = itertools.count(1) - - def pytest_configure_node(node): - # the master for each node fills slaveinput dictionary - # which pytest-xdist will transfer to the subprocess - - plugin_base.memoize_important_follower_config(node.slaveinput) - - node.slaveinput["follower_ident"] = "test_%s" % next(_follower_count) - from sqlalchemy.testing import provision - provision.create_follower_db(node.slaveinput["follower_ident"]) - - def pytest_testnodedown(node, error): - from sqlalchemy.testing import provision - provision.drop_follower_db(node.slaveinput["follower_ident"]) - - -def pytest_collection_modifyitems(session, config, items): - # look for all those classes that specify __backend__ and - # expand them out into per-database test cases. - - # this is much easier to do within pytest_pycollect_makeitem, however - # pytest is iterating through cls.__dict__ as makeitem is - # called which causes a "dictionary changed size" error on py3k. - # I'd submit a pullreq for them to turn it into a list first, but - # it's to suit the rather odd use case here which is that we are adding - # new classes to a module on the fly. 
- - rebuilt_items = collections.defaultdict(list) - items[:] = [ - item for item in - items if isinstance(item.parent, pytest.Instance) - and not item.parent.parent.name.startswith("_")] - test_classes = set(item.parent for item in items) - for test_class in test_classes: - for sub_cls in plugin_base.generate_sub_tests( - test_class.cls, test_class.parent.module): - if sub_cls is not test_class.cls: - list_ = rebuilt_items[test_class.cls] - - for inst in pytest.Class( - sub_cls.__name__, - parent=test_class.parent.parent).collect(): - list_.extend(inst.collect()) - - newitems = [] - for item in items: - if item.parent.cls in rebuilt_items: - newitems.extend(rebuilt_items[item.parent.cls]) - rebuilt_items[item.parent.cls][:] = [] - else: - newitems.append(item) - - # seems like the functions attached to a test class aren't sorted already? - # is that true and why's that? (when using unittest, they're sorted) - items[:] = sorted(newitems, key=lambda item: ( - item.parent.parent.parent.name, - item.parent.parent.name, - item.name - )) - - -def pytest_pycollect_makeitem(collector, name, obj): - if inspect.isclass(obj) and plugin_base.want_class(obj): - return pytest.Class(name, parent=collector) - elif inspect.isfunction(obj) and \ - isinstance(collector, pytest.Instance) and \ - plugin_base.want_method(collector.cls, obj): - return pytest.Function(name, parent=collector) - else: - return [] - -_current_class = None - - -def pytest_runtest_setup(item): - # here we seem to get called only based on what we collected - # in pytest_collection_modifyitems. So to do class-based stuff - # we have to tear that out. - global _current_class - - if not isinstance(item, pytest.Function): - return - - # ... so we're doing a little dance here to figure it out... - if _current_class is None: - class_setup(item.parent.parent) - _current_class = item.parent.parent - - # this is needed for the class-level, to ensure that the - # teardown runs after the class is completed with its own - # class-level teardown... - def finalize(): - global _current_class - class_teardown(item.parent.parent) - _current_class = None - item.parent.parent.addfinalizer(finalize) - - test_setup(item) - - -def pytest_runtest_teardown(item): - # ...but this works better as the hook here rather than - # using a finalizer, as the finalizer seems to get in the way - # of the test reporting failures correctly (you get a bunch of - # py.test assertion stuff instead) - test_teardown(item) - - -def test_setup(item): - plugin_base.before_test(item, item.parent.module.__name__, - item.parent.cls, item.name) - - -def test_teardown(item): - plugin_base.after_test(item) - - -def class_setup(item): - plugin_base.start_test_class(item.cls) - - -def class_teardown(item): - plugin_base.stop_test_class(item.cls) diff --git a/python/sqlalchemy/testing/profiling.py b/python/sqlalchemy/testing/profiling.py deleted file mode 100644 index 35773565..00000000 --- a/python/sqlalchemy/testing/profiling.py +++ /dev/null @@ -1,260 +0,0 @@ -# testing/profiling.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Profiling support for unit and performance tests. - -These are special purpose profiling methods which operate -in a more fine-grained way than nose's profiling plugin. - -""" - -import os -import sys -from .util import gc_collect -from . 
import config -import pstats -import collections -import contextlib - -try: - import cProfile -except ImportError: - cProfile = None -from ..util import jython, pypy, win32, update_wrapper - -_current_test = None - -# ProfileStatsFile instance, set up in plugin_base -_profile_stats = None - - -class ProfileStatsFile(object): - """"Store per-platform/fn profiling results in a file. - - We're still targeting Py2.5, 2.4 on 0.7 with no dependencies, - so no json lib :( need to roll something silly - - """ - - def __init__(self, filename): - self.force_write = ( - config.options is not None and - config.options.force_write_profiles - ) - self.write = self.force_write or ( - config.options is not None and - config.options.write_profiles - ) - self.fname = os.path.abspath(filename) - self.short_fname = os.path.split(self.fname)[-1] - self.data = collections.defaultdict( - lambda: collections.defaultdict(dict)) - self._read() - if self.write: - # rewrite for the case where features changed, - # etc. - self._write() - - @property - def platform_key(self): - - dbapi_key = config.db.name + "_" + config.db.driver - - # keep it at 2.7, 3.1, 3.2, etc. for now. - py_version = '.'.join([str(v) for v in sys.version_info[0:2]]) - - platform_tokens = [py_version] - platform_tokens.append(dbapi_key) - if jython: - platform_tokens.append("jython") - if pypy: - platform_tokens.append("pypy") - if win32: - platform_tokens.append("win") - _has_cext = config.requirements._has_cextensions() - platform_tokens.append(_has_cext and "cextensions" or "nocextensions") - return "_".join(platform_tokens) - - def has_stats(self): - test_key = _current_test - return ( - test_key in self.data and - self.platform_key in self.data[test_key] - ) - - def result(self, callcount): - test_key = _current_test - per_fn = self.data[test_key] - per_platform = per_fn[self.platform_key] - - if 'counts' not in per_platform: - per_platform['counts'] = counts = [] - else: - counts = per_platform['counts'] - - if 'current_count' not in per_platform: - per_platform['current_count'] = current_count = 0 - else: - current_count = per_platform['current_count'] - - has_count = len(counts) > current_count - - if not has_count: - counts.append(callcount) - if self.write: - self._write() - result = None - else: - result = per_platform['lineno'], counts[current_count] - per_platform['current_count'] += 1 - return result - - def replace(self, callcount): - test_key = _current_test - per_fn = self.data[test_key] - per_platform = per_fn[self.platform_key] - counts = per_platform['counts'] - current_count = per_platform['current_count'] - if current_count < len(counts): - counts[current_count - 1] = callcount - else: - counts[-1] = callcount - if self.write: - self._write() - - def _header(self): - return ( - "# %s\n" - "# This file is written out on a per-environment basis.\n" - "# For each test in aaa_profiling, the corresponding " - "function and \n" - "# environment is located within this file. " - "If it doesn't exist,\n" - "# the test is skipped.\n" - "# If a callcount does exist, it is compared " - "to what we received. 
\n" - "# assertions are raised if the counts do not match.\n" - "# \n" - "# To add a new callcount test, apply the function_call_count \n" - "# decorator and re-run the tests using the --write-profiles \n" - "# option - this file will be rewritten including the new count.\n" - "# \n" - ) % (self.fname) - - def _read(self): - try: - profile_f = open(self.fname) - except IOError: - return - for lineno, line in enumerate(profile_f): - line = line.strip() - if not line or line.startswith("#"): - continue - - test_key, platform_key, counts = line.split() - per_fn = self.data[test_key] - per_platform = per_fn[platform_key] - c = [int(count) for count in counts.split(",")] - per_platform['counts'] = c - per_platform['lineno'] = lineno + 1 - per_platform['current_count'] = 0 - profile_f.close() - - def _write(self): - print(("Writing profile file %s" % self.fname)) - profile_f = open(self.fname, "w") - profile_f.write(self._header()) - for test_key in sorted(self.data): - - per_fn = self.data[test_key] - profile_f.write("\n# TEST: %s\n\n" % test_key) - for platform_key in sorted(per_fn): - per_platform = per_fn[platform_key] - c = ",".join(str(count) for count in per_platform['counts']) - profile_f.write("%s %s %s\n" % (test_key, platform_key, c)) - profile_f.close() - - -def function_call_count(variance=0.05): - """Assert a target for a test case's function call count. - - The main purpose of this assertion is to detect changes in - callcounts for various functions - the actual number is not as important. - Callcounts are stored in a file keyed to Python version and OS platform - information. This file is generated automatically for new tests, - and versioned so that unexpected changes in callcounts will be detected. - - """ - - def decorate(fn): - def wrap(*args, **kw): - with count_functions(variance=variance): - return fn(*args, **kw) - return update_wrapper(wrap, fn) - return decorate - - -@contextlib.contextmanager -def count_functions(variance=0.05): - if cProfile is None: - raise SkipTest("cProfile is not installed") - - if not _profile_stats.has_stats() and not _profile_stats.write: - config.skip_test( - "No profiling stats available on this " - "platform for this function. Run tests with " - "--write-profiles to add statistics to %s for " - "this platform." % _profile_stats.short_fname) - - gc_collect() - - pr = cProfile.Profile() - pr.enable() - #began = time.time() - yield - #ended = time.time() - pr.disable() - - #s = compat.StringIO() - stats = pstats.Stats(pr, stream=sys.stdout) - - #timespent = ended - began - callcount = stats.total_calls - - expected = _profile_stats.result(callcount) - - if expected is None: - expected_count = None - else: - line_no, expected_count = expected - - print(("Pstats calls: %d Expected %s" % ( - callcount, - expected_count - ) - )) - stats.sort_stats("cumulative") - stats.print_stats() - - if expected_count: - deviance = int(callcount * variance) - failed = abs(callcount - expected_count) > deviance - - if failed or _profile_stats.force_write: - if _profile_stats.write: - _profile_stats.replace(callcount) - else: - raise AssertionError( - "Adjusted function call count %s not within %s%% " - "of expected %s, platform %s. Rerun with " - "--write-profiles to " - "regenerate this callcount." 
- % ( - callcount, (variance * 100), - expected_count, _profile_stats.platform_key)) - - diff --git a/python/sqlalchemy/testing/provision.py b/python/sqlalchemy/testing/provision.py deleted file mode 100644 index 77527571..00000000 --- a/python/sqlalchemy/testing/provision.py +++ /dev/null @@ -1,201 +0,0 @@ -from sqlalchemy.engine import url as sa_url -from sqlalchemy import text -from sqlalchemy.util import compat -from . import config, engines - - -FOLLOWER_IDENT = None - - -class register(object): - def __init__(self): - self.fns = {} - - @classmethod - def init(cls, fn): - return register().for_db("*")(fn) - - def for_db(self, dbname): - def decorate(fn): - self.fns[dbname] = fn - return self - return decorate - - def __call__(self, cfg, *arg): - if isinstance(cfg, compat.string_types): - url = sa_url.make_url(cfg) - elif isinstance(cfg, sa_url.URL): - url = cfg - else: - url = cfg.db.url - backend = url.get_backend_name() - if backend in self.fns: - return self.fns[backend](cfg, *arg) - else: - return self.fns['*'](cfg, *arg) - - -def create_follower_db(follower_ident): - - for cfg in _configs_for_db_operation(): - _create_db(cfg, cfg.db, follower_ident) - - -def configure_follower(follower_ident): - for cfg in config.Config.all_configs(): - _configure_follower(cfg, follower_ident) - - -def setup_config(db_url, options, file_config, follower_ident): - if follower_ident: - db_url = _follower_url_from_main(db_url, follower_ident) - db_opts = {} - _update_db_opts(db_url, db_opts) - eng = engines.testing_engine(db_url, db_opts) - eng.connect().close() - cfg = config.Config.register(eng, db_opts, options, file_config) - if follower_ident: - _configure_follower(cfg, follower_ident) - return cfg - - -def drop_follower_db(follower_ident): - for cfg in _configs_for_db_operation(): - _drop_db(cfg, cfg.db, follower_ident) - - -def _configs_for_db_operation(): - hosts = set() - - for cfg in config.Config.all_configs(): - cfg.db.dispose() - - for cfg in config.Config.all_configs(): - url = cfg.db.url - backend = url.get_backend_name() - host_conf = ( - backend, - url.username, url.host, url.database) - - if host_conf not in hosts: - yield cfg - hosts.add(host_conf) - - for cfg in config.Config.all_configs(): - cfg.db.dispose() - - -@register.init -def _create_db(cfg, eng, ident): - raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url) - - -@register.init -def _drop_db(cfg, eng, ident): - raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url) - - -@register.init -def _update_db_opts(db_url, db_opts): - pass - - -@register.init -def _configure_follower(cfg, ident): - pass - - -@register.init -def _follower_url_from_main(url, ident): - url = sa_url.make_url(url) - url.database = ident - return url - - -@_update_db_opts.for_db("mssql") -def _mssql_update_db_opts(db_url, db_opts): - db_opts['legacy_schema_aliasing'] = False - - -@_follower_url_from_main.for_db("sqlite") -def _sqlite_follower_url_from_main(url, ident): - url = sa_url.make_url(url) - if not url.database or url.database == ':memory:': - return url - else: - return sa_url.make_url("sqlite:///%s.db" % ident) - - -@_create_db.for_db("postgresql") -def _pg_create_db(cfg, eng, ident): - with eng.connect().execution_options( - isolation_level="AUTOCOMMIT") as conn: - try: - _pg_drop_db(cfg, conn, ident) - except Exception: - pass - currentdb = conn.scalar("select current_database()") - conn.execute("CREATE DATABASE %s TEMPLATE %s" % (ident, currentdb)) - - -@_create_db.for_db("mysql") -def 
_mysql_create_db(cfg, eng, ident): - with eng.connect() as conn: - try: - _mysql_drop_db(cfg, conn, ident) - except Exception: - pass - conn.execute("CREATE DATABASE %s" % ident) - conn.execute("CREATE DATABASE %s_test_schema" % ident) - conn.execute("CREATE DATABASE %s_test_schema_2" % ident) - - -@_configure_follower.for_db("mysql") -def _mysql_configure_follower(config, ident): - config.test_schema = "%s_test_schema" % ident - config.test_schema_2 = "%s_test_schema_2" % ident - - -@_create_db.for_db("sqlite") -def _sqlite_create_db(cfg, eng, ident): - pass - - -@_drop_db.for_db("postgresql") -def _pg_drop_db(cfg, eng, ident): - with eng.connect().execution_options( - isolation_level="AUTOCOMMIT") as conn: - conn.execute( - text( - "select pg_terminate_backend(pid) from pg_stat_activity " - "where usename=current_user and pid != pg_backend_pid() " - "and datname=:dname" - ), dname=ident) - conn.execute("DROP DATABASE %s" % ident) - - -@_drop_db.for_db("sqlite") -def _sqlite_drop_db(cfg, eng, ident): - pass - #os.remove("%s.db" % ident) - - -@_drop_db.for_db("mysql") -def _mysql_drop_db(cfg, eng, ident): - with eng.connect() as conn: - try: - conn.execute("DROP DATABASE %s_test_schema" % ident) - except Exception: - pass - try: - conn.execute("DROP DATABASE %s_test_schema_2" % ident) - except Exception: - pass - try: - conn.execute("DROP DATABASE %s" % ident) - except Exception: - pass - - - - diff --git a/python/sqlalchemy/testing/replay_fixture.py b/python/sqlalchemy/testing/replay_fixture.py deleted file mode 100644 index b50f52e3..00000000 --- a/python/sqlalchemy/testing/replay_fixture.py +++ /dev/null @@ -1,172 +0,0 @@ -from . import fixtures -from . import profiling -from .. import util -import types -from collections import deque -import contextlib -from . import config -from sqlalchemy import MetaData -from sqlalchemy import create_engine -from sqlalchemy.orm import Session - - -class ReplayFixtureTest(fixtures.TestBase): - - @contextlib.contextmanager - def _dummy_ctx(self, *arg, **kw): - yield - - def test_invocation(self): - - dbapi_session = ReplayableSession() - creator = config.db.pool._creator - recorder = lambda: dbapi_session.recorder(creator()) - engine = create_engine( - config.db.url, creator=recorder, - use_native_hstore=False) - self.metadata = MetaData(engine) - self.engine = engine - self.session = Session(engine) - - self.setup_engine() - try: - self._run_steps(ctx=self._dummy_ctx) - finally: - self.teardown_engine() - engine.dispose() - - player = lambda: dbapi_session.player() - engine = create_engine( - config.db.url, creator=player, - use_native_hstore=False) - - self.metadata = MetaData(engine) - self.engine = engine - self.session = Session(engine) - - self.setup_engine() - try: - self._run_steps(ctx=profiling.count_functions) - finally: - self.session.close() - engine.dispose() - - def setup_engine(self): - pass - - def teardown_engine(self): - pass - - def _run_steps(self, ctx): - raise NotImplementedError() - - -class ReplayableSession(object): - """A simple record/playback tool. - - This is *not* a mock testing class. It only records a session for later - playback and makes no assertions on call consistency whatsoever. It's - unlikely to be suitable for anything other than DB-API recording. 
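
A stripped-down illustration of the record/playback scheme described above, not the class's real interface: the recorder forwards each call to the live object and appends the result to a shared buffer; the player later answers the same call sequence purely from that buffer, so no database is needed on replay::

    from collections import deque

    class Recorder(object):
        def __init__(self, buffer, subject):
            self._buffer, self._subject = buffer, subject

        def __call__(self, *args, **kw):
            result = self._subject(*args, **kw)   # real call
            self._buffer.append(result)           # remember the answer
            return result

    class Player(object):
        def __init__(self, buffer):
            self._buffer = buffer

        def __call__(self, *args, **kw):
            return self._buffer.popleft()         # replay in FIFO order

    buf = deque()
    rec = Recorder(buf, lambda x: x * 2)
    assert rec(21) == 42           # recorded against the live callable
    assert Player(buf)(21) == 42   # replayed without the live callable
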
- - """ - - Callable = object() - NoAttribute = object() - - if util.py2k: - Natives = set([getattr(types, t) - for t in dir(types) if not t.startswith('_')]).\ - difference([getattr(types, t) - for t in ('FunctionType', 'BuiltinFunctionType', - 'MethodType', 'BuiltinMethodType', - 'LambdaType', 'UnboundMethodType',)]) - else: - Natives = set([getattr(types, t) - for t in dir(types) if not t.startswith('_')]).\ - union([type(t) if not isinstance(t, type) - else t for t in __builtins__.values()]).\ - difference([getattr(types, t) - for t in ('FunctionType', 'BuiltinFunctionType', - 'MethodType', 'BuiltinMethodType', - 'LambdaType', )]) - - def __init__(self): - self.buffer = deque() - - def recorder(self, base): - return self.Recorder(self.buffer, base) - - def player(self): - return self.Player(self.buffer) - - class Recorder(object): - def __init__(self, buffer, subject): - self._buffer = buffer - self._subject = subject - - def __call__(self, *args, **kw): - subject, buffer = [object.__getattribute__(self, x) - for x in ('_subject', '_buffer')] - - result = subject(*args, **kw) - if type(result) not in ReplayableSession.Natives: - buffer.append(ReplayableSession.Callable) - return type(self)(buffer, result) - else: - buffer.append(result) - return result - - @property - def _sqla_unwrap(self): - return self._subject - - def __getattribute__(self, key): - try: - return object.__getattribute__(self, key) - except AttributeError: - pass - - subject, buffer = [object.__getattribute__(self, x) - for x in ('_subject', '_buffer')] - try: - result = type(subject).__getattribute__(subject, key) - except AttributeError: - buffer.append(ReplayableSession.NoAttribute) - raise - else: - if type(result) not in ReplayableSession.Natives: - buffer.append(ReplayableSession.Callable) - return type(self)(buffer, result) - else: - buffer.append(result) - return result - - class Player(object): - def __init__(self, buffer): - self._buffer = buffer - - def __call__(self, *args, **kw): - buffer = object.__getattribute__(self, '_buffer') - result = buffer.popleft() - if result is ReplayableSession.Callable: - return self - else: - return result - - @property - def _sqla_unwrap(self): - return None - - def __getattribute__(self, key): - try: - return object.__getattribute__(self, key) - except AttributeError: - pass - buffer = object.__getattribute__(self, '_buffer') - result = buffer.popleft() - if result is ReplayableSession.Callable: - return self - elif result is ReplayableSession.NoAttribute: - raise AttributeError(key) - else: - return result diff --git a/python/sqlalchemy/testing/requirements.py b/python/sqlalchemy/testing/requirements.py deleted file mode 100644 index e8b3a995..00000000 --- a/python/sqlalchemy/testing/requirements.py +++ /dev/null @@ -1,709 +0,0 @@ -# testing/requirements.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Global database feature support policy. - -Provides decorators to mark tests requiring specific feature support from the -target database. - -External dialect test suites should subclass SuiteRequirements -to provide specific inclusion/exclusions. - -""" - -from . import exclusions -from .. 
import util - - -class Requirements(object): - pass - - -class SuiteRequirements(Requirements): - - @property - def create_table(self): - """target platform can emit basic CreateTable DDL.""" - - return exclusions.open() - - @property - def drop_table(self): - """target platform can emit basic DropTable DDL.""" - - return exclusions.open() - - @property - def foreign_keys(self): - """Target database must support foreign keys.""" - - return exclusions.open() - - @property - def on_update_cascade(self): - """"target database must support ON UPDATE..CASCADE behavior in - foreign keys.""" - - return exclusions.open() - - @property - def non_updating_cascade(self): - """target database must *not* support ON UPDATE..CASCADE behavior in - foreign keys.""" - return exclusions.closed() - - @property - def deferrable_fks(self): - return exclusions.closed() - - @property - def on_update_or_deferrable_fks(self): - # TODO: exclusions should be composable, - # somehow only_if([x, y]) isn't working here, negation/conjunctions - # getting confused. - return exclusions.only_if( - lambda: self.on_update_cascade.enabled or - self.deferrable_fks.enabled - ) - - @property - def self_referential_foreign_keys(self): - """Target database must support self-referential foreign keys.""" - - return exclusions.open() - - @property - def foreign_key_ddl(self): - """Target database must support the DDL phrases for FOREIGN KEY.""" - - return exclusions.open() - - @property - def named_constraints(self): - """target database must support names for constraints.""" - - return exclusions.open() - - @property - def subqueries(self): - """Target database must support subqueries.""" - - return exclusions.open() - - @property - def offset(self): - """target database can render OFFSET, or an equivalent, in a - SELECT. - """ - - return exclusions.open() - - @property - def bound_limit_offset(self): - """target database can render LIMIT and/or OFFSET using a bound - parameter - """ - - return exclusions.open() - - @property - def boolean_col_expressions(self): - """Target database must support boolean expressions as columns""" - - return exclusions.closed() - - @property - def nullsordering(self): - """Target backends that support nulls ordering.""" - - return exclusions.closed() - - @property - def standalone_binds(self): - """target database/driver supports bound parameters as column expressions - without being in the context of a typed column. - - """ - return exclusions.closed() - - @property - def intersect(self): - """Target database must support INTERSECT or equivalent.""" - return exclusions.closed() - - @property - def except_(self): - """Target database must support EXCEPT or equivalent (i.e. MINUS).""" - return exclusions.closed() - - @property - def window_functions(self): - """Target database must support window functions.""" - return exclusions.closed() - - @property - def autoincrement_insert(self): - """target platform generates new surrogate integer primary key values - when insert() is executed, excluding the pk column.""" - - return exclusions.open() - - @property - def fetch_rows_post_commit(self): - """target platform will allow cursor.fetchone() to proceed after a - COMMIT. - - Typically this refers to an INSERT statement with RETURNING which - is invoked within "autocommit". If the row can be returned - after the autocommit, then this rule can be open. - - """ - - return exclusions.open() - - @property - def empty_inserts(self): - """target platform supports INSERT with no values, i.e. 
-        INSERT DEFAULT VALUES or equivalent."""
-
-        return exclusions.only_if(
-            lambda config: config.db.dialect.supports_empty_insert or
-            config.db.dialect.supports_default_values,
-            "empty inserts not supported"
-        )
-
-    @property
-    def insert_from_select(self):
-        """target platform supports INSERT from a SELECT."""
-
-        return exclusions.open()
-
-    @property
-    def returning(self):
-        """target platform supports RETURNING."""
-
-        return exclusions.only_if(
-            lambda config: config.db.dialect.implicit_returning,
-            "%(database)s %(does_support)s 'returning'"
-        )
-
-    @property
-    def duplicate_names_in_cursor_description(self):
-        """target platform supports a SELECT statement that has
-        the same name repeated more than once in the columns list."""
-
-        return exclusions.open()
-
-    @property
-    def denormalized_names(self):
-        """Target database must have 'denormalized', i.e.
-        UPPERCASE as case insensitive names."""
-
-        return exclusions.skip_if(
-            lambda config: not config.db.dialect.requires_name_normalize,
-            "Backend does not require denormalized names."
-        )
-
-    @property
-    def multivalues_inserts(self):
-        """target database must support multiple VALUES clauses in an
-        INSERT statement."""
-
-        return exclusions.skip_if(
-            lambda config: not config.db.dialect.supports_multivalues_insert,
-            "Backend does not support multirow inserts."
-        )
-
-    @property
-    def implements_get_lastrowid(self):
-        """target dialect implements the executioncontext.get_lastrowid()
-        method without reliance on RETURNING.
-
-        """
-        return exclusions.open()
-
-    @property
-    def emulated_lastrowid(self):
-        """target dialect retrieves cursor.lastrowid, or fetches
-        from a database-side function after an insert() construct executes,
-        within the get_lastrowid() method.
-
-        Only dialects that "pre-execute", or need RETURNING to get last
-        inserted id, would return closed/fail/skip for this.
-
-        """
-        return exclusions.closed()
-
-    @property
-    def dbapi_lastrowid(self):
-        """target platform includes a 'lastrowid' accessor on the DBAPI
-        cursor object.
-
-        """
-        return exclusions.closed()
-
-    @property
-    def views(self):
-        """Target database must support VIEWs."""
-
-        return exclusions.closed()
-
-    @property
-    def schemas(self):
-        """Target database must support external schemas, and have one
-        named 'test_schema'."""
-
-        return exclusions.closed()
-
-    @property
-    def sequences(self):
-        """Target database must support SEQUENCEs."""
-
-        return exclusions.only_if([
-            lambda config: config.db.dialect.supports_sequences
-        ], "no sequence support")
-
-    @property
-    def sequences_optional(self):
-        """Target database supports sequences, but also optionally
-        as a means of generating new PK values."""
-
-        return exclusions.only_if([
-            lambda config: config.db.dialect.supports_sequences and
-            config.db.dialect.sequences_optional
-        ], "no sequence support, or sequences not optional")
-
-    @property
-    def reflects_pk_names(self):
-        return exclusions.closed()
-
-    @property
-    def table_reflection(self):
-        return exclusions.open()
-
-    @property
-    def view_column_reflection(self):
-        """target database must support retrieval of the columns in a view,
-        similarly to how a table is inspected.
-
-        This does not include the full CREATE VIEW definition.
-
-        """
-        return self.views
-
-    @property
-    def view_reflection(self):
-        """target database must support inspection of the full CREATE VIEW definition.
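
As the module docstring notes, external dialect suites tune these rules by subclassing. A plausible sketch, assuming the upstream sqlalchemy.testing package layout; the class name is hypothetical::

    from sqlalchemy.testing.requirements import SuiteRequirements
    from sqlalchemy.testing import exclusions

    class MyDialectRequirements(SuiteRequirements):

        @property
        def returning(self):
            # this backend has no RETURNING support
            return exclusions.closed()

        @property
        def views(self):
            # ...but it does support views, so open the closed default
            return exclusions.open()
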
- """ - return self.views - - @property - def schema_reflection(self): - return self.schemas - - @property - def primary_key_constraint_reflection(self): - return exclusions.open() - - @property - def foreign_key_constraint_reflection(self): - return exclusions.open() - - @property - def temp_table_reflection(self): - return exclusions.open() - - @property - def temp_table_names(self): - """target dialect supports listing of temporary table names""" - return exclusions.closed() - - @property - def temporary_tables(self): - """target database supports temporary tables""" - return exclusions.open() - - @property - def temporary_views(self): - """target database supports temporary views""" - return exclusions.closed() - - @property - def index_reflection(self): - return exclusions.open() - - @property - def unique_constraint_reflection(self): - """target dialect supports reflection of unique constraints""" - return exclusions.open() - - @property - def duplicate_key_raises_integrity_error(self): - """target dialect raises IntegrityError when reporting an INSERT - with a primary key violation. (hint: it should) - - """ - return exclusions.open() - - @property - def unbounded_varchar(self): - """Target database must support VARCHAR with no length""" - - return exclusions.open() - - @property - def unicode_data(self): - """Target database/dialect must support Python unicode objects with - non-ASCII characters represented, delivered as bound parameters - as well as in result rows. - - """ - return exclusions.open() - - @property - def unicode_ddl(self): - """Target driver must support some degree of non-ascii symbol - names. - """ - return exclusions.closed() - - @property - def datetime_literals(self): - """target dialect supports rendering of a date, time, or datetime as a - literal string, e.g. via the TypeEngine.literal_processor() method. - - """ - - return exclusions.closed() - - @property - def datetime(self): - """target dialect supports representation of Python - datetime.datetime() objects.""" - - return exclusions.open() - - @property - def datetime_microseconds(self): - """target dialect supports representation of Python - datetime.datetime() with microsecond objects.""" - - return exclusions.open() - - @property - def datetime_historic(self): - """target dialect supports representation of Python - datetime.datetime() objects with historic (pre 1970) values.""" - - return exclusions.closed() - - @property - def date(self): - """target dialect supports representation of Python - datetime.date() objects.""" - - return exclusions.open() - - @property - def date_coerces_from_datetime(self): - """target dialect accepts a datetime object as the target - of a date column.""" - - return exclusions.open() - - @property - def date_historic(self): - """target dialect supports representation of Python - datetime.datetime() objects with historic (pre 1970) values.""" - - return exclusions.closed() - - @property - def time(self): - """target dialect supports representation of Python - datetime.time() objects.""" - - return exclusions.open() - - @property - def time_microseconds(self): - """target dialect supports representation of Python - datetime.time() with microsecond objects.""" - - return exclusions.open() - - @property - def binary_comparisons(self): - """target database/driver can allow BLOB/BINARY fields to be compared - against a bound parameter value. - """ - - return exclusions.open() - - @property - def binary_literals(self): - """target backend supports simple binary literals, e.g. 
an - expression like:: - - SELECT CAST('foo' AS BINARY) - - Where ``BINARY`` is the type emitted from :class:`.LargeBinary`, - e.g. it could be ``BLOB`` or similar. - - Basically fails on Oracle. - - """ - - return exclusions.open() - - @property - def precision_numerics_general(self): - """target backend has general support for moderately high-precision - numerics.""" - return exclusions.open() - - @property - def precision_numerics_enotation_small(self): - """target backend supports Decimal() objects using E notation - to represent very small values.""" - return exclusions.closed() - - @property - def precision_numerics_enotation_large(self): - """target backend supports Decimal() objects using E notation - to represent very large values.""" - return exclusions.closed() - - @property - def precision_numerics_many_significant_digits(self): - """target backend supports values with many digits on both sides, - such as 319438950232418390.273596, 87673.594069654243 - - """ - return exclusions.closed() - - @property - def precision_numerics_retains_significant_digits(self): - """A precision numeric type will return empty significant digits, - i.e. a value such as 10.000 will come back in Decimal form with - the .000 maintained.""" - - return exclusions.closed() - - @property - def precision_generic_float_type(self): - """target backend will return native floating point numbers with at - least seven decimal places when using the generic Float type. - - """ - return exclusions.open() - - @property - def floats_to_four_decimals(self): - """target backend can return a floating-point number with four - significant digits (such as 15.7563) accurately - (i.e. without FP inaccuracies, such as 15.75629997253418). - - """ - return exclusions.open() - - @property - def fetch_null_from_numeric(self): - """target backend doesn't crash when you try to select a NUMERIC - value that has a value of NULL. - - Added to support Pyodbc bug #351. - """ - - return exclusions.open() - - @property - def text_type(self): - """Target database must support an unbounded Text() " - "type such as TEXT or CLOB""" - - return exclusions.open() - - @property - def empty_strings_varchar(self): - """target database can persist/return an empty string with a - varchar. - - """ - return exclusions.open() - - @property - def empty_strings_text(self): - """target database can persist/return an empty string with an - unbounded text.""" - - return exclusions.open() - - @property - def selectone(self): - """target driver must support the literal statement 'select 1'""" - return exclusions.open() - - @property - def savepoints(self): - """Target database must support savepoints.""" - - return exclusions.closed() - - @property - def two_phase_transactions(self): - """Target database must support two-phase transactions.""" - - return exclusions.closed() - - @property - def update_from(self): - """Target must support UPDATE..FROM syntax""" - return exclusions.closed() - - @property - def update_where_target_in_subquery(self): - """Target must support UPDATE where the same table is present in a - subquery in the WHERE clause. 
- - This is an ANSI-standard syntax that apparently MySQL can't handle, - such as: - - UPDATE documents SET flag=1 WHERE documents.title IN - (SELECT max(documents.title) AS title - FROM documents GROUP BY documents.user_id - ) - """ - return exclusions.open() - - @property - def mod_operator_as_percent_sign(self): - """target database must use a plain percent '%' as the 'modulus' - operator.""" - return exclusions.closed() - - @property - def percent_schema_names(self): - """target backend supports weird identifiers with percent signs - in them, e.g. 'some % column'. - - this is a very weird use case but often has problems because of - DBAPIs that use python formatting. It's not a critical use - case either. - - """ - return exclusions.closed() - - @property - def order_by_label_with_expression(self): - """target backend supports ORDER BY a column label within an - expression. - - Basically this:: - - select data as foo from test order by foo || 'bar' - - Lots of databases including Postgresql don't support this, - so this is off by default. - - """ - return exclusions.closed() - - @property - def unicode_connections(self): - """Target driver must support non-ASCII characters being passed at - all. - """ - return exclusions.open() - - @property - def graceful_disconnects(self): - """Target driver must raise a DBAPI-level exception, such as - InterfaceError, when the underlying connection has been closed - and the execute() method is called. - """ - return exclusions.open() - - @property - def skip_mysql_on_windows(self): - """Catchall for a large variety of MySQL on Windows failures""" - return exclusions.open() - - @property - def ad_hoc_engines(self): - """Test environment must allow ad-hoc engine/connection creation. - - DBs that scale poorly for many connections, even when closed, i.e. - Oracle, may use the "--low-connections" option which flags this - requirement as not present. - - """ - return exclusions.skip_if( - lambda config: config.options.low_connections) - - @property - def timing_intensive(self): - return exclusions.requires_tag("timing_intensive") - - @property - def memory_intensive(self): - return exclusions.requires_tag("memory_intensive") - - @property - def threading_with_mock(self): - """Mark tests that use threading and mock at the same time - stability - issues have been observed with coverage + python 3.3 - - """ - return exclusions.skip_if( - lambda config: util.py3k and config.options.has_coverage, - "Stability issues with coverage + py3k" - ) - - @property - def no_coverage(self): - """Test should be skipped if coverage is enabled. - - This is to block tests that exercise libraries that seem to be - sensitive to coverage, such as Postgresql notice logging. 
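
These properties are consumed as decorators: each rule's predicate is evaluated against the active configuration, and the test is skipped when the rule says so. A toy version of that predicate/decorator idea, independent of the real exclusions machinery and with illustrative names::

    class SkipIf(object):
        def __init__(self, predicate, reason):
            self.predicate, self.reason = predicate, reason

        def __call__(self, fn):
            def wrapped(*args, **kw):
                if self.predicate():
                    print("skipped: %s" % self.reason)  # stand-in for raising a skip
                    return
                return fn(*args, **kw)
            return wrapped

    no_coverage = SkipIf(lambda: True, "issues observed when coverage is enabled")

    @no_coverage
    def test_notice_logging():
        raise AssertionError("never reached when the rule applies")

    test_notice_logging()   # prints the skip message instead of failing
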
- - """ - return exclusions.skip_if( - lambda config: config.options.has_coverage, - "Issues observed when coverage is enabled" - ) - - def _has_mysql_on_windows(self, config): - return False - - def _has_mysql_fully_case_sensitive(self, config): - return False - - @property - def sqlite(self): - return exclusions.skip_if(lambda: not self._has_sqlite()) - - @property - def cextensions(self): - return exclusions.skip_if( - lambda: not self._has_cextensions(), "C extensions not installed" - ) - - def _has_sqlite(self): - from sqlalchemy import create_engine - try: - create_engine('sqlite://') - return True - except ImportError: - return False - - def _has_cextensions(self): - try: - from sqlalchemy import cresultproxy, cprocessors - return True - except ImportError: - return False diff --git a/python/sqlalchemy/testing/runner.py b/python/sqlalchemy/testing/runner.py deleted file mode 100644 index 92a03061..00000000 --- a/python/sqlalchemy/testing/runner.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# testing/runner.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php -""" -Nose test runner module. - -This script is a front-end to "nosetests" which -installs SQLAlchemy's testing plugin into the local environment. - -The script is intended to be used by third-party dialects and extensions -that run within SQLAlchemy's testing framework. The runner can -be invoked via:: - - python -m sqlalchemy.testing.runner - -The script is then essentially the same as the "nosetests" script, including -all of the usual Nose options. The test environment requires that a -setup.cfg is locally present including various required options. - -Note that when using this runner, Nose's "coverage" plugin will not be -able to provide coverage for SQLAlchemy itself, since SQLAlchemy is -imported into sys.modules before coverage is started. The special -script sqla_nose.py is provided as a top-level script which loads the -plugin in a special (somewhat hacky) way so that coverage against -SQLAlchemy itself is possible. - -""" - -from .plugin.noseplugin import NoseSQLAlchemy - -import nose - - -def main(): - nose.main(addplugins=[NoseSQLAlchemy()]) - - -def setup_py_test(): - """Runner to use for the 'test_suite' entry of your setup.py. - - Prevents any name clash shenanigans from the command line - argument "test" that the "setup.py test" command sends - to nose. - - """ - nose.main(addplugins=[NoseSQLAlchemy()], argv=['runner']) diff --git a/python/sqlalchemy/testing/schema.py b/python/sqlalchemy/testing/schema.py deleted file mode 100644 index 93b52ad5..00000000 --- a/python/sqlalchemy/testing/schema.py +++ /dev/null @@ -1,98 +0,0 @@ -# testing/schema.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from . import exclusions -from .. import schema, event -from . 
import config
-
-__all__ = 'Table', 'Column',
-
-table_options = {}
-
-
-def Table(*args, **kw):
-    """A schema.Table wrapper/hook for dialect-specific tweaks."""
-
-    test_opts = dict([(k, kw.pop(k)) for k in list(kw)
-                      if k.startswith('test_')])
-
-    kw.update(table_options)
-
-    if exclusions.against(config._current, 'mysql'):
-        if 'mysql_engine' not in kw and 'mysql_type' not in kw:
-            if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts:
-                kw['mysql_engine'] = 'InnoDB'
-            else:
-                kw['mysql_engine'] = 'MyISAM'
-
-    # Apply some default cascading rules for self-referential foreign keys.
-    # MySQL InnoDB has some issues around selecting self-refs too.
-    if exclusions.against(config._current, 'firebird'):
-        table_name = args[0]
-        unpack = (config.db.dialect.
-                  identifier_preparer.unformat_identifiers)
-
-        # Only going after ForeignKeys in Columns.  May need to
-        # expand to ForeignKeyConstraint too.
-        fks = [fk
-               for col in args if isinstance(col, schema.Column)
-               for fk in col.foreign_keys]
-
-        for fk in fks:
-            # root around in raw spec
-            ref = fk._colspec
-            if isinstance(ref, schema.Column):
-                name = ref.table.name
-            else:
-                # take just the table name: on FB there cannot be
-                # a schema, so the first element is always the
-                # table name, possibly followed by the field name
-                name = unpack(ref)[0]
-            if name == table_name:
-                if fk.ondelete is None:
-                    fk.ondelete = 'CASCADE'
-                if fk.onupdate is None:
-                    fk.onupdate = 'CASCADE'
-
-    return schema.Table(*args, **kw)
-
-
-def Column(*args, **kw):
-    """A schema.Column wrapper/hook for dialect-specific tweaks."""
-
-    test_opts = dict([(k, kw.pop(k)) for k in list(kw)
-                      if k.startswith('test_')])
-
-    if not config.requirements.foreign_key_ddl.enabled_for_config(config):
-        args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
-
-    col = schema.Column(*args, **kw)
-    if 'test_needs_autoincrement' in test_opts and \
-            kw.get('primary_key', False):
-
-        # allow any test suite to pick up on this
-        col.info['test_needs_autoincrement'] = True
-
-    # hardcoded rule for firebird, oracle; this should
-    # be moved out
-    if exclusions.against(config._current, 'firebird', 'oracle'):
-        def add_seq(c, tbl):
-            c._init_items(
-                schema.Sequence(_truncate_name(
-                    config.db.dialect, tbl.name + '_' + c.name + '_seq'),
-                    optional=True)
-            )
-        event.listen(col, 'after_parent_attach', add_seq, propagate=True)
-    return col
-
-
-def _truncate_name(dialect, name):
-    if len(name) > dialect.max_identifier_length:
-        return name[0:max(dialect.max_identifier_length - 6, 0)] + \
-            "_" + hex(hash(name) % 64)[2:]
-    else:
-        return name
diff --git a/python/sqlalchemy/testing/suite/__init__.py b/python/sqlalchemy/testing/suite/__init__.py
deleted file mode 100644
index 9eeffd4c..00000000
--- a/python/sqlalchemy/testing/suite/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-from sqlalchemy.testing.suite.test_dialect import *
-from sqlalchemy.testing.suite.test_ddl import *
-from sqlalchemy.testing.suite.test_insert import *
-from sqlalchemy.testing.suite.test_sequence import *
-from sqlalchemy.testing.suite.test_select import *
-from sqlalchemy.testing.suite.test_results import *
-from sqlalchemy.testing.suite.test_update_delete import *
-from sqlalchemy.testing.suite.test_reflection import *
-from sqlalchemy.testing.suite.test_types import *
diff --git a/python/sqlalchemy/testing/suite/test_ddl.py b/python/sqlalchemy/testing/suite/test_ddl.py
deleted file mode 100644
index 1d8010c8..00000000
--- a/python/sqlalchemy/testing/suite/test_ddl.py
+++ /dev/null
@@ -1,65 +0,0 @@
- - -from .. import fixtures, config, util -from ..config import requirements -from ..assertions import eq_ - -from sqlalchemy import Table, Column, Integer, String - - -class TableDDLTest(fixtures.TestBase): - __backend__ = True - - def _simple_fixture(self): - return Table('test_table', self.metadata, - Column('id', Integer, primary_key=True, - autoincrement=False), - Column('data', String(50)) - ) - - def _underscore_fixture(self): - return Table('_test_table', self.metadata, - Column('id', Integer, primary_key=True, - autoincrement=False), - Column('_data', String(50)) - ) - - def _simple_roundtrip(self, table): - with config.db.begin() as conn: - conn.execute(table.insert().values((1, 'some data'))) - result = conn.execute(table.select()) - eq_( - result.first(), - (1, 'some data') - ) - - @requirements.create_table - @util.provide_metadata - def test_create_table(self): - table = self._simple_fixture() - table.create( - config.db, checkfirst=False - ) - self._simple_roundtrip(table) - - @requirements.drop_table - @util.provide_metadata - def test_drop_table(self): - table = self._simple_fixture() - table.create( - config.db, checkfirst=False - ) - table.drop( - config.db, checkfirst=False - ) - - @requirements.create_table - @util.provide_metadata - def test_underscore_names(self): - table = self._underscore_fixture() - table.create( - config.db, checkfirst=False - ) - self._simple_roundtrip(table) - -__all__ = ('TableDDLTest', ) diff --git a/python/sqlalchemy/testing/suite/test_dialect.py b/python/sqlalchemy/testing/suite/test_dialect.py deleted file mode 100644 index 00884a21..00000000 --- a/python/sqlalchemy/testing/suite/test_dialect.py +++ /dev/null @@ -1,41 +0,0 @@ -from .. import fixtures, config -from ..config import requirements -from sqlalchemy import exc -from sqlalchemy import Integer, String -from .. import assert_raises -from ..schema import Table, Column - - -class ExceptionTest(fixtures.TablesTest): - """Test basic exception wrapping. - - DBAPIs vary a lot in exception behavior so to actually anticipate - specific exceptions from real round trips, we need to be conservative. - - """ - run_deletes = 'each' - - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table('manual_pk', metadata, - Column('id', Integer, primary_key=True, autoincrement=False), - Column('data', String(50)) - ) - - @requirements.duplicate_key_raises_integrity_error - def test_integrity_error(self): - - with config.db.begin() as conn: - conn.execute( - self.tables.manual_pk.insert(), - {'id': 1, 'data': 'd1'} - ) - - assert_raises( - exc.IntegrityError, - conn.execute, - self.tables.manual_pk.insert(), - {'id': 1, 'data': 'd1'} - ) diff --git a/python/sqlalchemy/testing/suite/test_insert.py b/python/sqlalchemy/testing/suite/test_insert.py deleted file mode 100644 index 70e8a6b1..00000000 --- a/python/sqlalchemy/testing/suite/test_insert.py +++ /dev/null @@ -1,269 +0,0 @@ -from .. import fixtures, config -from ..config import requirements -from .. import exclusions -from ..assertions import eq_ -from .. 
import engines - -from sqlalchemy import Integer, String, select, literal_column, literal - -from ..schema import Table, Column - - -class LastrowidTest(fixtures.TablesTest): - run_deletes = 'each' - - __backend__ = True - - __requires__ = 'implements_get_lastrowid', 'autoincrement_insert' - - __engine_options__ = {"implicit_returning": False} - - @classmethod - def define_tables(cls, metadata): - Table('autoinc_pk', metadata, - Column('id', Integer, primary_key=True, - test_needs_autoincrement=True), - Column('data', String(50)) - ) - - Table('manual_pk', metadata, - Column('id', Integer, primary_key=True, autoincrement=False), - Column('data', String(50)) - ) - - def _assert_round_trip(self, table, conn): - row = conn.execute(table.select()).first() - eq_( - row, - (config.db.dialect.default_sequence_base, "some data") - ) - - def test_autoincrement_on_insert(self): - - config.db.execute( - self.tables.autoinc_pk.insert(), - data="some data" - ) - self._assert_round_trip(self.tables.autoinc_pk, config.db) - - def test_last_inserted_id(self): - - r = config.db.execute( - self.tables.autoinc_pk.insert(), - data="some data" - ) - pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) - eq_( - r.inserted_primary_key, - [pk] - ) - - # failed on pypy1.9 but seems to be OK on pypy 2.1 - # @exclusions.fails_if(lambda: util.pypy, - # "lastrowid not maintained after " - # "connection close") - @requirements.dbapi_lastrowid - def test_native_lastrowid_autoinc(self): - r = config.db.execute( - self.tables.autoinc_pk.insert(), - data="some data" - ) - lastrowid = r.lastrowid - pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) - eq_( - lastrowid, pk - ) - - -class InsertBehaviorTest(fixtures.TablesTest): - run_deletes = 'each' - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table('autoinc_pk', metadata, - Column('id', Integer, primary_key=True, - test_needs_autoincrement=True), - Column('data', String(50)) - ) - Table('manual_pk', metadata, - Column('id', Integer, primary_key=True, autoincrement=False), - Column('data', String(50)) - ) - Table('includes_defaults', metadata, - Column('id', Integer, primary_key=True, - test_needs_autoincrement=True), - Column('data', String(50)), - Column('x', Integer, default=5), - Column('y', Integer, - default=literal_column("2", type_=Integer) + literal(2))) - - def test_autoclose_on_insert(self): - if requirements.returning.enabled: - engine = engines.testing_engine( - options={'implicit_returning': False}) - else: - engine = config.db - - r = engine.execute( - self.tables.autoinc_pk.insert(), - data="some data" - ) - assert r._soft_closed - assert not r.closed - assert r.is_insert - assert not r.returns_rows - - @requirements.returning - def test_autoclose_on_insert_implicit_returning(self): - r = config.db.execute( - self.tables.autoinc_pk.insert(), - data="some data" - ) - assert r._soft_closed - assert not r.closed - assert r.is_insert - assert not r.returns_rows - - @requirements.empty_inserts - def test_empty_insert(self): - r = config.db.execute( - self.tables.autoinc_pk.insert(), - ) - assert r._soft_closed - assert not r.closed - - r = config.db.execute( - self.tables.autoinc_pk.select(). 
- where(self.tables.autoinc_pk.c.id != None) - ) - - assert len(r.fetchall()) - - @requirements.insert_from_select - def test_insert_from_select(self): - table = self.tables.manual_pk - config.db.execute( - table.insert(), - [ - dict(id=1, data="data1"), - dict(id=2, data="data2"), - dict(id=3, data="data3"), - ] - ) - - config.db.execute( - table.insert(inline=True). - from_select(("id", "data",), - select([table.c.id + 5, table.c.data]). - where(table.c.data.in_(["data2", "data3"])) - ), - ) - - eq_( - config.db.execute( - select([table.c.data]).order_by(table.c.data) - ).fetchall(), - [("data1", ), ("data2", ), ("data2", ), - ("data3", ), ("data3", )] - ) - - @requirements.insert_from_select - def test_insert_from_select_with_defaults(self): - table = self.tables.includes_defaults - config.db.execute( - table.insert(), - [ - dict(id=1, data="data1"), - dict(id=2, data="data2"), - dict(id=3, data="data3"), - ] - ) - - config.db.execute( - table.insert(inline=True). - from_select(("id", "data",), - select([table.c.id + 5, table.c.data]). - where(table.c.data.in_(["data2", "data3"])) - ), - ) - - eq_( - config.db.execute( - select([table]).order_by(table.c.data, table.c.id) - ).fetchall(), - [(1, 'data1', 5, 4), (2, 'data2', 5, 4), - (7, 'data2', 5, 4), (3, 'data3', 5, 4), (8, 'data3', 5, 4)] - ) - - -class ReturningTest(fixtures.TablesTest): - run_create_tables = 'each' - __requires__ = 'returning', 'autoincrement_insert' - __backend__ = True - - __engine_options__ = {"implicit_returning": True} - - def _assert_round_trip(self, table, conn): - row = conn.execute(table.select()).first() - eq_( - row, - (config.db.dialect.default_sequence_base, "some data") - ) - - @classmethod - def define_tables(cls, metadata): - Table('autoinc_pk', metadata, - Column('id', Integer, primary_key=True, - test_needs_autoincrement=True), - Column('data', String(50)) - ) - - @requirements.fetch_rows_post_commit - def test_explicit_returning_pk_autocommit(self): - engine = config.db - table = self.tables.autoinc_pk - r = engine.execute( - table.insert().returning( - table.c.id), - data="some data" - ) - pk = r.first()[0] - fetched_pk = config.db.scalar(select([table.c.id])) - eq_(fetched_pk, pk) - - def test_explicit_returning_pk_no_autocommit(self): - engine = config.db - table = self.tables.autoinc_pk - with engine.begin() as conn: - r = conn.execute( - table.insert().returning( - table.c.id), - data="some data" - ) - pk = r.first()[0] - fetched_pk = config.db.scalar(select([table.c.id])) - eq_(fetched_pk, pk) - - def test_autoincrement_on_insert_implcit_returning(self): - - config.db.execute( - self.tables.autoinc_pk.insert(), - data="some data" - ) - self._assert_round_trip(self.tables.autoinc_pk, config.db) - - def test_last_inserted_id_implicit_returning(self): - - r = config.db.execute( - self.tables.autoinc_pk.insert(), - data="some data" - ) - pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) - eq_( - r.inserted_primary_key, - [pk] - ) - - -__all__ = ('LastrowidTest', 'InsertBehaviorTest', 'ReturningTest') diff --git a/python/sqlalchemy/testing/suite/test_reflection.py b/python/sqlalchemy/testing/suite/test_reflection.py deleted file mode 100644 index 288a8597..00000000 --- a/python/sqlalchemy/testing/suite/test_reflection.py +++ /dev/null @@ -1,647 +0,0 @@ - - -import sqlalchemy as sa -from sqlalchemy import exc as sa_exc -from sqlalchemy import types as sql_types -from sqlalchemy import inspect -from sqlalchemy import MetaData, Integer, String -from sqlalchemy.engine.reflection import 
Inspector -from sqlalchemy.testing import engines, fixtures -from sqlalchemy.testing.schema import Table, Column -from sqlalchemy.testing import eq_, assert_raises_message -from sqlalchemy import testing -from .. import config -import operator -from sqlalchemy.schema import DDL, Index -from sqlalchemy import event - -metadata, users = None, None - - -class HasTableTest(fixtures.TablesTest): - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table('test_table', metadata, - Column('id', Integer, primary_key=True), - Column('data', String(50)) - ) - - def test_has_table(self): - with config.db.begin() as conn: - assert config.db.dialect.has_table(conn, "test_table") - assert not config.db.dialect.has_table(conn, "nonexistent_table") - - -class ComponentReflectionTest(fixtures.TablesTest): - run_inserts = run_deletes = None - - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - cls.define_reflected_tables(metadata, None) - if testing.requires.schemas.enabled: - cls.define_reflected_tables(metadata, testing.config.test_schema) - - @classmethod - def define_reflected_tables(cls, metadata, schema): - if schema: - schema_prefix = schema + "." - else: - schema_prefix = "" - - if testing.requires.self_referential_foreign_keys.enabled: - users = Table('users', metadata, - Column('user_id', sa.INT, primary_key=True), - Column('test1', sa.CHAR(5), nullable=False), - Column('test2', sa.Float(5), nullable=False), - Column('parent_user_id', sa.Integer, - sa.ForeignKey('%susers.user_id' % - schema_prefix)), - schema=schema, - test_needs_fk=True, - ) - else: - users = Table('users', metadata, - Column('user_id', sa.INT, primary_key=True), - Column('test1', sa.CHAR(5), nullable=False), - Column('test2', sa.Float(5), nullable=False), - schema=schema, - test_needs_fk=True, - ) - - Table("dingalings", metadata, - Column('dingaling_id', sa.Integer, primary_key=True), - Column('address_id', sa.Integer, - sa.ForeignKey('%semail_addresses.address_id' % - schema_prefix)), - Column('data', sa.String(30)), - schema=schema, - test_needs_fk=True, - ) - Table('email_addresses', metadata, - Column('address_id', sa.Integer), - Column('remote_user_id', sa.Integer, - sa.ForeignKey(users.c.user_id)), - Column('email_address', sa.String(20)), - sa.PrimaryKeyConstraint('address_id', name='email_ad_pk'), - schema=schema, - test_needs_fk=True, - ) - - if testing.requires.index_reflection.enabled: - cls.define_index(metadata, users) - if testing.requires.view_column_reflection.enabled: - cls.define_views(metadata, schema) - if not schema and testing.requires.temp_table_reflection.enabled: - cls.define_temp_tables(metadata) - - @classmethod - def define_temp_tables(cls, metadata): - # cheat a bit, we should fix this with some dialect-level - # temp table fixture - if testing.against("oracle"): - kw = { - 'prefixes': ["GLOBAL TEMPORARY"], - 'oracle_on_commit': 'PRESERVE ROWS' - } - else: - kw = { - 'prefixes': ["TEMPORARY"], - } - - user_tmp = Table( - "user_tmp", metadata, - Column("id", sa.INT, primary_key=True), - Column('name', sa.VARCHAR(50)), - Column('foo', sa.INT), - sa.UniqueConstraint('name', name='user_tmp_uq'), - sa.Index("user_tmp_ix", "foo"), - **kw - ) - if testing.requires.view_reflection.enabled and \ - testing.requires.temporary_views.enabled: - event.listen( - user_tmp, "after_create", - DDL("create temporary view user_tmp_v as " - "select * from user_tmp") - ) - event.listen( - user_tmp, "before_drop", - DDL("drop view user_tmp_v") - ) - - @classmethod - def 
define_index(cls, metadata, users):
-        Index("users_t_idx", users.c.test1, users.c.test2)
-        Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1)
-
-    @classmethod
-    def define_views(cls, metadata, schema):
-        for table_name in ('users', 'email_addresses'):
-            fullname = table_name
-            if schema:
-                fullname = "%s.%s" % (schema, table_name)
-            view_name = fullname + '_v'
-            query = "CREATE VIEW %s AS SELECT * FROM %s" % (
-                view_name, fullname)
-
-            event.listen(
-                metadata,
-                "after_create",
-                DDL(query)
-            )
-            event.listen(
-                metadata,
-                "before_drop",
-                DDL("DROP VIEW %s" % view_name)
-            )
-
-    @testing.requires.schema_reflection
-    def test_get_schema_names(self):
-        insp = inspect(testing.db)
-
-        self.assert_(testing.config.test_schema in insp.get_schema_names())
-
-    @testing.requires.schema_reflection
-    def test_dialect_initialize(self):
-        engine = engines.testing_engine()
-        assert not hasattr(engine.dialect, 'default_schema_name')
-        inspect(engine)
-        assert hasattr(engine.dialect, 'default_schema_name')
-
-    @testing.requires.schema_reflection
-    def test_get_default_schema_name(self):
-        insp = inspect(testing.db)
-        eq_(insp.default_schema_name, testing.db.dialect.default_schema_name)
-
-    @testing.provide_metadata
-    def _test_get_table_names(self, schema=None, table_type='table',
-                              order_by=None):
-        meta = self.metadata
-        users, addresses, dingalings = self.tables.users, \
-            self.tables.email_addresses, self.tables.dingalings
-        insp = inspect(meta.bind)
-
-        if table_type == 'view':
-            table_names = insp.get_view_names(schema)
-            table_names.sort()
-            answer = ['email_addresses_v', 'users_v']
-            eq_(sorted(table_names), answer)
-        else:
-            table_names = insp.get_table_names(schema,
-                                               order_by=order_by)
-            if order_by == 'foreign_key':
-                answer = ['users', 'email_addresses', 'dingalings']
-                eq_(table_names, answer)
-            else:
-                answer = ['dingalings', 'email_addresses', 'users']
-                eq_(sorted(table_names), answer)
-
-    @testing.requires.temp_table_names
-    def test_get_temp_table_names(self):
-        insp = inspect(testing.db)
-        temp_table_names = insp.get_temp_table_names()
-        eq_(sorted(temp_table_names), ['user_tmp'])
-
-    @testing.requires.view_reflection
-    @testing.requires.temp_table_names
-    @testing.requires.temporary_views
-    def test_get_temp_view_names(self):
-        insp = inspect(self.metadata.bind)
-        temp_table_names = insp.get_temp_view_names()
-        eq_(sorted(temp_table_names), ['user_tmp_v'])
-
-    @testing.requires.table_reflection
-    def test_get_table_names(self):
-        self._test_get_table_names()
-
-    @testing.requires.table_reflection
-    @testing.requires.foreign_key_constraint_reflection
-    def test_get_table_names_fks(self):
-        self._test_get_table_names(order_by='foreign_key')
-
-    @testing.requires.table_reflection
-    @testing.requires.schemas
-    def test_get_table_names_with_schema(self):
-        self._test_get_table_names(testing.config.test_schema)
-
-    @testing.requires.view_column_reflection
-    def test_get_view_names(self):
-        self._test_get_table_names(table_type='view')
-
-    @testing.requires.view_column_reflection
-    @testing.requires.schemas
-    def test_get_view_names_with_schema(self):
-        self._test_get_table_names(
-            testing.config.test_schema, table_type='view')
-
-    @testing.requires.table_reflection
-    @testing.requires.view_column_reflection
-    def test_get_tables_and_views(self):
-        self._test_get_table_names()
-        self._test_get_table_names(table_type='view')
-
-    def _test_get_columns(self, schema=None, table_type='table'):
-        meta = MetaData(testing.db)
-        users, addresses, dingalings = self.tables.users, \
-            self.tables.email_addresses, self.tables.dingalings
-        table_names = ['users', 'email_addresses']
-        if table_type == 'view':
-            table_names = ['users_v', 'email_addresses_v']
-        insp = inspect(meta.bind)
-        for table_name, table in zip(table_names, (users,
-                                                   addresses)):
-            schema_name = schema
-            cols = insp.get_columns(table_name, schema=schema_name)
-            self.assert_(len(cols) > 0, len(cols))
-
-            # should be in order
-
-            for i, col in enumerate(table.columns):
-                eq_(col.name, cols[i]['name'])
-                ctype = cols[i]['type'].__class__
-                ctype_def = col.type
-                if isinstance(ctype_def, sa.types.TypeEngine):
-                    ctype_def = ctype_def.__class__
-
-                # Oracle returns Date for DateTime.
-
-                if testing.against('oracle') and ctype_def \
-                        in (sql_types.Date, sql_types.DateTime):
-                    ctype_def = sql_types.Date
-
-                # assert that the desired type and return type share
-                # a base within one of the generic types.
-
-                self.assert_(len(set(ctype.__mro__).
-                                 intersection(ctype_def.__mro__).
-                                 intersection([
-                                     sql_types.Integer,
-                                     sql_types.Numeric,
-                                     sql_types.DateTime,
-                                     sql_types.Date,
-                                     sql_types.Time,
-                                     sql_types.String,
-                                     sql_types._Binary,
-                                 ])) > 0, '%s(%s), %s(%s)' %
-                             (col.name, col.type, cols[i]['name'], ctype))
-
-                if not col.primary_key:
-                    assert cols[i]['default'] is None
-
-    @testing.requires.table_reflection
-    def test_get_columns(self):
-        self._test_get_columns()
-
-    @testing.provide_metadata
-    def _type_round_trip(self, *types):
-        t = Table('t', self.metadata,
-                  *[
-                      Column('t%d' % i, type_)
-                      for i, type_ in enumerate(types)
-                  ]
-                  )
-        t.create()
-
-        return [
-            c['type'] for c in
-            inspect(self.metadata.bind).get_columns('t')
-        ]
-
-    @testing.requires.table_reflection
-    def test_numeric_reflection(self):
-        for typ in self._type_round_trip(
-                sql_types.Numeric(18, 5),
-        ):
-            assert isinstance(typ, sql_types.Numeric)
-            eq_(typ.precision, 18)
-            eq_(typ.scale, 5)
-
-    @testing.requires.table_reflection
-    def test_varchar_reflection(self):
-        typ = self._type_round_trip(sql_types.String(52))[0]
-        assert isinstance(typ, sql_types.String)
-        eq_(typ.length, 52)
-
-    @testing.requires.table_reflection
-    @testing.provide_metadata
-    def test_nullable_reflection(self):
-        t = Table('t', self.metadata,
-                  Column('a', Integer, nullable=True),
-                  Column('b', Integer, nullable=False))
-        t.create()
-        eq_(
-            dict(
-                (col['name'], col['nullable'])
-                for col in inspect(self.metadata.bind).get_columns('t')
-            ),
-            {"a": True, "b": False}
-        )
-
-    @testing.requires.table_reflection
-    @testing.requires.schemas
-    def test_get_columns_with_schema(self):
-        self._test_get_columns(schema=testing.config.test_schema)
-
-    @testing.requires.temp_table_reflection
-    def test_get_temp_table_columns(self):
-        meta = MetaData(testing.db)
-        user_tmp = self.tables.user_tmp
-        insp = inspect(meta.bind)
-        cols = insp.get_columns('user_tmp')
-        self.assert_(len(cols) > 0, len(cols))
-
-        for i, col in enumerate(user_tmp.columns):
-            eq_(col.name, cols[i]['name'])
-
-    @testing.requires.temp_table_reflection
-    @testing.requires.view_column_reflection
-    @testing.requires.temporary_views
-    def test_get_temp_view_columns(self):
-        insp = inspect(self.metadata.bind)
-        cols = insp.get_columns('user_tmp_v')
-        eq_(
-            [col['name'] for col in cols],
-            ['id', 'name', 'foo']
-        )
-
-    @testing.requires.view_column_reflection
-    def test_get_view_columns(self):
-        self._test_get_columns(table_type='view')
-
-    @testing.requires.view_column_reflection
-    @testing.requires.schemas
-    def test_get_view_columns_with_schema(self):
-        self._test_get_columns(
-            schema=testing.config.test_schema, table_type='view')
-
-    @testing.provide_metadata
-    def _test_get_pk_constraint(self, schema=None):
-        meta = self.metadata
-        users, addresses = self.tables.users, self.tables.email_addresses
-        insp = inspect(meta.bind)
-
-        users_cons = insp.get_pk_constraint(users.name, schema=schema)
-        users_pkeys = users_cons['constrained_columns']
-        eq_(users_pkeys, ['user_id'])
-
-        addr_cons = insp.get_pk_constraint(addresses.name, schema=schema)
-        addr_pkeys = addr_cons['constrained_columns']
-        eq_(addr_pkeys, ['address_id'])
-
-        with testing.requires.reflects_pk_names.fail_if():
-            eq_(addr_cons['name'], 'email_ad_pk')
-
-    @testing.requires.primary_key_constraint_reflection
-    def test_get_pk_constraint(self):
-        self._test_get_pk_constraint()
-
-    @testing.requires.table_reflection
-    @testing.requires.primary_key_constraint_reflection
-    @testing.requires.schemas
-    def test_get_pk_constraint_with_schema(self):
-        self._test_get_pk_constraint(schema=testing.config.test_schema)
-
-    @testing.requires.table_reflection
-    @testing.provide_metadata
-    def test_deprecated_get_primary_keys(self):
-        meta = self.metadata
-        users = self.tables.users
-        insp = Inspector(meta.bind)
-        assert_raises_message(
-            sa_exc.SADeprecationWarning,
-            "Call to deprecated method get_primary_keys."
-            " Use get_pk_constraint instead.",
-            insp.get_primary_keys, users.name
-        )
-
-    @testing.provide_metadata
-    def _test_get_foreign_keys(self, schema=None):
-        meta = self.metadata
-        users, addresses, dingalings = self.tables.users, \
-            self.tables.email_addresses, self.tables.dingalings
-        insp = inspect(meta.bind)
-        expected_schema = schema
-        # users
-
-        if testing.requires.self_referential_foreign_keys.enabled:
-            users_fkeys = insp.get_foreign_keys(users.name,
-                                                schema=schema)
-            fkey1 = users_fkeys[0]
-
-            with testing.requires.named_constraints.fail_if():
-                self.assert_(fkey1['name'] is not None)
-
-            eq_(fkey1['referred_schema'], expected_schema)
-            eq_(fkey1['referred_table'], users.name)
-            eq_(fkey1['referred_columns'], ['user_id', ])
-            if testing.requires.self_referential_foreign_keys.enabled:
-                eq_(fkey1['constrained_columns'], ['parent_user_id'])
-
-        # addresses
-        addr_fkeys = insp.get_foreign_keys(addresses.name,
-                                           schema=schema)
-        fkey1 = addr_fkeys[0]
-
-        with testing.requires.named_constraints.fail_if():
-            self.assert_(fkey1['name'] is not None)
-
-        eq_(fkey1['referred_schema'], expected_schema)
-        eq_(fkey1['referred_table'], users.name)
-        eq_(fkey1['referred_columns'], ['user_id', ])
-        eq_(fkey1['constrained_columns'], ['remote_user_id'])
-
-    @testing.requires.foreign_key_constraint_reflection
-    def test_get_foreign_keys(self):
-        self._test_get_foreign_keys()
-
-    @testing.requires.foreign_key_constraint_reflection
-    @testing.requires.schemas
-    def test_get_foreign_keys_with_schema(self):
-        self._test_get_foreign_keys(schema=testing.config.test_schema)
-
-    @testing.provide_metadata
-    def _test_get_indexes(self, schema=None):
-        meta = self.metadata
-        users, addresses, dingalings = self.tables.users, \
-            self.tables.email_addresses, self.tables.dingalings
-        # The database may decide to create indexes for foreign keys, etc.
-        # so there may be more indexes than expected.
-        insp = inspect(meta.bind)
-        indexes = insp.get_indexes('users', schema=schema)
-        expected_indexes = [
-            {'unique': False,
-             'column_names': ['test1', 'test2'],
-             'name': 'users_t_idx'},
-            {'unique': False,
-             'column_names': ['user_id', 'test2', 'test1'],
-             'name': 'users_all_idx'}
-        ]
-        index_names = [d['name'] for d in indexes]
-        for e_index in expected_indexes:
-            assert e_index['name'] in index_names
-            index = indexes[index_names.index(e_index['name'])]
-            for key in e_index:
-                eq_(e_index[key], index[key])
-
-    @testing.requires.index_reflection
-    def test_get_indexes(self):
-        self._test_get_indexes()
-
-    @testing.requires.index_reflection
-    @testing.requires.schemas
-    def test_get_indexes_with_schema(self):
-        self._test_get_indexes(schema=testing.config.test_schema)
-
-    @testing.requires.unique_constraint_reflection
-    def test_get_unique_constraints(self):
-        self._test_get_unique_constraints()
-
-    @testing.requires.temp_table_reflection
-    @testing.requires.unique_constraint_reflection
-    def test_get_temp_table_unique_constraints(self):
-        insp = inspect(self.metadata.bind)
-        reflected = insp.get_unique_constraints('user_tmp')
-        for refl in reflected:
-            # Different dialects handle duplicate index and constraints
-            # differently, so ignore this flag
-            refl.pop('duplicates_index', None)
-        eq_(reflected, [{'column_names': ['name'], 'name': 'user_tmp_uq'}])
-
-    @testing.requires.temp_table_reflection
-    def test_get_temp_table_indexes(self):
-        insp = inspect(self.metadata.bind)
-        indexes = insp.get_indexes('user_tmp')
-        for ind in indexes:
-            ind.pop('dialect_options', None)
-        eq_(
-            # TODO: we need to add better filtering for indexes/uq constraints
-            # that are doubled up
-            [idx for idx in indexes if idx['name'] == 'user_tmp_ix'],
-            [{'unique': False, 'column_names': ['foo'], 'name': 'user_tmp_ix'}]
-        )
-
-    @testing.requires.unique_constraint_reflection
-    @testing.requires.schemas
-    def test_get_unique_constraints_with_schema(self):
-        self._test_get_unique_constraints(schema=testing.config.test_schema)
-
-    @testing.provide_metadata
-    def _test_get_unique_constraints(self, schema=None):
-        # SQLite dialect needs to parse the names of the constraints
-        # separately from what it gets from PRAGMA index_list(), and
-        # then matches them up. so same set of column_names in two
-        # constraints will confuse it. Perhaps we should no longer
-        # bother with index_list() here since we have the whole
-        # CREATE TABLE?
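The index- and unique-constraint tests in this hunk all drive the same runtime Inspector API. A minimal standalone sketch of that API, assuming an in-memory SQLite engine and made-up table/constraint names (none of these are fixtures from the suite being removed):

    from sqlalchemy import (Column, Index, MetaData, String, Table,
                            UniqueConstraint, create_engine, inspect)

    # Illustrative schema; the URL and names are assumptions, not suite fixtures.
    engine = create_engine("sqlite://")
    meta = MetaData()
    t = Table("testtbl", meta,
              Column("a", String(20)),
              Column("b", String(30)),
              UniqueConstraint("a", "b", name="unique_a_b"))
    Index("tbl_idx", t.c.b)
    meta.create_all(engine)

    insp = inspect(engine)
    print(insp.get_indexes("testtbl"))
    # e.g. [{'name': 'tbl_idx', 'column_names': ['b'], 'unique': 0}]
    print(insp.get_unique_constraints("testtbl"))
    # e.g. [{'name': 'unique_a_b', 'column_names': ['a', 'b']}]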
-        uniques = sorted(
-            [
-                {'name': 'unique_a', 'column_names': ['a']},
-                {'name': 'unique_a_b_c', 'column_names': ['a', 'b', 'c']},
-                {'name': 'unique_c_a_b', 'column_names': ['c', 'a', 'b']},
-                {'name': 'unique_asc_key', 'column_names': ['asc', 'key']},
-                {'name': 'i.have.dots', 'column_names': ['b']},
-                {'name': 'i have spaces', 'column_names': ['c']},
-            ],
-            key=operator.itemgetter('name')
-        )
-        orig_meta = self.metadata
-        table = Table(
-            'testtbl', orig_meta,
-            Column('a', sa.String(20)),
-            Column('b', sa.String(30)),
-            Column('c', sa.Integer),
-            # reserved identifiers
-            Column('asc', sa.String(30)),
-            Column('key', sa.String(30)),
-            schema=schema
-        )
-        for uc in uniques:
-            table.append_constraint(
-                sa.UniqueConstraint(*uc['column_names'], name=uc['name'])
-            )
-        orig_meta.create_all()
-
-        inspector = inspect(orig_meta.bind)
-        reflected = sorted(
-            inspector.get_unique_constraints('testtbl', schema=schema),
-            key=operator.itemgetter('name')
-        )
-
-        for orig, refl in zip(uniques, reflected):
-            # Different dialects handle duplicate index and constraints
-            # differently, so ignore this flag
-            refl.pop('duplicates_index', None)
-            eq_(orig, refl)
-
-    @testing.provide_metadata
-    def _test_get_view_definition(self, schema=None):
-        meta = self.metadata
-        users, addresses, dingalings = self.tables.users, \
-            self.tables.email_addresses, self.tables.dingalings
-        view_name1 = 'users_v'
-        view_name2 = 'email_addresses_v'
-        insp = inspect(meta.bind)
-        v1 = insp.get_view_definition(view_name1, schema=schema)
-        self.assert_(v1)
-        v2 = insp.get_view_definition(view_name2, schema=schema)
-        self.assert_(v2)
-
-    @testing.requires.view_reflection
-    def test_get_view_definition(self):
-        self._test_get_view_definition()
-
-    @testing.requires.view_reflection
-    @testing.requires.schemas
-    def test_get_view_definition_with_schema(self):
-        self._test_get_view_definition(schema=testing.config.test_schema)
-
-    @testing.only_on("postgresql", "PG specific feature")
-    @testing.provide_metadata
-    def _test_get_table_oid(self, table_name, schema=None):
-        meta = self.metadata
-        users, addresses, dingalings = self.tables.users, \
-            self.tables.email_addresses, self.tables.dingalings
-        insp = inspect(meta.bind)
-        oid = insp.get_table_oid(table_name, schema)
-        self.assert_(isinstance(oid, int))
-
-    def test_get_table_oid(self):
-        self._test_get_table_oid('users')
-
-    @testing.requires.schemas
-    def test_get_table_oid_with_schema(self):
-        self._test_get_table_oid('users', schema=testing.config.test_schema)
-
-    @testing.requires.table_reflection
-    @testing.provide_metadata
-    def test_autoincrement_col(self):
-        """test that 'autoincrement' is reflected according to sqla's policy.
-
-        Don't mark this test as unsupported for any backend !
-
-        (technically it fails with MySQL InnoDB since "id" comes before "id2")
-
-        A backend is better off not returning "autoincrement" at all,
-        instead of potentially returning "False" for an auto-incrementing
-        primary key column.
- - """ - - meta = self.metadata - insp = inspect(meta.bind) - - for tname, cname in [ - ('users', 'user_id'), - ('email_addresses', 'address_id'), - ('dingalings', 'dingaling_id'), - ]: - cols = insp.get_columns(tname) - id_ = dict((c['name'], c) for c in cols)[cname] - assert id_.get('autoincrement', True) - - -__all__ = ('ComponentReflectionTest', 'HasTableTest') diff --git a/python/sqlalchemy/testing/suite/test_results.py b/python/sqlalchemy/testing/suite/test_results.py deleted file mode 100644 index 9ffaa6e0..00000000 --- a/python/sqlalchemy/testing/suite/test_results.py +++ /dev/null @@ -1,220 +0,0 @@ -from .. import fixtures, config -from ..config import requirements -from .. import exclusions -from ..assertions import eq_ -from .. import engines - -from sqlalchemy import Integer, String, select, util, sql, DateTime -import datetime -from ..schema import Table, Column - - -class RowFetchTest(fixtures.TablesTest): - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table('plain_pk', metadata, - Column('id', Integer, primary_key=True), - Column('data', String(50)) - ) - Table('has_dates', metadata, - Column('id', Integer, primary_key=True), - Column('today', DateTime) - ) - - @classmethod - def insert_data(cls): - config.db.execute( - cls.tables.plain_pk.insert(), - [ - {"id": 1, "data": "d1"}, - {"id": 2, "data": "d2"}, - {"id": 3, "data": "d3"}, - ] - ) - - config.db.execute( - cls.tables.has_dates.insert(), - [ - {"id": 1, "today": datetime.datetime(2006, 5, 12, 12, 0, 0)} - ] - ) - - def test_via_string(self): - row = config.db.execute( - self.tables.plain_pk.select(). - order_by(self.tables.plain_pk.c.id) - ).first() - - eq_( - row['id'], 1 - ) - eq_( - row['data'], "d1" - ) - - def test_via_int(self): - row = config.db.execute( - self.tables.plain_pk.select(). - order_by(self.tables.plain_pk.c.id) - ).first() - - eq_( - row[0], 1 - ) - eq_( - row[1], "d1" - ) - - def test_via_col_object(self): - row = config.db.execute( - self.tables.plain_pk.select(). - order_by(self.tables.plain_pk.c.id) - ).first() - - eq_( - row[self.tables.plain_pk.c.id], 1 - ) - eq_( - row[self.tables.plain_pk.c.data], "d1" - ) - - @requirements.duplicate_names_in_cursor_description - def test_row_with_dupe_names(self): - result = config.db.execute( - select([self.tables.plain_pk.c.data, - self.tables.plain_pk.c.data.label('data')]). - order_by(self.tables.plain_pk.c.id) - ) - row = result.first() - eq_(result.keys(), ['data', 'data']) - eq_(row, ('d1', 'd1')) - - def test_row_w_scalar_select(self): - """test that a scalar select as a column is returned as such - and that type conversion works OK. - - (this is half a SQLAlchemy Core test and half to catch database - backends that may have unusual behavior with scalar selects.) - - """ - datetable = self.tables.has_dates - s = select([datetable.alias('x').c.today]).as_scalar() - s2 = select([datetable.c.id, s.label('somelabel')]) - row = config.db.execute(s2).first() - - eq_(row['somelabel'], datetime.datetime(2006, 5, 12, 12, 0, 0)) - - -class PercentSchemaNamesTest(fixtures.TablesTest): - """tests using percent signs, spaces in table and column names. - - This is a very fringe use case, doesn't work for MySQL - or Postgresql. the requirement, "percent_schema_names", - is marked "skip" by default. 
- - """ - - __requires__ = ('percent_schema_names', ) - - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - cls.tables.percent_table = Table('percent%table', metadata, - Column("percent%", Integer), - Column( - "spaces % more spaces", Integer), - ) - cls.tables.lightweight_percent_table = sql.table( - 'percent%table', sql.column("percent%"), - sql.column("spaces % more spaces") - ) - - def test_single_roundtrip(self): - percent_table = self.tables.percent_table - for params in [ - {'percent%': 5, 'spaces % more spaces': 12}, - {'percent%': 7, 'spaces % more spaces': 11}, - {'percent%': 9, 'spaces % more spaces': 10}, - {'percent%': 11, 'spaces % more spaces': 9} - ]: - config.db.execute(percent_table.insert(), params) - self._assert_table() - - def test_executemany_roundtrip(self): - percent_table = self.tables.percent_table - config.db.execute( - percent_table.insert(), - {'percent%': 5, 'spaces % more spaces': 12} - ) - config.db.execute( - percent_table.insert(), - [{'percent%': 7, 'spaces % more spaces': 11}, - {'percent%': 9, 'spaces % more spaces': 10}, - {'percent%': 11, 'spaces % more spaces': 9}] - ) - self._assert_table() - - def _assert_table(self): - percent_table = self.tables.percent_table - lightweight_percent_table = self.tables.lightweight_percent_table - - for table in ( - percent_table, - percent_table.alias(), - lightweight_percent_table, - lightweight_percent_table.alias()): - eq_( - list( - config.db.execute( - table.select().order_by(table.c['percent%']) - ) - ), - [ - (5, 12), - (7, 11), - (9, 10), - (11, 9) - ] - ) - - eq_( - list( - config.db.execute( - table.select(). - where(table.c['spaces % more spaces'].in_([9, 10])). - order_by(table.c['percent%']), - ) - ), - [ - (9, 10), - (11, 9) - ] - ) - - row = config.db.execute(table.select(). - order_by(table.c['percent%'])).first() - eq_(row['percent%'], 5) - eq_(row['spaces % more spaces'], 12) - - eq_(row[table.c['percent%']], 5) - eq_(row[table.c['spaces % more spaces']], 12) - - config.db.execute( - percent_table.update().values( - {percent_table.c['spaces % more spaces']: 15} - ) - ) - - eq_( - list( - config.db.execute( - percent_table. - select(). - order_by(percent_table.c['percent%']) - ) - ), - [(5, 15), (7, 15), (9, 15), (11, 15)] - ) diff --git a/python/sqlalchemy/testing/suite/test_select.py b/python/sqlalchemy/testing/suite/test_select.py deleted file mode 100644 index d4bf63b5..00000000 --- a/python/sqlalchemy/testing/suite/test_select.py +++ /dev/null @@ -1,192 +0,0 @@ -from .. import fixtures, config -from ..assertions import eq_ - -from sqlalchemy import util -from sqlalchemy import Integer, String, select, func, bindparam -from sqlalchemy import testing - -from ..schema import Table, Column - - -class OrderByLabelTest(fixtures.TablesTest): - """Test the dialect sends appropriate ORDER BY expressions when - labels are used. - - This essentially exercises the "supports_simple_order_by_label" - setting. 
- - """ - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table("some_table", metadata, - Column('id', Integer, primary_key=True), - Column('x', Integer), - Column('y', Integer), - Column('q', String(50)), - Column('p', String(50)) - ) - - @classmethod - def insert_data(cls): - config.db.execute( - cls.tables.some_table.insert(), - [ - {"id": 1, "x": 1, "y": 2, "q": "q1", "p": "p3"}, - {"id": 2, "x": 2, "y": 3, "q": "q2", "p": "p2"}, - {"id": 3, "x": 3, "y": 4, "q": "q3", "p": "p1"}, - ] - ) - - def _assert_result(self, select, result): - eq_( - config.db.execute(select).fetchall(), - result - ) - - def test_plain(self): - table = self.tables.some_table - lx = table.c.x.label('lx') - self._assert_result( - select([lx]).order_by(lx), - [(1, ), (2, ), (3, )] - ) - - def test_composed_int(self): - table = self.tables.some_table - lx = (table.c.x + table.c.y).label('lx') - self._assert_result( - select([lx]).order_by(lx), - [(3, ), (5, ), (7, )] - ) - - def test_composed_multiple(self): - table = self.tables.some_table - lx = (table.c.x + table.c.y).label('lx') - ly = (func.lower(table.c.q) + table.c.p).label('ly') - self._assert_result( - select([lx, ly]).order_by(lx, ly.desc()), - [(3, util.u('q1p3')), (5, util.u('q2p2')), (7, util.u('q3p1'))] - ) - - def test_plain_desc(self): - table = self.tables.some_table - lx = table.c.x.label('lx') - self._assert_result( - select([lx]).order_by(lx.desc()), - [(3, ), (2, ), (1, )] - ) - - def test_composed_int_desc(self): - table = self.tables.some_table - lx = (table.c.x + table.c.y).label('lx') - self._assert_result( - select([lx]).order_by(lx.desc()), - [(7, ), (5, ), (3, )] - ) - - def test_group_by_composed(self): - table = self.tables.some_table - expr = (table.c.x + table.c.y).label('lx') - stmt = select([func.count(table.c.id), expr]).group_by(expr).order_by(expr) - self._assert_result( - stmt, - [(1, 3), (1, 5), (1, 7)] - ) - - -class LimitOffsetTest(fixtures.TablesTest): - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table("some_table", metadata, - Column('id', Integer, primary_key=True), - Column('x', Integer), - Column('y', Integer)) - - @classmethod - def insert_data(cls): - config.db.execute( - cls.tables.some_table.insert(), - [ - {"id": 1, "x": 1, "y": 2}, - {"id": 2, "x": 2, "y": 3}, - {"id": 3, "x": 3, "y": 4}, - {"id": 4, "x": 4, "y": 5}, - ] - ) - - def _assert_result(self, select, result, params=()): - eq_( - config.db.execute(select, params).fetchall(), - result - ) - - def test_simple_limit(self): - table = self.tables.some_table - self._assert_result( - select([table]).order_by(table.c.id).limit(2), - [(1, 1, 2), (2, 2, 3)] - ) - - @testing.requires.offset - def test_simple_offset(self): - table = self.tables.some_table - self._assert_result( - select([table]).order_by(table.c.id).offset(2), - [(3, 3, 4), (4, 4, 5)] - ) - - @testing.requires.offset - def test_simple_limit_offset(self): - table = self.tables.some_table - self._assert_result( - select([table]).order_by(table.c.id).limit(2).offset(1), - [(2, 2, 3), (3, 3, 4)] - ) - - @testing.requires.offset - def test_limit_offset_nobinds(self): - """test that 'literal binds' mode works - no bound params.""" - - table = self.tables.some_table - stmt = select([table]).order_by(table.c.id).limit(2).offset(1) - sql = stmt.compile( - dialect=config.db.dialect, - compile_kwargs={"literal_binds": True}) - sql = str(sql) - - self._assert_result( - sql, - [(2, 2, 3), (3, 3, 4)] - ) - - @testing.requires.bound_limit_offset - def 
test_bound_limit(self): - table = self.tables.some_table - self._assert_result( - select([table]).order_by(table.c.id).limit(bindparam('l')), - [(1, 1, 2), (2, 2, 3)], - params={"l": 2} - ) - - @testing.requires.bound_limit_offset - def test_bound_offset(self): - table = self.tables.some_table - self._assert_result( - select([table]).order_by(table.c.id).offset(bindparam('o')), - [(3, 3, 4), (4, 4, 5)], - params={"o": 2} - ) - - @testing.requires.bound_limit_offset - def test_bound_limit_offset(self): - table = self.tables.some_table - self._assert_result( - select([table]).order_by(table.c.id). - limit(bindparam("l")).offset(bindparam("o")), - [(2, 2, 3), (3, 3, 4)], - params={"l": 2, "o": 1} - ) diff --git a/python/sqlalchemy/testing/suite/test_sequence.py b/python/sqlalchemy/testing/suite/test_sequence.py deleted file mode 100644 index bbb4ba65..00000000 --- a/python/sqlalchemy/testing/suite/test_sequence.py +++ /dev/null @@ -1,126 +0,0 @@ -from .. import fixtures, config -from ..config import requirements -from ..assertions import eq_ -from ... import testing - -from ... import Integer, String, Sequence, schema - -from ..schema import Table, Column - - -class SequenceTest(fixtures.TablesTest): - __requires__ = ('sequences',) - __backend__ = True - - run_create_tables = 'each' - - @classmethod - def define_tables(cls, metadata): - Table('seq_pk', metadata, - Column('id', Integer, Sequence('tab_id_seq'), primary_key=True), - Column('data', String(50)) - ) - - Table('seq_opt_pk', metadata, - Column('id', Integer, Sequence('tab_id_seq', optional=True), - primary_key=True), - Column('data', String(50)) - ) - - def test_insert_roundtrip(self): - config.db.execute( - self.tables.seq_pk.insert(), - data="some data" - ) - self._assert_round_trip(self.tables.seq_pk, config.db) - - def test_insert_lastrowid(self): - r = config.db.execute( - self.tables.seq_pk.insert(), - data="some data" - ) - eq_( - r.inserted_primary_key, - [1] - ) - - def test_nextval_direct(self): - r = config.db.execute( - self.tables.seq_pk.c.id.default - ) - eq_( - r, 1 - ) - - @requirements.sequences_optional - def test_optional_seq(self): - r = config.db.execute( - self.tables.seq_opt_pk.insert(), - data="some data" - ) - eq_( - r.inserted_primary_key, - [1] - ) - - def _assert_round_trip(self, table, conn): - row = conn.execute(table.select()).first() - eq_( - row, - (1, "some data") - ) - - -class HasSequenceTest(fixtures.TestBase): - __requires__ = 'sequences', - __backend__ = True - - def test_has_sequence(self): - s1 = Sequence('user_id_seq') - testing.db.execute(schema.CreateSequence(s1)) - try: - eq_(testing.db.dialect.has_sequence(testing.db, - 'user_id_seq'), True) - finally: - testing.db.execute(schema.DropSequence(s1)) - - @testing.requires.schemas - def test_has_sequence_schema(self): - s1 = Sequence('user_id_seq', schema="test_schema") - testing.db.execute(schema.CreateSequence(s1)) - try: - eq_(testing.db.dialect.has_sequence( - testing.db, 'user_id_seq', schema="test_schema"), True) - finally: - testing.db.execute(schema.DropSequence(s1)) - - def test_has_sequence_neg(self): - eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), - False) - - @testing.requires.schemas - def test_has_sequence_schemas_neg(self): - eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', - schema="test_schema"), - False) - - @testing.requires.schemas - def test_has_sequence_default_not_in_remote(self): - s1 = Sequence('user_id_seq') - testing.db.execute(schema.CreateSequence(s1)) - try: - 
eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', - schema="test_schema"), - False) - finally: - testing.db.execute(schema.DropSequence(s1)) - - @testing.requires.schemas - def test_has_sequence_remote_not_in_default(self): - s1 = Sequence('user_id_seq', schema="test_schema") - testing.db.execute(schema.CreateSequence(s1)) - try: - eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), - False) - finally: - testing.db.execute(schema.DropSequence(s1)) diff --git a/python/sqlalchemy/testing/suite/test_types.py b/python/sqlalchemy/testing/suite/test_types.py deleted file mode 100644 index 230aeb1e..00000000 --- a/python/sqlalchemy/testing/suite/test_types.py +++ /dev/null @@ -1,594 +0,0 @@ -# coding: utf-8 - -from .. import fixtures, config -from ..assertions import eq_ -from ..config import requirements -from sqlalchemy import Integer, Unicode, UnicodeText, select -from sqlalchemy import Date, DateTime, Time, MetaData, String, \ - Text, Numeric, Float, literal, Boolean -from ..schema import Table, Column -from ... import testing -import decimal -import datetime -from ...util import u -from ... import util - - -class _LiteralRoundTripFixture(object): - @testing.provide_metadata - def _literal_round_trip(self, type_, input_, output, filter_=None): - """test literal rendering """ - - # for literal, we test the literal render in an INSERT - # into a typed column. we can then SELECT it back as its - # official type; ideally we'd be able to use CAST here - # but MySQL in particular can't CAST fully - t = Table('t', self.metadata, Column('x', type_)) - t.create() - - for value in input_: - ins = t.insert().values(x=literal(value)).compile( - dialect=testing.db.dialect, - compile_kwargs=dict(literal_binds=True) - ) - testing.db.execute(ins) - - for row in t.select().execute(): - value = row[0] - if filter_ is not None: - value = filter_(value) - assert value in output - - -class _UnicodeFixture(_LiteralRoundTripFixture): - __requires__ = 'unicode_data', - - data = u("Alors vous imaginez ma surprise, au lever du jour, " - "quand une drôle de petite voix m’a réveillé. Elle " - "disait: « S’il vous plaît… dessine-moi un mouton! 
»") - - @classmethod - def define_tables(cls, metadata): - Table('unicode_table', metadata, - Column('id', Integer, primary_key=True, - test_needs_autoincrement=True), - Column('unicode_data', cls.datatype), - ) - - def test_round_trip(self): - unicode_table = self.tables.unicode_table - - config.db.execute( - unicode_table.insert(), - { - 'unicode_data': self.data, - } - ) - - row = config.db.execute( - select([ - unicode_table.c.unicode_data, - ]) - ).first() - - eq_( - row, - (self.data, ) - ) - assert isinstance(row[0], util.text_type) - - def test_round_trip_executemany(self): - unicode_table = self.tables.unicode_table - - config.db.execute( - unicode_table.insert(), - [ - { - 'unicode_data': self.data, - } - for i in range(3) - ] - ) - - rows = config.db.execute( - select([ - unicode_table.c.unicode_data, - ]) - ).fetchall() - eq_( - rows, - [(self.data, ) for i in range(3)] - ) - for row in rows: - assert isinstance(row[0], util.text_type) - - def _test_empty_strings(self): - unicode_table = self.tables.unicode_table - - config.db.execute( - unicode_table.insert(), - {"unicode_data": u('')} - ) - row = config.db.execute( - select([unicode_table.c.unicode_data]) - ).first() - eq_(row, (u(''),)) - - def test_literal(self): - self._literal_round_trip(self.datatype, [self.data], [self.data]) - - -class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest): - __requires__ = 'unicode_data', - __backend__ = True - - datatype = Unicode(255) - - @requirements.empty_strings_varchar - def test_empty_strings_varchar(self): - self._test_empty_strings() - - -class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest): - __requires__ = 'unicode_data', 'text_type' - __backend__ = True - - datatype = UnicodeText() - - @requirements.empty_strings_text - def test_empty_strings_text(self): - self._test_empty_strings() - - -class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest): - __requires__ = 'text_type', - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table('text_table', metadata, - Column('id', Integer, primary_key=True, - test_needs_autoincrement=True), - Column('text_data', Text), - ) - - def test_text_roundtrip(self): - text_table = self.tables.text_table - - config.db.execute( - text_table.insert(), - {"text_data": 'some text'} - ) - row = config.db.execute( - select([text_table.c.text_data]) - ).first() - eq_(row, ('some text',)) - - def test_text_empty_strings(self): - text_table = self.tables.text_table - - config.db.execute( - text_table.insert(), - {"text_data": ''} - ) - row = config.db.execute( - select([text_table.c.text_data]) - ).first() - eq_(row, ('',)) - - def test_literal(self): - self._literal_round_trip(Text, ["some text"], ["some text"]) - - def test_literal_quoting(self): - data = '''some 'text' hey "hi there" that's text''' - self._literal_round_trip(Text, [data], [data]) - - def test_literal_backslashes(self): - data = r'backslash one \ backslash two \\ end' - self._literal_round_trip(Text, [data], [data]) - - -class StringTest(_LiteralRoundTripFixture, fixtures.TestBase): - __backend__ = True - - @requirements.unbounded_varchar - def test_nolength_string(self): - metadata = MetaData() - foo = Table('foo', metadata, - Column('one', String) - ) - - foo.create(config.db) - foo.drop(config.db) - - def test_literal(self): - self._literal_round_trip(String(40), ["some text"], ["some text"]) - - def test_literal_quoting(self): - data = '''some 'text' hey "hi there" that's text''' - self._literal_round_trip(String(40), [data], [data]) - - def 
test_literal_backslashes(self): - data = r'backslash one \ backslash two \\ end' - self._literal_round_trip(String(40), [data], [data]) - - -class _DateFixture(_LiteralRoundTripFixture): - compare = None - - @classmethod - def define_tables(cls, metadata): - Table('date_table', metadata, - Column('id', Integer, primary_key=True, - test_needs_autoincrement=True), - Column('date_data', cls.datatype), - ) - - def test_round_trip(self): - date_table = self.tables.date_table - - config.db.execute( - date_table.insert(), - {'date_data': self.data} - ) - - row = config.db.execute( - select([ - date_table.c.date_data, - ]) - ).first() - - compare = self.compare or self.data - eq_(row, - (compare, )) - assert isinstance(row[0], type(compare)) - - def test_null(self): - date_table = self.tables.date_table - - config.db.execute( - date_table.insert(), - {'date_data': None} - ) - - row = config.db.execute( - select([ - date_table.c.date_data, - ]) - ).first() - eq_(row, (None,)) - - @testing.requires.datetime_literals - def test_literal(self): - compare = self.compare or self.data - self._literal_round_trip(self.datatype, [self.data], [compare]) - - -class DateTimeTest(_DateFixture, fixtures.TablesTest): - __requires__ = 'datetime', - __backend__ = True - datatype = DateTime - data = datetime.datetime(2012, 10, 15, 12, 57, 18) - - -class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): - __requires__ = 'datetime_microseconds', - __backend__ = True - datatype = DateTime - data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396) - - -class TimeTest(_DateFixture, fixtures.TablesTest): - __requires__ = 'time', - __backend__ = True - datatype = Time - data = datetime.time(12, 57, 18) - - -class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): - __requires__ = 'time_microseconds', - __backend__ = True - datatype = Time - data = datetime.time(12, 57, 18, 396) - - -class DateTest(_DateFixture, fixtures.TablesTest): - __requires__ = 'date', - __backend__ = True - datatype = Date - data = datetime.date(2012, 10, 15) - - -class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest): - __requires__ = 'date', 'date_coerces_from_datetime' - __backend__ = True - datatype = Date - data = datetime.datetime(2012, 10, 15, 12, 57, 18) - compare = datetime.date(2012, 10, 15) - - -class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest): - __requires__ = 'datetime_historic', - __backend__ = True - datatype = DateTime - data = datetime.datetime(1850, 11, 10, 11, 52, 35) - - -class DateHistoricTest(_DateFixture, fixtures.TablesTest): - __requires__ = 'date_historic', - __backend__ = True - datatype = Date - data = datetime.date(1727, 4, 1) - - -class IntegerTest(_LiteralRoundTripFixture, fixtures.TestBase): - __backend__ = True - - def test_literal(self): - self._literal_round_trip(Integer, [5], [5]) - - -class NumericTest(_LiteralRoundTripFixture, fixtures.TestBase): - __backend__ = True - - @testing.emits_warning(r".*does \*not\* support Decimal objects natively") - @testing.provide_metadata - def _do_test(self, type_, input_, output, - filter_=None, check_scale=False): - metadata = self.metadata - t = Table('t', metadata, Column('x', type_)) - t.create() - t.insert().execute([{'x': x} for x in input_]) - - result = set([row[0] for row in t.select().execute()]) - output = set(output) - if filter_: - result = set(filter_(x) for x in result) - output = set(filter_(x) for x in output) - eq_(result, output) - if check_scale: - eq_( - [str(x) for x in result], - [str(x) for x in output], - ) - 
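The NumericTest methods below pivot on the asdecimal flag: Numeric hands back decimal.Decimal values by default, while Numeric(..., asdecimal=False) and Float return plain floats. A minimal sketch of that distinction, assuming an in-memory SQLite engine and the 1.x-style engine.execute() used throughout this vendored suite (on SQLite this also emits the "does *not* support Decimal objects natively" warning the decorators above filter):

    import decimal
    from sqlalchemy import Column, MetaData, Numeric, Table, create_engine

    engine = create_engine("sqlite://")  # illustrative backend, not a suite fixture
    meta = MetaData()
    t = Table("t", meta,
              Column("x", Numeric(8, 4)),                   # returns Decimal
              Column("y", Numeric(8, 4, asdecimal=False)))  # returns float
    meta.create_all(engine)

    engine.execute(t.insert(), {"x": decimal.Decimal("15.7563"), "y": 15.7563})
    row = engine.execute(t.select()).first()
    print(type(row.x), type(row.y))  # Decimal vs. float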
- @testing.emits_warning(r".*does \*not\* support Decimal objects natively") - def test_render_literal_numeric(self): - self._literal_round_trip( - Numeric(precision=8, scale=4), - [15.7563, decimal.Decimal("15.7563")], - [decimal.Decimal("15.7563")], - ) - - @testing.emits_warning(r".*does \*not\* support Decimal objects natively") - def test_render_literal_numeric_asfloat(self): - self._literal_round_trip( - Numeric(precision=8, scale=4, asdecimal=False), - [15.7563, decimal.Decimal("15.7563")], - [15.7563], - ) - - def test_render_literal_float(self): - self._literal_round_trip( - Float(4), - [15.7563, decimal.Decimal("15.7563")], - [15.7563, ], - filter_=lambda n: n is not None and round(n, 5) or None - ) - - @testing.requires.precision_generic_float_type - def test_float_custom_scale(self): - self._do_test( - Float(None, decimal_return_scale=7, asdecimal=True), - [15.7563827, decimal.Decimal("15.7563827")], - [decimal.Decimal("15.7563827"), ], - check_scale=True - ) - - def test_numeric_as_decimal(self): - self._do_test( - Numeric(precision=8, scale=4), - [15.7563, decimal.Decimal("15.7563")], - [decimal.Decimal("15.7563")], - ) - - def test_numeric_as_float(self): - self._do_test( - Numeric(precision=8, scale=4, asdecimal=False), - [15.7563, decimal.Decimal("15.7563")], - [15.7563], - ) - - @testing.requires.fetch_null_from_numeric - def test_numeric_null_as_decimal(self): - self._do_test( - Numeric(precision=8, scale=4), - [None], - [None], - ) - - @testing.requires.fetch_null_from_numeric - def test_numeric_null_as_float(self): - self._do_test( - Numeric(precision=8, scale=4, asdecimal=False), - [None], - [None], - ) - - @testing.requires.floats_to_four_decimals - def test_float_as_decimal(self): - self._do_test( - Float(precision=8, asdecimal=True), - [15.7563, decimal.Decimal("15.7563"), None], - [decimal.Decimal("15.7563"), None], - ) - - def test_float_as_float(self): - self._do_test( - Float(precision=8), - [15.7563, decimal.Decimal("15.7563")], - [15.7563], - filter_=lambda n: n is not None and round(n, 5) or None - ) - - @testing.requires.precision_numerics_general - def test_precision_decimal(self): - numbers = set([ - decimal.Decimal("54.234246451650"), - decimal.Decimal("0.004354"), - decimal.Decimal("900.0"), - ]) - - self._do_test( - Numeric(precision=18, scale=12), - numbers, - numbers, - ) - - @testing.requires.precision_numerics_enotation_large - def test_enotation_decimal(self): - """test exceedingly small decimals. - - Decimal reports values with E notation when the exponent - is greater than 6. - - """ - - numbers = set([ - decimal.Decimal('1E-2'), - decimal.Decimal('1E-3'), - decimal.Decimal('1E-4'), - decimal.Decimal('1E-5'), - decimal.Decimal('1E-6'), - decimal.Decimal('1E-7'), - decimal.Decimal('1E-8'), - decimal.Decimal("0.01000005940696"), - decimal.Decimal("0.00000005940696"), - decimal.Decimal("0.00000000000696"), - decimal.Decimal("0.70000000000696"), - decimal.Decimal("696E-12"), - ]) - self._do_test( - Numeric(precision=18, scale=14), - numbers, - numbers - ) - - @testing.requires.precision_numerics_enotation_large - def test_enotation_decimal_large(self): - """test exceedingly large decimals. 
- - """ - - numbers = set([ - decimal.Decimal('4E+8'), - decimal.Decimal("5748E+15"), - decimal.Decimal('1.521E+15'), - decimal.Decimal('00000000000000.1E+12'), - ]) - self._do_test( - Numeric(precision=25, scale=2), - numbers, - numbers - ) - - @testing.requires.precision_numerics_many_significant_digits - def test_many_significant_digits(self): - numbers = set([ - decimal.Decimal("31943874831932418390.01"), - decimal.Decimal("319438950232418390.273596"), - decimal.Decimal("87673.594069654243"), - ]) - self._do_test( - Numeric(precision=38, scale=12), - numbers, - numbers - ) - - @testing.requires.precision_numerics_retains_significant_digits - def test_numeric_no_decimal(self): - numbers = set([ - decimal.Decimal("1.000") - ]) - self._do_test( - Numeric(precision=5, scale=3), - numbers, - numbers, - check_scale=True - ) - - -class BooleanTest(_LiteralRoundTripFixture, fixtures.TablesTest): - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table('boolean_table', metadata, - Column('id', Integer, primary_key=True, autoincrement=False), - Column('value', Boolean), - Column('unconstrained_value', Boolean(create_constraint=False)), - ) - - def test_render_literal_bool(self): - self._literal_round_trip( - Boolean(), - [True, False], - [True, False] - ) - - def test_round_trip(self): - boolean_table = self.tables.boolean_table - - config.db.execute( - boolean_table.insert(), - { - 'id': 1, - 'value': True, - 'unconstrained_value': False - } - ) - - row = config.db.execute( - select([ - boolean_table.c.value, - boolean_table.c.unconstrained_value - ]) - ).first() - - eq_( - row, - (True, False) - ) - assert isinstance(row[0], bool) - - def test_null(self): - boolean_table = self.tables.boolean_table - - config.db.execute( - boolean_table.insert(), - { - 'id': 1, - 'value': None, - 'unconstrained_value': None - } - ) - - row = config.db.execute( - select([ - boolean_table.c.value, - boolean_table.c.unconstrained_value - ]) - ).first() - - eq_( - row, - (None, None) - ) - - -__all__ = ('UnicodeVarcharTest', 'UnicodeTextTest', - 'DateTest', 'DateTimeTest', 'TextTest', - 'NumericTest', 'IntegerTest', - 'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest', - 'TimeMicrosecondsTest', 'TimeTest', 'DateTimeMicrosecondsTest', - 'DateHistoricTest', 'StringTest', 'BooleanTest') diff --git a/python/sqlalchemy/testing/suite/test_update_delete.py b/python/sqlalchemy/testing/suite/test_update_delete.py deleted file mode 100644 index e4c61e74..00000000 --- a/python/sqlalchemy/testing/suite/test_update_delete.py +++ /dev/null @@ -1,63 +0,0 @@ -from .. 
import fixtures, config -from ..assertions import eq_ - -from sqlalchemy import Integer, String -from ..schema import Table, Column - - -class SimpleUpdateDeleteTest(fixtures.TablesTest): - run_deletes = 'each' - __backend__ = True - - @classmethod - def define_tables(cls, metadata): - Table('plain_pk', metadata, - Column('id', Integer, primary_key=True), - Column('data', String(50)) - ) - - @classmethod - def insert_data(cls): - config.db.execute( - cls.tables.plain_pk.insert(), - [ - {"id": 1, "data": "d1"}, - {"id": 2, "data": "d2"}, - {"id": 3, "data": "d3"}, - ] - ) - - def test_update(self): - t = self.tables.plain_pk - r = config.db.execute( - t.update().where(t.c.id == 2), - data="d2_new" - ) - assert not r.is_insert - assert not r.returns_rows - - eq_( - config.db.execute(t.select().order_by(t.c.id)).fetchall(), - [ - (1, "d1"), - (2, "d2_new"), - (3, "d3") - ] - ) - - def test_delete(self): - t = self.tables.plain_pk - r = config.db.execute( - t.delete().where(t.c.id == 2) - ) - assert not r.is_insert - assert not r.returns_rows - eq_( - config.db.execute(t.select().order_by(t.c.id)).fetchall(), - [ - (1, "d1"), - (3, "d3") - ] - ) - -__all__ = ('SimpleUpdateDeleteTest', ) diff --git a/python/sqlalchemy/testing/util.py b/python/sqlalchemy/testing/util.py deleted file mode 100644 index e9437948..00000000 --- a/python/sqlalchemy/testing/util.py +++ /dev/null @@ -1,280 +0,0 @@ -# testing/util.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from ..util import jython, pypy, defaultdict, decorator, py2k -import decimal -import gc -import time -import random -import sys -import types - -if jython: - def jython_gc_collect(*args): - """aggressive gc.collect for tests.""" - gc.collect() - time.sleep(0.1) - gc.collect() - gc.collect() - return 0 - - # "lazy" gc, for VM's that don't GC on refcount == 0 - gc_collect = lazy_gc = jython_gc_collect -elif pypy: - def pypy_gc_collect(*args): - gc.collect() - gc.collect() - gc_collect = lazy_gc = pypy_gc_collect -else: - # assume CPython - straight gc.collect, lazy_gc() is a pass - gc_collect = gc.collect - - def lazy_gc(): - pass - - -def picklers(): - picklers = set() - if py2k: - try: - import cPickle - picklers.add(cPickle) - except ImportError: - pass - - import pickle - picklers.add(pickle) - - # yes, this thing needs this much testing - for pickle_ in picklers: - for protocol in -1, 0, 1, 2: - yield pickle_.loads, lambda d: pickle_.dumps(d, protocol) - - -def round_decimal(value, prec): - if isinstance(value, float): - return round(value, prec) - - # can also use shift() here but that is 2.6 only - return (value * decimal.Decimal("1" + "0" * prec) - ).to_integral(decimal.ROUND_FLOOR) / \ - pow(10, prec) - - -class RandomSet(set): - def __iter__(self): - l = list(set.__iter__(self)) - random.shuffle(l) - return iter(l) - - def pop(self): - index = random.randint(0, len(self) - 1) - item = list(set.__iter__(self))[index] - self.remove(item) - return item - - def union(self, other): - return RandomSet(set.union(self, other)) - - def difference(self, other): - return RandomSet(set.difference(self, other)) - - def intersection(self, other): - return RandomSet(set.intersection(self, other)) - - def copy(self): - return RandomSet(self) - - -def conforms_partial_ordering(tuples, sorted_elements): - """True if the given sorting conforms to the given partial ordering.""" - - deps = 
defaultdict(set) - for parent, child in tuples: - deps[parent].add(child) - for i, node in enumerate(sorted_elements): - for n in sorted_elements[i:]: - if node in deps[n]: - return False - else: - return True - - -def all_partial_orderings(tuples, elements): - edges = defaultdict(set) - for parent, child in tuples: - edges[child].add(parent) - - def _all_orderings(elements): - - if len(elements) == 1: - yield list(elements) - else: - for elem in elements: - subset = set(elements).difference([elem]) - if not subset.intersection(edges[elem]): - for sub_ordering in _all_orderings(subset): - yield [elem] + sub_ordering - - return iter(_all_orderings(elements)) - - -def function_named(fn, name): - """Return a function with a given __name__. - - Will assign to __name__ and return the original function if possible on - the Python implementation, otherwise a new function will be constructed. - - This function should be phased out as much as possible - in favor of @decorator. Tests that "generate" many named tests - should be modernized. - - """ - try: - fn.__name__ = name - except TypeError: - fn = types.FunctionType(fn.__code__, fn.__globals__, name, - fn.__defaults__, fn.__closure__) - return fn - - -def run_as_contextmanager(ctx, fn, *arg, **kw): - """Run the given function under the given contextmanager, - simulating the behavior of 'with' to support older - Python versions. - - This is not necessary anymore as we have placed 2.6 - as minimum Python version, however some tests are still using - this structure. - - """ - - obj = ctx.__enter__() - try: - result = fn(obj, *arg, **kw) - ctx.__exit__(None, None, None) - return result - except: - exc_info = sys.exc_info() - raise_ = ctx.__exit__(*exc_info) - if raise_ is None: - raise - else: - return raise_ - - -def rowset(results): - """Converts the results of sql execution into a plain set of column tuples. - - Useful for asserting the results of an unordered query. - """ - - return set([tuple(row) for row in results]) - - -def fail(msg): - assert False, msg - - -@decorator -def provide_metadata(fn, *args, **kw): - """Provide bound MetaData for a single test, dropping afterwards.""" - - from . import config - from . import engines - from sqlalchemy import schema - - metadata = schema.MetaData(config.db) - self = args[0] - prev_meta = getattr(self, 'metadata', None) - self.metadata = metadata - try: - return fn(*args, **kw) - finally: - engines.drop_all_tables(metadata, config.db) - self.metadata = prev_meta - - -def force_drop_names(*names): - """Force the given table names to be dropped after test complete, - isolating for foreign key cycles - - """ - from . import config - from sqlalchemy import inspect - - @decorator - def go(fn, *args, **kw): - - try: - return fn(*args, **kw) - finally: - drop_all_tables( - config.db, inspect(config.db), include_names=names) - return go - - -class adict(dict): - """Dict keys available as attributes. 
Shadows.""" - - def __getattribute__(self, key): - try: - return self[key] - except KeyError: - return dict.__getattribute__(self, key) - - def __call__(self, *keys): - return tuple([self[key] for key in keys]) - - get_all = __call__ - - -def drop_all_tables(engine, inspector, schema=None, include_names=None): - from sqlalchemy import Column, Table, Integer, MetaData, \ - ForeignKeyConstraint - from sqlalchemy.schema import DropTable, DropConstraint - - if include_names is not None: - include_names = set(include_names) - - with engine.connect() as conn: - for tname, fkcs in reversed( - inspector.get_sorted_table_and_fkc_names(schema=schema)): - if tname: - if include_names is not None and tname not in include_names: - continue - conn.execute(DropTable( - Table(tname, MetaData()) - )) - elif fkcs: - if not engine.dialect.supports_alter: - continue - for tname, fkc in fkcs: - if include_names is not None and \ - tname not in include_names: - continue - tb = Table( - tname, MetaData(), - Column('x', Integer), - Column('y', Integer), - schema=schema - ) - conn.execute(DropConstraint( - ForeignKeyConstraint( - [tb.c.x], [tb.c.y], name=fkc) - )) - - -def teardown_events(event_cls): - @decorator - def decorate(fn, *arg, **kw): - try: - return fn(*arg, **kw) - finally: - event_cls._clear() - return decorate - diff --git a/python/sqlalchemy/testing/warnings.py b/python/sqlalchemy/testing/warnings.py deleted file mode 100644 index 19b632d3..00000000 --- a/python/sqlalchemy/testing/warnings.py +++ /dev/null @@ -1,34 +0,0 @@ -# testing/warnings.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from __future__ import absolute_import - -import warnings -from .. import exc as sa_exc -from . import assertions - - -def setup_filters(): - """Set global warning behavior for the test suite.""" - - warnings.filterwarnings('ignore', - category=sa_exc.SAPendingDeprecationWarning) - warnings.filterwarnings('error', category=sa_exc.SADeprecationWarning) - warnings.filterwarnings('error', category=sa_exc.SAWarning) - - -def assert_warnings(fn, warning_msgs, regex=False): - """Assert that each of the given warnings are emitted by fn. - - Deprecated. Please use assertions.expect_warnings(). - - """ - - with assertions._expect_warnings( - sa_exc.SAWarning, warning_msgs, regex=regex): - return fn() - diff --git a/python/sqlalchemy/types.py b/python/sqlalchemy/types.py deleted file mode 100644 index 9ab92e90..00000000 --- a/python/sqlalchemy/types.py +++ /dev/null @@ -1,78 +0,0 @@ -# types.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Compatibility namespace for sqlalchemy.sql.types. 
- -""" - -__all__ = ['TypeEngine', 'TypeDecorator', 'UserDefinedType', - 'INT', 'CHAR', 'VARCHAR', 'NCHAR', 'NVARCHAR', 'TEXT', 'Text', - 'FLOAT', 'NUMERIC', 'REAL', 'DECIMAL', 'TIMESTAMP', 'DATETIME', - 'CLOB', 'BLOB', 'BINARY', 'VARBINARY', 'BOOLEAN', 'BIGINT', - 'SMALLINT', 'INTEGER', 'DATE', 'TIME', 'String', 'Integer', - 'SmallInteger', 'BigInteger', 'Numeric', 'Float', 'DateTime', - 'Date', 'Time', 'LargeBinary', 'Binary', 'Boolean', 'Unicode', - 'Concatenable', 'UnicodeText', 'PickleType', 'Interval', 'Enum'] - -from .sql.type_api import ( - adapt_type, - TypeEngine, - TypeDecorator, - Variant, - to_instance, - UserDefinedType -) -from .sql.sqltypes import ( - BIGINT, - BINARY, - BLOB, - BOOLEAN, - BigInteger, - Binary, - _Binary, - Boolean, - CHAR, - CLOB, - Concatenable, - DATE, - DATETIME, - DECIMAL, - Date, - DateTime, - Enum, - FLOAT, - Float, - INT, - INTEGER, - Integer, - Interval, - LargeBinary, - MatchType, - NCHAR, - NVARCHAR, - NullType, - NULLTYPE, - NUMERIC, - Numeric, - PickleType, - REAL, - SchemaType, - SMALLINT, - SmallInteger, - String, - STRINGTYPE, - TEXT, - TIME, - TIMESTAMP, - Text, - Time, - Unicode, - UnicodeText, - VARBINARY, - VARCHAR, - _type_map - ) diff --git a/python/sqlalchemy/util/__init__.py b/python/sqlalchemy/util/__init__.py deleted file mode 100644 index ed968f16..00000000 --- a/python/sqlalchemy/util/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -# util/__init__.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from .compat import callable, cmp, reduce, \ - threading, py3k, py33, py2k, jython, pypy, cpython, win32, \ - pickle, dottedgetter, parse_qsl, namedtuple, next, reraise, \ - raise_from_cause, text_type, safe_kwarg, string_types, int_types, \ - binary_type, nested, \ - quote_plus, with_metaclass, print_, itertools_filterfalse, u, ue, b,\ - unquote_plus, unquote, b64decode, b64encode, byte_buffer, itertools_filter,\ - iterbytes, StringIO, inspect_getargspec, zip_longest - -from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \ - Properties, OrderedProperties, ImmutableProperties, OrderedDict, \ - OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \ - column_dict, ordered_column_set, populate_column_dict, unique_list, \ - UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \ - to_column_set, update_copy, flatten_iterator, has_intersection, \ - LRUCache, ScopedRegistry, ThreadLocalRegistry, WeakSequence, \ - coerce_generator_arg, lightweight_named_tuple - -from .langhelpers import iterate_attributes, class_hierarchy, \ - portable_instancemethod, unbound_method_to_callable, \ - getargspec_init, format_argspec_init, format_argspec_plus, \ - get_func_kwargs, get_cls_kwargs, decorator, as_interface, \ - memoized_property, memoized_instancemethod, md5_hex, \ - group_expirable_memoized_property, dependencies, decode_slice, \ - monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\ - duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\ - classproperty, set_creation_order, warn_exception, warn, NoneType,\ - constructor_copy, methods_equivalent, chop_traceback, asint,\ - generic_repr, counter, PluginLoader, hybridproperty, hybridmethod, \ - safe_reraise,\ - get_callable_argspec, only_once, attrsetter, ellipses_string, \ - warn_limited, map_bits, MemoizedSlots, EnsureKWArgType - -from .deprecations import warn_deprecated, 
warn_pending_deprecation, \ - deprecated, pending_deprecation, inject_docstring_text - -# things that used to be not always available, -# but are now as of current support Python versions -from collections import defaultdict -from functools import partial -from functools import update_wrapper -from contextlib import contextmanager diff --git a/python/sqlalchemy/util/_collections.py b/python/sqlalchemy/util/_collections.py deleted file mode 100644 index 3869775c..00000000 --- a/python/sqlalchemy/util/_collections.py +++ /dev/null @@ -1,1043 +0,0 @@ -# util/_collections.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Collection classes and helpers.""" - -from __future__ import absolute_import -import weakref -import operator -from .compat import threading, itertools_filterfalse, string_types -from . import py2k -import types -import collections - -EMPTY_SET = frozenset() - - -class AbstractKeyedTuple(tuple): - __slots__ = () - - def keys(self): - """Return a list of string key names for this :class:`.KeyedTuple`. - - .. seealso:: - - :attr:`.KeyedTuple._fields` - - """ - - return list(self._fields) - - -class KeyedTuple(AbstractKeyedTuple): - """``tuple`` subclass that adds labeled names. - - E.g.:: - - >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"]) - >>> k.one - 1 - >>> k.two - 2 - - Result rows returned by :class:`.Query` that contain multiple - ORM entities and/or column expressions make use of this - class to return rows. - - The :class:`.KeyedTuple` exhibits similar behavior to the - ``collections.namedtuple()`` construct provided in the Python - standard library, however is architected very differently. - Unlike ``collections.namedtuple()``, :class:`.KeyedTuple` is - does not rely on creation of custom subtypes in order to represent - a new series of keys, instead each :class:`.KeyedTuple` instance - receives its list of keys in place. The subtype approach - of ``collections.namedtuple()`` introduces significant complexity - and performance overhead, which is not necessary for the - :class:`.Query` object's use case. - - .. versionchanged:: 0.8 - Compatibility methods with ``collections.namedtuple()`` have been - added including :attr:`.KeyedTuple._fields` and - :meth:`.KeyedTuple._asdict`. - - .. seealso:: - - :ref:`ormtutorial_querying` - - """ - - def __new__(cls, vals, labels=None): - t = tuple.__new__(cls, vals) - if labels: - t.__dict__.update(zip(labels, vals)) - else: - labels = [] - t.__dict__['_labels'] = labels - return t - - @property - def _fields(self): - """Return a tuple of string key names for this :class:`.KeyedTuple`. - - This method provides compatibility with ``collections.namedtuple()``. - - .. versionadded:: 0.8 - - .. seealso:: - - :meth:`.KeyedTuple.keys` - - """ - return tuple([l for l in self._labels if l is not None]) - - def __setattr__(self, key, value): - raise AttributeError("Can't set attribute: %s" % key) - - def _asdict(self): - """Return the contents of this :class:`.KeyedTuple` as a dictionary. - - This method provides compatibility with ``collections.namedtuple()``, - with the exception that the dictionary returned is **not** ordered. - - .. 
versionadded:: 0.8 - - """ - return dict((key, self.__dict__[key]) for key in self.keys()) - - -class _LW(AbstractKeyedTuple): - __slots__ = () - - def __new__(cls, vals): - return tuple.__new__(cls, vals) - - def __reduce__(self): - # for pickling, degrade down to the regular - # KeyedTuple, thus avoiding anonymous class pickling - # difficulties - return KeyedTuple, (list(self), self._real_fields) - - def _asdict(self): - """Return the contents of this :class:`.KeyedTuple` as a dictionary.""" - - d = dict(zip(self._real_fields, self)) - d.pop(None, None) - return d - - -class ImmutableContainer(object): - def _immutable(self, *arg, **kw): - raise TypeError("%s object is immutable" % self.__class__.__name__) - - __delitem__ = __setitem__ = __setattr__ = _immutable - - -class immutabledict(ImmutableContainer, dict): - - clear = pop = popitem = setdefault = \ - update = ImmutableContainer._immutable - - def __new__(cls, *args): - new = dict.__new__(cls) - dict.__init__(new, *args) - return new - - def __init__(self, *args): - pass - - def __reduce__(self): - return immutabledict, (dict(self), ) - - def union(self, d): - if not d: - return self - elif not self: - if isinstance(d, immutabledict): - return d - else: - return immutabledict(d) - else: - d2 = immutabledict(self) - dict.update(d2, d) - return d2 - - def __repr__(self): - return "immutabledict(%s)" % dict.__repr__(self) - - -class Properties(object): - """Provide a __getattr__/__setattr__ interface over a dict.""" - - __slots__ = '_data', - - def __init__(self, data): - object.__setattr__(self, '_data', data) - - def __len__(self): - return len(self._data) - - def __iter__(self): - return iter(list(self._data.values())) - - def __add__(self, other): - return list(self) + list(other) - - def __setitem__(self, key, object): - self._data[key] = object - - def __getitem__(self, key): - return self._data[key] - - def __delitem__(self, key): - del self._data[key] - - def __setattr__(self, key, obj): - self._data[key] = obj - - def __getstate__(self): - return {'_data': self.__dict__['_data']} - - def __setstate__(self, state): - self.__dict__['_data'] = state['_data'] - - def __getattr__(self, key): - try: - return self._data[key] - except KeyError: - raise AttributeError(key) - - def __contains__(self, key): - return key in self._data - - def as_immutable(self): - """Return an immutable proxy for this :class:`.Properties`.""" - - return ImmutableProperties(self._data) - - def update(self, value): - self._data.update(value) - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - def keys(self): - return list(self._data) - - def values(self): - return list(self._data.values()) - - def items(self): - return list(self._data.items()) - - def has_key(self, key): - return key in self._data - - def clear(self): - self._data.clear() - - -class OrderedProperties(Properties): - """Provide a __getattr__/__setattr__ interface with an OrderedDict - as backing store.""" - - __slots__ = () - - def __init__(self): - Properties.__init__(self, OrderedDict()) - - -class ImmutableProperties(ImmutableContainer, Properties): - """Provide immutable dict/object attribute to an underlying dictionary.""" - - __slots__ = () - - -class OrderedDict(dict): - """A dict that returns keys/values/items in the order they were added.""" - - __slots__ = '_list', - - def __reduce__(self): - return OrderedDict, (self.items(),) - - def __init__(self, ____sequence=None, **kwargs): - self._list = [] - if ____sequence is None: - if 
kwargs: - self.update(**kwargs) - else: - self.update(____sequence, **kwargs) - - def clear(self): - self._list = [] - dict.clear(self) - - def copy(self): - return self.__copy__() - - def __copy__(self): - return OrderedDict(self) - - def sort(self, *arg, **kw): - self._list.sort(*arg, **kw) - - def update(self, ____sequence=None, **kwargs): - if ____sequence is not None: - if hasattr(____sequence, 'keys'): - for key in ____sequence.keys(): - self.__setitem__(key, ____sequence[key]) - else: - for key, value in ____sequence: - self[key] = value - if kwargs: - self.update(kwargs) - - def setdefault(self, key, value): - if key not in self: - self.__setitem__(key, value) - return value - else: - return self.__getitem__(key) - - def __iter__(self): - return iter(self._list) - - def keys(self): - return list(self) - - def values(self): - return [self[key] for key in self._list] - - def items(self): - return [(key, self[key]) for key in self._list] - - if py2k: - def itervalues(self): - return iter(self.values()) - - def iterkeys(self): - return iter(self) - - def iteritems(self): - return iter(self.items()) - - def __setitem__(self, key, object): - if key not in self: - try: - self._list.append(key) - except AttributeError: - # work around Python pickle loads() with - # dict subclass (seems to ignore __setstate__?) - self._list = [key] - dict.__setitem__(self, key, object) - - def __delitem__(self, key): - dict.__delitem__(self, key) - self._list.remove(key) - - def pop(self, key, *default): - present = key in self - value = dict.pop(self, key, *default) - if present: - self._list.remove(key) - return value - - def popitem(self): - item = dict.popitem(self) - self._list.remove(item[0]) - return item - - -class OrderedSet(set): - def __init__(self, d=None): - set.__init__(self) - self._list = [] - if d is not None: - self._list = unique_list(d) - set.update(self, self._list) - else: - self._list = [] - - def add(self, element): - if element not in self: - self._list.append(element) - set.add(self, element) - - def remove(self, element): - set.remove(self, element) - self._list.remove(element) - - def insert(self, pos, element): - if element not in self: - self._list.insert(pos, element) - set.add(self, element) - - def discard(self, element): - if element in self: - self._list.remove(element) - set.remove(self, element) - - def clear(self): - set.clear(self) - self._list = [] - - def __getitem__(self, key): - return self._list[key] - - def __iter__(self): - return iter(self._list) - - def __add__(self, other): - return self.union(other) - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self._list) - - __str__ = __repr__ - - def update(self, iterable): - for e in iterable: - if e not in self: - self._list.append(e) - set.add(self, e) - return self - - __ior__ = update - - def union(self, other): - result = self.__class__(self) - result.update(other) - return result - - __or__ = union - - def intersection(self, other): - other = set(other) - return self.__class__(a for a in self if a in other) - - __and__ = intersection - - def symmetric_difference(self, other): - other = set(other) - result = self.__class__(a for a in self if a not in other) - result.update(a for a in other if a not in self) - return result - - __xor__ = symmetric_difference - - def difference(self, other): - other = set(other) - return self.__class__(a for a in self if a not in other) - - __sub__ = difference - - def intersection_update(self, other): - other = set(other) - set.intersection_update(self, other) 
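
# A minimal standalone sketch of the ordering contract the OrderedDict and
# OrderedSet classes above provide: iteration follows insertion order, with
# duplicates collapsed for the set. The helper name is illustrative only.

def unique_ordered(seq):
    # the same idea unique_list()/OrderedSet use: keep the first occurrence
    # of each element and preserve encounter order
    seen, out = set(), []
    for item in seq:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out

assert unique_ordered([3, 1, 3, 2, 1]) == [3, 1, 2]
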
- self._list = [a for a in self._list if a in other] - return self - - __iand__ = intersection_update - - def symmetric_difference_update(self, other): - set.symmetric_difference_update(self, other) - self._list = [a for a in self._list if a in self] - self._list += [a for a in other._list if a in self] - return self - - __ixor__ = symmetric_difference_update - - def difference_update(self, other): - set.difference_update(self, other) - self._list = [a for a in self._list if a in self] - return self - - __isub__ = difference_update - - -class IdentitySet(object): - """A set that considers only object id() for uniqueness. - - This strategy has edge cases for builtin types- it's possible to have - two 'foo' strings in one of these sets, for example. Use sparingly. - - """ - - _working_set = set - - def __init__(self, iterable=None): - self._members = dict() - if iterable: - for o in iterable: - self.add(o) - - def add(self, value): - self._members[id(value)] = value - - def __contains__(self, value): - return id(value) in self._members - - def remove(self, value): - del self._members[id(value)] - - def discard(self, value): - try: - self.remove(value) - except KeyError: - pass - - def pop(self): - try: - pair = self._members.popitem() - return pair[1] - except KeyError: - raise KeyError('pop from an empty set') - - def clear(self): - self._members.clear() - - def __cmp__(self, other): - raise TypeError('cannot compare sets using cmp()') - - def __eq__(self, other): - if isinstance(other, IdentitySet): - return self._members == other._members - else: - return False - - def __ne__(self, other): - if isinstance(other, IdentitySet): - return self._members != other._members - else: - return True - - def issubset(self, iterable): - other = type(self)(iterable) - - if len(self) > len(other): - return False - for m in itertools_filterfalse(other._members.__contains__, - iter(self._members.keys())): - return False - return True - - def __le__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.issubset(other) - - def __lt__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return len(self) < len(other) and self.issubset(other) - - def issuperset(self, iterable): - other = type(self)(iterable) - - if len(self) < len(other): - return False - - for m in itertools_filterfalse(self._members.__contains__, - iter(other._members.keys())): - return False - return True - - def __ge__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.issuperset(other) - - def __gt__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return len(self) > len(other) and self.issuperset(other) - - def union(self, iterable): - result = type(self)() - # testlib.pragma exempt:__hash__ - members = self._member_id_tuples() - other = _iter_id(iterable) - result._members.update(self._working_set(members).union(other)) - return result - - def __or__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.union(other) - - def update(self, iterable): - self._members = self.union(iterable)._members - - def __ior__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - self.update(other) - return self - - def difference(self, iterable): - result = type(self)() - # testlib.pragma exempt:__hash__ - members = self._member_id_tuples() - other = _iter_id(iterable) - result._members.update(self._working_set(members).difference(other)) - return 
result - - def __sub__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.difference(other) - - def difference_update(self, iterable): - self._members = self.difference(iterable)._members - - def __isub__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - self.difference_update(other) - return self - - def intersection(self, iterable): - result = type(self)() - # testlib.pragma exempt:__hash__ - members = self._member_id_tuples() - other = _iter_id(iterable) - result._members.update(self._working_set(members).intersection(other)) - return result - - def __and__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.intersection(other) - - def intersection_update(self, iterable): - self._members = self.intersection(iterable)._members - - def __iand__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - self.intersection_update(other) - return self - - def symmetric_difference(self, iterable): - result = type(self)() - # testlib.pragma exempt:__hash__ - members = self._member_id_tuples() - other = _iter_id(iterable) - result._members.update( - self._working_set(members).symmetric_difference(other)) - return result - - def _member_id_tuples(self): - return ((id(v), v) for v in self._members.values()) - - def __xor__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.symmetric_difference(other) - - def symmetric_difference_update(self, iterable): - self._members = self.symmetric_difference(iterable)._members - - def __ixor__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - self.symmetric_difference(other) - return self - - def copy(self): - return type(self)(iter(self._members.values())) - - __copy__ = copy - - def __len__(self): - return len(self._members) - - def __iter__(self): - return iter(self._members.values()) - - def __hash__(self): - raise TypeError('set objects are unhashable') - - def __repr__(self): - return '%s(%r)' % (type(self).__name__, list(self._members.values())) - - -class WeakSequence(object): - def __init__(self, __elements=()): - self._storage = [ - weakref.ref(element, self._remove) for element in __elements - ] - - def append(self, item): - self._storage.append(weakref.ref(item, self._remove)) - - def _remove(self, ref): - self._storage.remove(ref) - - def __len__(self): - return len(self._storage) - - def __iter__(self): - return (obj for obj in - (ref() for ref in self._storage) if obj is not None) - - def __getitem__(self, index): - try: - obj = self._storage[index] - except KeyError: - raise IndexError("Index %s out of range" % index) - else: - return obj() - - -class OrderedIdentitySet(IdentitySet): - class _working_set(OrderedSet): - # a testing pragma: exempt the OIDS working set from the test suite's - # "never call the user's __hash__" assertions. this is a big hammer, - # but it's safe here: IDS operates on (id, instance) tuples in the - # working set. - __sa_hash_exempt__ = True - - def __init__(self, iterable=None): - IdentitySet.__init__(self) - self._members = OrderedDict() - if iterable: - for o in iterable: - self.add(o) - - -class PopulateDict(dict): - """A dict which populates missing values via a creation function. - - Note the creation function takes a key, unlike - collections.defaultdict. 
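
# A minimal standalone sketch of the id()-based membership that IdentitySet
# above implements: equal-but-distinct objects stay distinct, and members
# never need to be hashable. The class name is illustrative only.

class TinyIdentitySet:
    def __init__(self, iterable=()):
        self._members = {id(o): o for o in iterable}

    def add(self, o):
        self._members[id(o)] = o

    def __contains__(self, o):
        return id(o) in self._members

    def __len__(self):
        return len(self._members)

a, b = [1], [1]                 # equal lists, distinct objects
s = TinyIdentitySet([a, b])
assert len(s) == 2 and a in s   # unhashable members are fine; both are kept
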
- - """ - - def __init__(self, creator): - self.creator = creator - - def __missing__(self, key): - self[key] = val = self.creator(key) - return val - -# Define collections that are capable of storing -# ColumnElement objects as hashable keys/elements. -# At this point, these are mostly historical, things -# used to be more complicated. -column_set = set -column_dict = dict -ordered_column_set = OrderedSet -populate_column_dict = PopulateDict - - -_getters = PopulateDict(operator.itemgetter) - -_property_getters = PopulateDict( - lambda idx: property(operator.itemgetter(idx))) - - -def unique_list(seq, hashfunc=None): - seen = set() - seen_add = seen.add - if not hashfunc: - return [x for x in seq - if x not in seen - and not seen_add(x)] - else: - return [x for x in seq - if hashfunc(x) not in seen - and not seen_add(hashfunc(x))] - - -class UniqueAppender(object): - """Appends items to a collection ensuring uniqueness. - - Additional appends() of the same object are ignored. Membership is - determined by identity (``is a``) not equality (``==``). - """ - - def __init__(self, data, via=None): - self.data = data - self._unique = {} - if via: - self._data_appender = getattr(data, via) - elif hasattr(data, 'append'): - self._data_appender = data.append - elif hasattr(data, 'add'): - self._data_appender = data.add - - def append(self, item): - id_ = id(item) - if id_ not in self._unique: - self._data_appender(item) - self._unique[id_] = True - - def __iter__(self): - return iter(self.data) - - -def coerce_generator_arg(arg): - if len(arg) == 1 and isinstance(arg[0], types.GeneratorType): - return list(arg[0]) - else: - return arg - - -def to_list(x, default=None): - if x is None: - return default - if not isinstance(x, collections.Iterable) or isinstance(x, string_types): - return [x] - elif isinstance(x, list): - return x - else: - return list(x) - - -def has_intersection(set_, iterable): - """return True if any items of set_ are present in iterable. - - Goes through special effort to ensure __hash__ is not called - on items in iterable that don't support it. - - """ - # TODO: optimize, write in C, etc. - return bool( - set_.intersection([i for i in iterable if i.__hash__]) - ) - - -def to_set(x): - if x is None: - return set() - if not isinstance(x, set): - return set(to_list(x)) - else: - return x - - -def to_column_set(x): - if x is None: - return column_set() - if not isinstance(x, column_set): - return column_set(to_list(x)) - else: - return x - - -def update_copy(d, _new=None, **kw): - """Copy the given dict and update with the given values.""" - - d = d.copy() - if _new: - d.update(_new) - d.update(**kw) - return d - - -def flatten_iterator(x): - """Given an iterator of which further sub-elements may also be - iterators, flatten the sub-elements into a single iterator. - - """ - for elem in x: - if not isinstance(elem, str) and hasattr(elem, '__iter__'): - for y in flatten_iterator(elem): - yield y - else: - yield elem - - -class LRUCache(dict): - """Dictionary with 'squishy' removal of least - recently used items. - - Note that either get() or [] should be used here, but - generally its not safe to do an "in" check first as the dictionary - can change subsequent to that call. 
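
# A minimal standalone sketch of the UniqueAppender contract above: repeated
# appends of the same object are ignored, because membership is tracked by
# id() rather than equality. The class name and values are illustrative only.

class TinyUniqueAppender:
    def __init__(self, data):
        self.data = data
        self._seen = set()

    def append(self, item):
        if id(item) not in self._seen:
            self._seen.add(id(item))
            self.data.append(item)

data = []
appender = TinyUniqueAppender(data)
item = {'name': 'main_table'}
appender.append(item)
appender.append(item)                     # ignored: same object identity
appender.append({'name': 'main_table'})   # kept: equal but distinct object
assert len(data) == 2
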
- - """ - - def __init__(self, capacity=100, threshold=.5): - self.capacity = capacity - self.threshold = threshold - self._counter = 0 - self._mutex = threading.Lock() - - def _inc_counter(self): - self._counter += 1 - return self._counter - - def get(self, key, default=None): - item = dict.get(self, key, default) - if item is not default: - item[2] = self._inc_counter() - return item[1] - else: - return default - - def __getitem__(self, key): - item = dict.__getitem__(self, key) - item[2] = self._inc_counter() - return item[1] - - def values(self): - return [i[1] for i in dict.values(self)] - - def setdefault(self, key, value): - if key in self: - return self[key] - else: - self[key] = value - return value - - def __setitem__(self, key, value): - item = dict.get(self, key) - if item is None: - item = [key, value, self._inc_counter()] - dict.__setitem__(self, key, item) - else: - item[1] = value - self._manage_size() - - def _manage_size(self): - if not self._mutex.acquire(False): - return - try: - while len(self) > self.capacity + self.capacity * self.threshold: - by_counter = sorted(dict.values(self), - key=operator.itemgetter(2), - reverse=True) - for item in by_counter[self.capacity:]: - try: - del self[item[0]] - except KeyError: - # deleted elsewhere; skip - continue - finally: - self._mutex.release() - - -_lw_tuples = LRUCache(100) - - -def lightweight_named_tuple(name, fields): - hash_ = (name, ) + tuple(fields) - tp_cls = _lw_tuples.get(hash_) - if tp_cls: - return tp_cls - - tp_cls = type( - name, (_LW,), - dict([ - (field, _property_getters[idx]) - for idx, field in enumerate(fields) if field is not None - ] + [('__slots__', ())]) - ) - - tp_cls._real_fields = fields - tp_cls._fields = tuple([f for f in fields if f is not None]) - - _lw_tuples[hash_] = tp_cls - return tp_cls - - -class ScopedRegistry(object): - """A Registry that can store one or multiple instances of a single - class on the basis of a "scope" function. - - The object implements ``__call__`` as the "getter", so by - calling ``myregistry()`` the contained object is returned - for the current scope. - - :param createfunc: - a callable that returns a new object to be placed in the registry - - :param scopefunc: - a callable that will return a key to store/retrieve an object. - """ - - def __init__(self, createfunc, scopefunc): - """Construct a new :class:`.ScopedRegistry`. - - :param createfunc: A creation function that will generate - a new value for the current scope, if none is present. - - :param scopefunc: A function that returns a hashable - token representing the current scope (such as, current - thread identifier). - - """ - self.createfunc = createfunc - self.scopefunc = scopefunc - self.registry = {} - - def __call__(self): - key = self.scopefunc() - try: - return self.registry[key] - except KeyError: - return self.registry.setdefault(key, self.createfunc()) - - def has(self): - """Return True if an object is present in the current scope.""" - - return self.scopefunc() in self.registry - - def set(self, obj): - """Set the value for the current scope.""" - - self.registry[self.scopefunc()] = obj - - def clear(self): - """Clear the current scope, if any.""" - - try: - del self.registry[self.scopefunc()] - except KeyError: - pass - - -class ThreadLocalRegistry(ScopedRegistry): - """A :class:`.ScopedRegistry` that uses a ``threading.local()`` - variable for storage. 
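
# A minimal standalone sketch of the ScopedRegistry contract above, with the
# current thread id as the scope key: each scope lazily gets one instance,
# and repeated calls within a scope return the same object. The class name
# is illustrative only.

import threading

class TinyScopedRegistry:
    def __init__(self, createfunc, scopefunc):
        self.createfunc, self.scopefunc = createfunc, scopefunc
        self.registry = {}

    def __call__(self):
        key = self.scopefunc()
        try:
            return self.registry[key]
        except KeyError:
            return self.registry.setdefault(key, self.createfunc())

registry = TinyScopedRegistry(list, threading.get_ident)
assert registry() is registry()   # same scope key -> same instance
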
- - """ - - def __init__(self, createfunc): - self.createfunc = createfunc - self.registry = threading.local() - - def __call__(self): - try: - return self.registry.value - except AttributeError: - val = self.registry.value = self.createfunc() - return val - - def has(self): - return hasattr(self.registry, "value") - - def set(self, obj): - self.registry.value = obj - - def clear(self): - try: - del self.registry.value - except AttributeError: - pass - - -def _iter_id(iterable): - """Generator: ((id(o), o) for o in iterable).""" - - for item in iterable: - yield id(item), item diff --git a/python/sqlalchemy/util/compat.py b/python/sqlalchemy/util/compat.py deleted file mode 100644 index 5b6f691f..00000000 --- a/python/sqlalchemy/util/compat.py +++ /dev/null @@ -1,262 +0,0 @@ -# util/compat.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Handle Python version/platform incompatibilities.""" - -import sys - -try: - import threading -except ImportError: - import dummy_threading as threading - -py33 = sys.version_info >= (3, 3) -py32 = sys.version_info >= (3, 2) -py3k = sys.version_info >= (3, 0) -py2k = sys.version_info < (3, 0) -py265 = sys.version_info >= (2, 6, 5) -jython = sys.platform.startswith('java') -pypy = hasattr(sys, 'pypy_version_info') -win32 = sys.platform.startswith('win') -cpython = not pypy and not jython # TODO: something better for this ? - -import collections -next = next - -if py3k: - import pickle -else: - try: - import cPickle as pickle - except ImportError: - import pickle - -# work around http://bugs.python.org/issue2646 -if py265: - safe_kwarg = lambda arg: arg -else: - safe_kwarg = str - -ArgSpec = collections.namedtuple("ArgSpec", - ["args", "varargs", "keywords", "defaults"]) - -if py3k: - import builtins - - from inspect import getfullargspec as inspect_getfullargspec - from urllib.parse import (quote_plus, unquote_plus, - parse_qsl, quote, unquote) - import configparser - from io import StringIO - - from io import BytesIO as byte_buffer - - def inspect_getargspec(func): - return ArgSpec( - *inspect_getfullargspec(func)[0:4] - ) - - string_types = str, - binary_type = bytes - text_type = str - int_types = int, - iterbytes = iter - - def u(s): - return s - - def ue(s): - return s - - def b(s): - return s.encode("latin-1") - - if py32: - callable = callable - else: - def callable(fn): - return hasattr(fn, '__call__') - - def cmp(a, b): - return (a > b) - (a < b) - - from functools import reduce - - print_ = getattr(builtins, "print") - - import_ = getattr(builtins, '__import__') - - import itertools - itertools_filterfalse = itertools.filterfalse - itertools_filter = filter - itertools_imap = map - from itertools import zip_longest - - import base64 - - def b64encode(x): - return base64.b64encode(x).decode('ascii') - - def b64decode(x): - return base64.b64decode(x.encode('ascii')) - -else: - from inspect import getargspec as inspect_getfullargspec - inspect_getargspec = inspect_getfullargspec - from urllib import quote_plus, unquote_plus, quote, unquote - from urlparse import parse_qsl - import ConfigParser as configparser - from StringIO import StringIO - from cStringIO import StringIO as byte_buffer - - string_types = basestring, - binary_type = str - text_type = unicode - int_types = int, long - - def iterbytes(buf): - return (ord(byte) for byte in buf) - - def u(s): - # this differs from what six 
does, which doesn't support non-ASCII - # strings - we only use u() with - # literal source strings, and all our source files with non-ascii - # in them (all are tests) are utf-8 encoded. - return unicode(s, "utf-8") - - def ue(s): - return unicode(s, "unicode_escape") - - def b(s): - return s - - def import_(*args): - if len(args) == 4: - args = args[0:3] + ([str(arg) for arg in args[3]],) - return __import__(*args) - - callable = callable - cmp = cmp - reduce = reduce - - import base64 - b64encode = base64.b64encode - b64decode = base64.b64decode - - def print_(*args, **kwargs): - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - for arg in enumerate(args): - if not isinstance(arg, basestring): - arg = str(arg) - fp.write(arg) - - import itertools - itertools_filterfalse = itertools.ifilterfalse - itertools_filter = itertools.ifilter - itertools_imap = itertools.imap - from itertools import izip_longest as zip_longest - - -import time -if win32 or jython: - time_func = time.clock -else: - time_func = time.time - -from collections import namedtuple -from operator import attrgetter as dottedgetter - - -if py3k: - def reraise(tp, value, tb=None, cause=None): - if cause is not None: - value.__cause__ = cause - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - def raise_from_cause(exception, exc_info=None): - if exc_info is None: - exc_info = sys.exc_info() - exc_type, exc_value, exc_tb = exc_info - reraise(type(exception), exception, tb=exc_tb, cause=exc_value) -else: - exec("def reraise(tp, value, tb=None, cause=None):\n" - " raise tp, value, tb\n") - - def raise_from_cause(exception, exc_info=None): - # not as nice as that of Py3K, but at least preserves - # the code line where the issue occurred - if exc_info is None: - exc_info = sys.exc_info() - exc_type, exc_value, exc_tb = exc_info - reraise(type(exception), exception, tb=exc_tb) - -if py3k: - exec_ = getattr(builtins, 'exec') -else: - def exec_(func_text, globals_, lcl=None): - if lcl is None: - exec('exec func_text in globals_') - else: - exec('exec func_text in globals_, lcl') - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass. - - Drops the middle class upon creation. 
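
# A standalone sketch of what the Python 3 branch of reraise() above does:
# re-raise an exception object with an explicit traceback, optionally
# chaining a __cause__, which is the py3k spelling of Python 2's
# "raise tp, value, tb".

import sys

def reraise3(tp, value, tb=None, cause=None):
    if cause is not None:
        value.__cause__ = cause
    if value.__traceback__ is not tb:
        raise value.with_traceback(tb)
    raise value

try:
    try:
        1 / 0
    except ZeroDivisionError:
        reraise3(*sys.exc_info())       # re-raise, keeping the traceback
except ZeroDivisionError as exc:
    assert exc.__traceback__ is not None
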
- - Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/ - - """ - - class metaclass(meta): - __call__ = type.__call__ - __init__ = type.__init__ - - def __new__(cls, name, this_bases, d): - if this_bases is None: - return type.__new__(cls, name, (), d) - return meta(name, bases, d) - return metaclass('temporary_class', None, {}) - - -from contextlib import contextmanager - -try: - from contextlib import nested -except ImportError: - # removed in py3k, credit to mitsuhiko for - # workaround - - @contextmanager - def nested(*managers): - exits = [] - vars = [] - exc = (None, None, None) - try: - for mgr in managers: - exit = mgr.__exit__ - enter = mgr.__enter__ - vars.append(enter()) - exits.append(exit) - yield vars - except: - exc = sys.exc_info() - finally: - while exits: - exit = exits.pop() - try: - if exit(*exc): - exc = (None, None, None) - except: - exc = sys.exc_info() - if exc != (None, None, None): - reraise(exc[0], exc[1], exc[2]) diff --git a/python/sqlalchemy/util/deprecations.py b/python/sqlalchemy/util/deprecations.py deleted file mode 100644 index 4c7ea47e..00000000 --- a/python/sqlalchemy/util/deprecations.py +++ /dev/null @@ -1,146 +0,0 @@ -# util/deprecations.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Helpers related to deprecation of functions, methods, classes, other -functionality.""" - -from .. import exc -import warnings -import re -from .langhelpers import decorator - - -def warn_deprecated(msg, stacklevel=3): - warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel) - - -def warn_pending_deprecation(msg, stacklevel=3): - warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel) - - -def deprecated(version, message=None, add_deprecation_to_docstring=True): - """Decorates a function and issues a deprecation warning on use. - - :param message: - If provided, issue message in the warning. A sensible default - is used if not provided. - - :param add_deprecation_to_docstring: - Default True. If False, the wrapped function's __doc__ is left - as-is. If True, the 'message' is prepended to the docs if - provided, or sensible default if message is omitted. - - """ - - if add_deprecation_to_docstring: - header = ".. deprecated:: %s %s" % \ - (version, (message or '')) - else: - header = None - - if message is None: - message = "Call to deprecated function %(func)s" - - def decorate(fn): - return _decorate_with_warning( - fn, exc.SADeprecationWarning, - message % dict(func=fn.__name__), header) - return decorate - - -def pending_deprecation(version, message=None, - add_deprecation_to_docstring=True): - """Decorates a function and issues a pending deprecation warning on use. - - :param version: - An approximate future version at which point the pending deprecation - will become deprecated. Not used in messaging. - - :param message: - If provided, issue message in the warning. A sensible default - is used if not provided. - - :param add_deprecation_to_docstring: - Default True. If False, the wrapped function's __doc__ is left - as-is. If True, the 'message' is prepended to the docs if - provided, or sensible default if message is omitted. - """ - - if add_deprecation_to_docstring: - header = ".. 
deprecated:: %s (pending) %s" % \ - (version, (message or '')) - else: - header = None - - if message is None: - message = "Call to deprecated function %(func)s" - - def decorate(fn): - return _decorate_with_warning( - fn, exc.SAPendingDeprecationWarning, - message % dict(func=fn.__name__), header) - return decorate - - -def _sanitize_restructured_text(text): - def repl(m): - type_, name = m.group(1, 2) - if type_ in ("func", "meth"): - name += "()" - return name - return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text) - - -def _decorate_with_warning(func, wtype, message, docstring_header=None): - """Wrap a function with a warnings.warn and augmented docstring.""" - - message = _sanitize_restructured_text(message) - - @decorator - def warned(fn, *args, **kwargs): - warnings.warn(message, wtype, stacklevel=3) - return fn(*args, **kwargs) - - doc = func.__doc__ is not None and func.__doc__ or '' - if docstring_header is not None: - docstring_header %= dict(func=func.__name__) - - doc = inject_docstring_text(doc, docstring_header, 1) - - decorated = warned(func) - decorated.__doc__ = doc - return decorated - -import textwrap - - -def _dedent_docstring(text): - split_text = text.split("\n", 1) - if len(split_text) == 1: - return text - else: - firstline, remaining = split_text - if not firstline.startswith(" "): - return firstline + "\n" + textwrap.dedent(remaining) - else: - return textwrap.dedent(text) - - -def inject_docstring_text(doctext, injecttext, pos): - doctext = _dedent_docstring(doctext or "") - lines = doctext.split('\n') - injectlines = textwrap.dedent(injecttext).split("\n") - if injectlines[0]: - injectlines.insert(0, "") - - blanks = [num for num, line in enumerate(lines) if not line.strip()] - blanks.insert(0, 0) - - inject_pos = blanks[min(pos, len(blanks) - 1)] - - lines = lines[0:inject_pos] + injectlines + lines[inject_pos:] - return "\n".join(lines) diff --git a/python/sqlalchemy/util/langhelpers.py b/python/sqlalchemy/util/langhelpers.py deleted file mode 100644 index dd258924..00000000 --- a/python/sqlalchemy/util/langhelpers.py +++ /dev/null @@ -1,1377 +0,0 @@ -# util/langhelpers.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Routines to help with the creation, loading and introspection of -modules, classes, hierarchies, attributes, functions, and methods. - -""" -import itertools -import inspect -import operator -import re -import sys -import types -import warnings -from functools import update_wrapper -from .. import exc -import hashlib -from . import compat -from . import _collections - - -def md5_hex(x): - if compat.py3k: - x = x.encode('utf-8') - m = hashlib.md5() - m.update(x) - return m.hexdigest() - - -class safe_reraise(object): - """Reraise an exception after invoking some - handler code. - - Stores the existing exception info before - invoking so that it is maintained across a potential - coroutine context switch. 
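
# A minimal standalone sketch of the deprecated()/_decorate_with_warning
# machinery above: the wrapper emits a warning on every call and leaves the
# return value untouched. functools.wraps stands in for the deleted
# @decorator helper, so this is a simplification, not the original code.

import functools
import warnings

def tiny_deprecated(message):
    def decorate(fn):
        @functools.wraps(fn)
        def warned(*args, **kwargs):
            warnings.warn(message % {'func': fn.__name__},
                          DeprecationWarning, stacklevel=2)
            return fn(*args, **kwargs)
        return warned
    return decorate

@tiny_deprecated("Call to deprecated function %(func)s")
def old_api():
    return 42

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert old_api() == 42
    assert issubclass(caught[0].category, DeprecationWarning)
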
- - e.g.:: - - try: - sess.commit() - except: - with safe_reraise(): - sess.rollback() - - """ - - def __enter__(self): - self._exc_info = sys.exc_info() - - def __exit__(self, type_, value, traceback): - # see #2703 for notes - if type_ is None: - exc_type, exc_value, exc_tb = self._exc_info - self._exc_info = None # remove potential circular references - compat.reraise(exc_type, exc_value, exc_tb) - else: - self._exc_info = None # remove potential circular references - compat.reraise(type_, value, traceback) - - -def decode_slice(slc): - """decode a slice object as sent to __getitem__. - - takes into account the 2.5 __index__() method, basically. - - """ - ret = [] - for x in slc.start, slc.stop, slc.step: - if hasattr(x, '__index__'): - x = x.__index__() - ret.append(x) - return tuple(ret) - - -def _unique_symbols(used, *bases): - used = set(used) - for base in bases: - pool = itertools.chain((base,), - compat.itertools_imap(lambda i: base + str(i), - range(1000))) - for sym in pool: - if sym not in used: - used.add(sym) - yield sym - break - else: - raise NameError("exhausted namespace for symbol base %s" % base) - - -def map_bits(fn, n): - """Call the given function given each nonzero bit from n.""" - - while n: - b = n & (~n + 1) - yield fn(b) - n ^= b - - -def decorator(target): - """A signature-matching decorator factory.""" - - def decorate(fn): - if not inspect.isfunction(fn): - raise Exception("not a decoratable function") - spec = compat.inspect_getfullargspec(fn) - names = tuple(spec[0]) + spec[1:3] + (fn.__name__,) - targ_name, fn_name = _unique_symbols(names, 'target', 'fn') - - metadata = dict(target=targ_name, fn=fn_name) - metadata.update(format_argspec_plus(spec, grouped=False)) - metadata['name'] = fn.__name__ - code = """\ -def %(name)s(%(args)s): - return %(target)s(%(fn)s, %(apply_kw)s) -""" % metadata - decorated = _exec_code_in_env(code, - {targ_name: target, fn_name: fn}, - fn.__name__) - decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__ - decorated.__wrapped__ = fn - return update_wrapper(decorated, fn) - return update_wrapper(decorate, target) - - -def _exec_code_in_env(code, env, fn_name): - exec(code, env) - return env[fn_name] - - -def public_factory(target, location): - """Produce a wrapping function for the given cls or classmethod. - - Rationale here is so that the __init__ method of the - class can serve as documentation for the function. - - """ - if isinstance(target, type): - fn = target.__init__ - callable_ = target - doc = "Construct a new :class:`.%s` object. \n\n"\ - "This constructor is mirrored as a public API function; "\ - "see :func:`~%s` "\ - "for a full usage and argument description." % ( - target.__name__, location, ) - else: - fn = callable_ = target - doc = "This function is mirrored; see :func:`~%s` "\ - "for a description of arguments." 
% location - - location_name = location.split(".")[-1] - spec = compat.inspect_getfullargspec(fn) - del spec[0][0] - metadata = format_argspec_plus(spec, grouped=False) - metadata['name'] = location_name - code = """\ -def %(name)s(%(args)s): - return cls(%(apply_kw)s) -""" % metadata - env = {'cls': callable_, 'symbol': symbol} - exec(code, env) - decorated = env[location_name] - decorated.__doc__ = fn.__doc__ - decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0] - if compat.py2k or hasattr(fn, '__func__'): - fn.__func__.__doc__ = doc - else: - fn.__doc__ = doc - return decorated - - -class PluginLoader(object): - - def __init__(self, group, auto_fn=None): - self.group = group - self.impls = {} - self.auto_fn = auto_fn - - def load(self, name): - if name in self.impls: - return self.impls[name]() - - if self.auto_fn: - loader = self.auto_fn(name) - if loader: - self.impls[name] = loader - return loader() - - try: - import pkg_resources - except ImportError: - pass - else: - for impl in pkg_resources.iter_entry_points( - self.group, name): - self.impls[name] = impl.load - return impl.load() - - raise exc.NoSuchModuleError( - "Can't load plugin: %s:%s" % - (self.group, name)) - - def register(self, name, modulepath, objname): - def load(): - mod = compat.import_(modulepath) - for token in modulepath.split(".")[1:]: - mod = getattr(mod, token) - return getattr(mod, objname) - self.impls[name] = load - - -def get_cls_kwargs(cls, _set=None): - """Return the full set of inherited kwargs for the given `cls`. - - Probes a class's __init__ method, collecting all named arguments. If the - __init__ defines a \**kwargs catch-all, then the constructor is presumed - to pass along unrecognized keywords to its base classes, and the - collection process is repeated recursively on each of the bases. - - Uses a subset of inspect.getargspec() to cut down on method overhead. - No anonymous tuple arguments please ! - - """ - toplevel = _set is None - if toplevel: - _set = set() - - ctr = cls.__dict__.get('__init__', False) - - has_init = ctr and isinstance(ctr, types.FunctionType) and \ - isinstance(ctr.__code__, types.CodeType) - - if has_init: - names, has_kw = inspect_func_args(ctr) - _set.update(names) - - if not has_kw and not toplevel: - return None - - if not has_init or has_kw: - for c in cls.__bases__: - if get_cls_kwargs(c, _set) is None: - break - - _set.discard('self') - return _set - - -try: - # TODO: who doesn't have this constant? - from inspect import CO_VARKEYWORDS - - def inspect_func_args(fn): - co = fn.__code__ - nargs = co.co_argcount - names = co.co_varnames - args = list(names[:nargs]) - has_kw = bool(co.co_flags & CO_VARKEYWORDS) - return args, has_kw - -except ImportError: - def inspect_func_args(fn): - names, _, has_kw, _ = inspect.getargspec(fn) - return names, bool(has_kw) - - -def get_func_kwargs(func): - """Return the set of legal kwargs for the given `func`. - - Uses getargspec so is safe to call for methods, functions, - etc. - - """ - - return compat.inspect_getargspec(func)[0] - - -def get_callable_argspec(fn, no_self=False, _is_init=False): - """Return the argument signature for any callable. - - All pure-Python callables are accepted, including - functions, methods, classes, objects with __call__; - builtins and other edge cases like functools.partial() objects - raise a TypeError. 
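
# A standalone sketch of the idea behind get_cls_kwargs() above, using the
# modern inspect.signature API instead of raw code-object probing. The class
# below is a made-up example, not part of the patch.

import inspect

def ctor_kwargs(cls):
    sig = inspect.signature(cls.__init__)
    return {name for name, p in sig.parameters.items()
            if name != 'self' and p.kind is p.POSITIONAL_OR_KEYWORD}

class Engine:
    def __init__(self, echo=False, pool_size=5):
        self.echo, self.pool_size = echo, pool_size

assert ctor_kwargs(Engine) == {'echo', 'pool_size'}
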
- - """ - if inspect.isbuiltin(fn): - raise TypeError("Can't inspect builtin: %s" % fn) - elif inspect.isfunction(fn): - if _is_init and no_self: - spec = compat.inspect_getargspec(fn) - return compat.ArgSpec(spec.args[1:], spec.varargs, - spec.keywords, spec.defaults) - else: - return compat.inspect_getargspec(fn) - elif inspect.ismethod(fn): - if no_self and (_is_init or fn.__self__): - spec = compat.inspect_getargspec(fn.__func__) - return compat.ArgSpec(spec.args[1:], spec.varargs, - spec.keywords, spec.defaults) - else: - return compat.inspect_getargspec(fn.__func__) - elif inspect.isclass(fn): - return get_callable_argspec( - fn.__init__, no_self=no_self, _is_init=True) - elif hasattr(fn, '__func__'): - return compat.inspect_getargspec(fn.__func__) - elif hasattr(fn, '__call__'): - if inspect.ismethod(fn.__call__): - return get_callable_argspec(fn.__call__, no_self=no_self) - else: - raise TypeError("Can't inspect callable: %s" % fn) - else: - raise TypeError("Can't inspect callable: %s" % fn) - - -def format_argspec_plus(fn, grouped=True): - """Returns a dictionary of formatted, introspected function arguments. - - A enhanced variant of inspect.formatargspec to support code generation. - - fn - An inspectable callable or tuple of inspect getargspec() results. - grouped - Defaults to True; include (parens, around, argument) lists - - Returns: - - args - Full inspect.formatargspec for fn - self_arg - The name of the first positional argument, varargs[0], or None - if the function defines no positional arguments. - apply_pos - args, re-written in calling rather than receiving syntax. Arguments are - passed positionally. - apply_kw - Like apply_pos, except keyword-ish args are passed as keywords. - - Example:: - - >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123) - {'args': '(self, a, b, c=3, **d)', - 'self_arg': 'self', - 'apply_kw': '(self, a, b, c=c, **d)', - 'apply_pos': '(self, a, b, c, **d)'} - - """ - if compat.callable(fn): - spec = compat.inspect_getfullargspec(fn) - else: - # we accept an existing argspec... 
- spec = fn - args = inspect.formatargspec(*spec) - if spec[0]: - self_arg = spec[0][0] - elif spec[1]: - self_arg = '%s[0]' % spec[1] - else: - self_arg = None - - if compat.py3k: - apply_pos = inspect.formatargspec(spec[0], spec[1], - spec[2], None, spec[4]) - num_defaults = 0 - if spec[3]: - num_defaults += len(spec[3]) - if spec[4]: - num_defaults += len(spec[4]) - name_args = spec[0] + spec[4] - else: - apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2]) - num_defaults = 0 - if spec[3]: - num_defaults += len(spec[3]) - name_args = spec[0] - - if num_defaults: - defaulted_vals = name_args[0 - num_defaults:] - else: - defaulted_vals = () - - apply_kw = inspect.formatargspec(name_args, spec[1], spec[2], - defaulted_vals, - formatvalue=lambda x: '=' + x) - if grouped: - return dict(args=args, self_arg=self_arg, - apply_pos=apply_pos, apply_kw=apply_kw) - else: - return dict(args=args[1:-1], self_arg=self_arg, - apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1]) - - -def format_argspec_init(method, grouped=True): - """format_argspec_plus with considerations for typical __init__ methods - - Wraps format_argspec_plus with error handling strategies for typical - __init__ cases:: - - object.__init__ -> (self) - other unreflectable (usually C) -> (self, *args, **kwargs) - - """ - if method is object.__init__: - args = grouped and '(self)' or 'self' - else: - try: - return format_argspec_plus(method, grouped=grouped) - except TypeError: - args = (grouped and '(self, *args, **kwargs)' - or 'self, *args, **kwargs') - return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args) - - -def getargspec_init(method): - """inspect.getargspec with considerations for typical __init__ methods - - Wraps inspect.getargspec with error handling for typical __init__ cases:: - - object.__init__ -> (self) - other unreflectable (usually C) -> (self, *args, **kwargs) - - """ - try: - return inspect.getargspec(method) - except TypeError: - if method is object.__init__: - return (['self'], None, None, None) - else: - return (['self'], 'args', 'kwargs', None) - - -def unbound_method_to_callable(func_or_cls): - """Adjust the incoming callable such that a 'self' argument is not - required. - - """ - - if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__: - return func_or_cls.__func__ - else: - return func_or_cls - - -def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()): - """Produce a __repr__() based on direct association of the __init__() - specification vs. same-named attributes present. 
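
# A simplified standalone sketch of what generic_repr() above produces: a
# repr() built by matching __init__ parameter names against same-named
# attributes. Default-value and *args handling are omitted here for brevity.

import inspect

def tiny_generic_repr(obj):
    params = inspect.signature(type(obj).__init__).parameters
    parts = ['%s=%r' % (name, getattr(obj, name))
             for name in params if name != 'self' and hasattr(obj, name)]
    return '%s(%s)' % (type(obj).__name__, ', '.join(parts))

class Column:
    def __init__(self, name, nullable=True):
        self.name, self.nullable = name, nullable

assert tiny_generic_repr(Column('id', False)) == "Column(name='id', nullable=False)"
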
- - """ - if to_inspect is None: - to_inspect = [obj] - else: - to_inspect = _collections.to_list(to_inspect) - - missing = object() - - pos_args = [] - kw_args = _collections.OrderedDict() - vargs = None - for i, insp in enumerate(to_inspect): - try: - (_args, _vargs, vkw, defaults) = \ - inspect.getargspec(insp.__init__) - except TypeError: - continue - else: - default_len = defaults and len(defaults) or 0 - if i == 0: - if _vargs: - vargs = _vargs - if default_len: - pos_args.extend(_args[1:-default_len]) - else: - pos_args.extend(_args[1:]) - else: - kw_args.update([ - (arg, missing) for arg in _args[1:-default_len] - ]) - - if default_len: - kw_args.update([ - (arg, default) - for arg, default - in zip(_args[-default_len:], defaults) - ]) - output = [] - - output.extend(repr(getattr(obj, arg, None)) for arg in pos_args) - - if vargs is not None and hasattr(obj, vargs): - output.extend([repr(val) for val in getattr(obj, vargs)]) - - for arg, defval in kw_args.items(): - if arg in omit_kwarg: - continue - try: - val = getattr(obj, arg, missing) - if val is not missing and val != defval: - output.append('%s=%r' % (arg, val)) - except Exception: - pass - - if additional_kw: - for arg, defval in additional_kw: - try: - val = getattr(obj, arg, missing) - if val is not missing and val != defval: - output.append('%s=%r' % (arg, val)) - except Exception: - pass - - return "%s(%s)" % (obj.__class__.__name__, ", ".join(output)) - - -class portable_instancemethod(object): - """Turn an instancemethod into a (parent, name) pair - to produce a serializable callable. - - """ - - __slots__ = 'target', 'name', '__weakref__' - - def __getstate__(self): - return {'target': self.target, 'name': self.name} - - def __setstate__(self, state): - self.target = state['target'] - self.name = state['name'] - - def __init__(self, meth): - self.target = meth.__self__ - self.name = meth.__name__ - - def __call__(self, *arg, **kw): - return getattr(self.target, self.name)(*arg, **kw) - - -def class_hierarchy(cls): - """Return an unordered sequence of all classes related to cls. - - Traverses diamond hierarchies. - - Fibs slightly: subclasses of builtin types are not returned. Thus - class_hierarchy(class A(object)) returns (A, object), not A plus every - class systemwide that derives from object. - - Old-style classes are discarded and hierarchies rooted on them - will not be descended. - - """ - if compat.py2k: - if isinstance(cls, types.ClassType): - return list() - - hier = set([cls]) - process = list(cls.__mro__) - while process: - c = process.pop() - if compat.py2k: - if isinstance(c, types.ClassType): - continue - bases = (_ for _ in c.__bases__ - if _ not in hier and not isinstance(_, types.ClassType)) - else: - bases = (_ for _ in c.__bases__ if _ not in hier) - - for b in bases: - process.append(b) - hier.add(b) - - if compat.py3k: - if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'): - continue - else: - if c.__module__ == '__builtin__' or not hasattr( - c, '__subclasses__'): - continue - - for s in [_ for _ in c.__subclasses__() if _ not in hier]: - process.append(s) - hier.add(s) - return list(hier) - - -def iterate_attributes(cls): - """iterate all the keys and attributes associated - with a class, without using getattr(). - - Does not use getattr() so that class-sensitive - descriptors (i.e. property.__get__()) are not called. 
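
# A standalone sketch of the portable_instancemethod pattern above: bound
# methods historically did not pickle on Python 2, so the helper stores the
# (instance, method-name) pair instead and re-dispatches through getattr().
# The class names here are illustrative only.

import pickle

class PortableMethod:
    def __init__(self, meth):
        self.target = meth.__self__
        self.name = meth.__name__

    def __call__(self, *args, **kw):
        return getattr(self.target, self.name)(*args, **kw)

class Greeter:
    def hello(self):
        return 'hello'

restored = pickle.loads(pickle.dumps(PortableMethod(Greeter().hello)))
assert restored() == 'hello'
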
- - """ - keys = dir(cls) - for key in keys: - for c in cls.__mro__: - if key in c.__dict__: - yield (key, c.__dict__[key]) - break - - -def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None, - name='self.proxy', from_instance=None): - """Automates delegation of __specials__ for a proxying type.""" - - if only: - dunders = only - else: - if skip is None: - skip = ('__slots__', '__del__', '__getattribute__', - '__metaclass__', '__getstate__', '__setstate__') - dunders = [m for m in dir(from_cls) - if (m.startswith('__') and m.endswith('__') and - not hasattr(into_cls, m) and m not in skip)] - - for method in dunders: - try: - fn = getattr(from_cls, method) - if not hasattr(fn, '__call__'): - continue - fn = getattr(fn, 'im_func', fn) - except AttributeError: - continue - try: - spec = inspect.getargspec(fn) - fn_args = inspect.formatargspec(spec[0]) - d_args = inspect.formatargspec(spec[0][1:]) - except TypeError: - fn_args = '(self, *args, **kw)' - d_args = '(*args, **kw)' - - py = ("def %(method)s%(fn_args)s: " - "return %(name)s.%(method)s%(d_args)s" % locals()) - - env = from_instance is not None and {name: from_instance} or {} - compat.exec_(py, env) - try: - env[method].__defaults__ = fn.__defaults__ - except AttributeError: - pass - setattr(into_cls, method, env[method]) - - -def methods_equivalent(meth1, meth2): - """Return True if the two methods are the same implementation.""" - - return getattr(meth1, '__func__', meth1) is getattr( - meth2, '__func__', meth2) - - -def as_interface(obj, cls=None, methods=None, required=None): - """Ensure basic interface compliance for an instance or dict of callables. - - Checks that ``obj`` implements public methods of ``cls`` or has members - listed in ``methods``. If ``required`` is not supplied, implementing at - least one interface method is sufficient. Methods present on ``obj`` that - are not in the interface are ignored. - - If ``obj`` is a dict and ``dict`` does not meet the interface - requirements, the keys of the dictionary are inspected. Keys present in - ``obj`` that are not in the interface will raise TypeErrors. - - Raises TypeError if ``obj`` does not meet the interface criteria. - - In all passing cases, an object with callable members is returned. In the - simple case, ``obj`` is returned as-is; if dict processing kicks in then - an anonymous class is returned. - - obj - A type, instance, or dictionary of callables. - cls - Optional, a type. All public methods of cls are considered the - interface. An ``obj`` instance of cls will always pass, ignoring - ``required``.. - methods - Optional, a sequence of method names to consider as the interface. - required - Optional, a sequence of mandatory implementations. If omitted, an - ``obj`` that provides at least one interface method is considered - sufficient. As a convenience, required may be a type, in which case - all public methods of the type are required. - - """ - if not cls and not methods: - raise TypeError('a class or collection of method names are required') - - if isinstance(cls, type) and isinstance(obj, cls): - return obj - - interface = set(methods or [m for m in dir(cls) if not m.startswith('_')]) - implemented = set(dir(obj)) - - complies = operator.ge - if isinstance(required, type): - required = interface - elif not required: - required = set() - complies = operator.gt - else: - required = set(required) - - if complies(implemented.intersection(interface), required): - return obj - - # No dict duck typing here. 
- if not isinstance(obj, dict): - qualifier = complies is operator.gt and 'any of' or 'all of' - raise TypeError("%r does not implement %s: %s" % ( - obj, qualifier, ', '.join(interface))) - - class AnonymousInterface(object): - """A callable-holding shell.""" - - if cls: - AnonymousInterface.__name__ = 'Anonymous' + cls.__name__ - found = set() - - for method, impl in dictlike_iteritems(obj): - if method not in interface: - raise TypeError("%r: unknown in this interface" % method) - if not compat.callable(impl): - raise TypeError("%r=%r is not callable" % (method, impl)) - setattr(AnonymousInterface, method, staticmethod(impl)) - found.add(method) - - if complies(found, required): - return AnonymousInterface - - raise TypeError("dictionary does not contain required keys %s" % - ', '.join(required - found)) - - -class memoized_property(object): - """A read-only @property that is only evaluated once.""" - - def __init__(self, fget, doc=None): - self.fget = fget - self.__doc__ = doc or fget.__doc__ - self.__name__ = fget.__name__ - - def __get__(self, obj, cls): - if obj is None: - return self - obj.__dict__[self.__name__] = result = self.fget(obj) - return result - - def _reset(self, obj): - memoized_property.reset(obj, self.__name__) - - @classmethod - def reset(cls, obj, name): - obj.__dict__.pop(name, None) - - -def memoized_instancemethod(fn): - """Decorate a method memoize its return value. - - Best applied to no-arg methods: memoization is not sensitive to - argument values, and will always return the same value even when - called with different arguments. - - """ - - def oneshot(self, *args, **kw): - result = fn(self, *args, **kw) - memo = lambda *a, **kw: result - memo.__name__ = fn.__name__ - memo.__doc__ = fn.__doc__ - self.__dict__[fn.__name__] = memo - return result - return update_wrapper(oneshot, fn) - - -class group_expirable_memoized_property(object): - """A family of @memoized_properties that can be expired in tandem.""" - - def __init__(self, attributes=()): - self.attributes = [] - if attributes: - self.attributes.extend(attributes) - - def expire_instance(self, instance): - """Expire all memoized properties for *instance*.""" - stash = instance.__dict__ - for attribute in self.attributes: - stash.pop(attribute, None) - - def __call__(self, fn): - self.attributes.append(fn.__name__) - return memoized_property(fn) - - def method(self, fn): - self.attributes.append(fn.__name__) - return memoized_instancemethod(fn) - - -class MemoizedSlots(object): - """Apply memoized items to an object using a __getattr__ scheme. - - This allows the functionality of memoized_property and - memoized_instancemethod to be available to a class using __slots__. 
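
# A minimal standalone version of the memoized_property pattern above: a
# non-data descriptor computes once, then caches the value in the instance
# __dict__, which shadows the descriptor on every later access.

class memoized:
    def __init__(self, fget):
        self.fget = fget
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        if obj is None:
            return self
        obj.__dict__[self.__name__] = result = self.fget(obj)
        return result

class Table:
    calls = 0

    @memoized
    def columns(self):
        type(self).calls += 1
        return ['id', 'name']

t = Table()
assert t.columns == t.columns and Table.calls == 1   # fget ran exactly once
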
- - """ - - __slots__ = () - - def _fallback_getattr(self, key): - raise AttributeError(key) - - def __getattr__(self, key): - if key.startswith('_memoized'): - raise AttributeError(key) - elif hasattr(self, '_memoized_attr_%s' % key): - value = getattr(self, '_memoized_attr_%s' % key)() - setattr(self, key, value) - return value - elif hasattr(self, '_memoized_method_%s' % key): - fn = getattr(self, '_memoized_method_%s' % key) - - def oneshot(*args, **kw): - result = fn(*args, **kw) - memo = lambda *a, **kw: result - memo.__name__ = fn.__name__ - memo.__doc__ = fn.__doc__ - setattr(self, key, memo) - return result - oneshot.__doc__ = fn.__doc__ - return oneshot - else: - return self._fallback_getattr(key) - - -def dependency_for(modulename): - def decorate(obj): - # TODO: would be nice to improve on this import silliness, - # unfortunately importlib doesn't work that great either - tokens = modulename.split(".") - mod = compat.import_( - ".".join(tokens[0:-1]), globals(), locals(), tokens[-1]) - mod = getattr(mod, tokens[-1]) - setattr(mod, obj.__name__, obj) - return obj - return decorate - - -class dependencies(object): - """Apply imported dependencies as arguments to a function. - - E.g.:: - - @util.dependencies( - "sqlalchemy.sql.widget", - "sqlalchemy.engine.default" - ); - def some_func(self, widget, default, arg1, arg2, **kw): - # ... - - Rationale is so that the impact of a dependency cycle can be - associated directly with the few functions that cause the cycle, - and not pollute the module-level namespace. - - """ - - def __init__(self, *deps): - self.import_deps = [] - for dep in deps: - tokens = dep.split(".") - self.import_deps.append( - dependencies._importlater( - ".".join(tokens[0:-1]), - tokens[-1] - ) - ) - - def __call__(self, fn): - import_deps = self.import_deps - spec = compat.inspect_getfullargspec(fn) - - spec_zero = list(spec[0]) - hasself = spec_zero[0] in ('self', 'cls') - - for i in range(len(import_deps)): - spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i - - inner_spec = format_argspec_plus(spec, grouped=False) - - for impname in import_deps: - del spec_zero[1 if hasself else 0] - spec[0][:] = spec_zero - - outer_spec = format_argspec_plus(spec, grouped=False) - - code = 'lambda %(args)s: fn(%(apply_kw)s)' % { - "args": outer_spec['args'], - "apply_kw": inner_spec['apply_kw'] - } - - decorated = eval(code, locals()) - decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__ - return update_wrapper(decorated, fn) - - @classmethod - def resolve_all(cls, path): - for m in list(dependencies._unresolved): - if m._full_path.startswith(path): - m._resolve() - - _unresolved = set() - _by_key = {} - - class _importlater(object): - _unresolved = set() - - _by_key = {} - - def __new__(cls, path, addtl): - key = path + "." + addtl - if key in dependencies._by_key: - return dependencies._by_key[key] - else: - dependencies._by_key[key] = imp = object.__new__(cls) - return imp - - def __init__(self, path, addtl): - self._il_path = path - self._il_addtl = addtl - dependencies._unresolved.add(self) - - @property - def _full_path(self): - return self._il_path + "." 
+ self._il_addtl - - @memoized_property - def module(self): - if self in dependencies._unresolved: - raise ImportError( - "importlater.resolve_all() hasn't " - "been called (this is %s %s)" - % (self._il_path, self._il_addtl)) - - return getattr(self._initial_import, self._il_addtl) - - def _resolve(self): - dependencies._unresolved.discard(self) - self._initial_import = compat.import_( - self._il_path, globals(), locals(), - [self._il_addtl]) - - def __getattr__(self, key): - if key == 'module': - raise ImportError("Could not resolve module %s" - % self._full_path) - try: - attr = getattr(self.module, key) - except AttributeError: - raise AttributeError( - "Module %s has no attribute '%s'" % - (self._full_path, key) - ) - self.__dict__[key] = attr - return attr - - -# from paste.deploy.converters -def asbool(obj): - if isinstance(obj, compat.string_types): - obj = obj.strip().lower() - if obj in ['true', 'yes', 'on', 'y', 't', '1']: - return True - elif obj in ['false', 'no', 'off', 'n', 'f', '0']: - return False - else: - raise ValueError("String is not true/false: %r" % obj) - return bool(obj) - - -def bool_or_str(*text): - """Return a callable that will evaluate a string as - boolean, or one of a set of "alternate" string values. - - """ - def bool_or_value(obj): - if obj in text: - return obj - else: - return asbool(obj) - return bool_or_value - - -def asint(value): - """Coerce to integer.""" - - if value is None: - return value - return int(value) - - -def coerce_kw_type(kw, key, type_, flexi_bool=True): - """If 'key' is present in dict 'kw', coerce its value to type 'type\_' if - necessary. If 'flexi_bool' is True, the string '0' is considered false - when coercing to boolean. - """ - - if key in kw and not isinstance(kw[key], type_) and kw[key] is not None: - if type_ is bool and flexi_bool: - kw[key] = asbool(kw[key]) - else: - kw[key] = type_(kw[key]) - - -def constructor_copy(obj, cls, *args, **kw): - """Instantiate cls using the __dict__ of obj as constructor arguments. - - Uses inspect to match the named arguments of ``cls``. - - """ - - names = get_cls_kwargs(cls) - kw.update((k, obj.__dict__[k]) for k in names if k in obj.__dict__) - return cls(*args, **kw) - - -def counter(): - """Return a threadsafe counter function.""" - - lock = compat.threading.Lock() - counter = itertools.count(1) - - # avoid the 2to3 "next" transformation... - def _next(): - lock.acquire() - try: - return next(counter) - finally: - lock.release() - - return _next - - -def duck_type_collection(specimen, default=None): - """Given an instance or class, guess if it is or is acting as one of - the basic collection types: list, set and dict. If the __emulates__ - property is present, return that preferentially. 
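
# A standalone sketch of the asbool()/coerce_kw_type() behavior above:
# stringly-typed configuration values (for example, from a database URL
# query string) are normalized into real Python types. Values illustrative.

def as_bool(obj):
    if isinstance(obj, str):
        obj = obj.strip().lower()
        if obj in ('true', 'yes', 'on', 'y', 't', '1'):
            return True
        if obj in ('false', 'no', 'off', 'n', 'f', '0'):
            return False
        raise ValueError("String is not true/false: %r" % obj)
    return bool(obj)

kw = {'echo': 'true', 'pool_size': '5'}
assert as_bool(kw['echo']) is True
assert int(kw['pool_size']) == 5
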
- """ - - if hasattr(specimen, '__emulates__'): - # canonicalize set vs sets.Set to a standard: the builtin set - if (specimen.__emulates__ is not None and - issubclass(specimen.__emulates__, set)): - return set - else: - return specimen.__emulates__ - - isa = isinstance(specimen, type) and issubclass or isinstance - if isa(specimen, list): - return list - elif isa(specimen, set): - return set - elif isa(specimen, dict): - return dict - - if hasattr(specimen, 'append'): - return list - elif hasattr(specimen, 'add'): - return set - elif hasattr(specimen, 'set'): - return dict - else: - return default - - -def assert_arg_type(arg, argtype, name): - if isinstance(arg, argtype): - return arg - else: - if isinstance(argtype, tuple): - raise exc.ArgumentError( - "Argument '%s' is expected to be one of type %s, got '%s'" % - (name, ' or '.join("'%s'" % a for a in argtype), type(arg))) - else: - raise exc.ArgumentError( - "Argument '%s' is expected to be of type '%s', got '%s'" % - (name, argtype, type(arg))) - - -def dictlike_iteritems(dictlike): - """Return a (key, value) iterator for almost any dict-like object.""" - - if compat.py3k: - if hasattr(dictlike, 'items'): - return list(dictlike.items()) - else: - if hasattr(dictlike, 'iteritems'): - return dictlike.iteritems() - elif hasattr(dictlike, 'items'): - return iter(dictlike.items()) - - getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None)) - if getter is None: - raise TypeError( - "Object '%r' is not dict-like" % dictlike) - - if hasattr(dictlike, 'iterkeys'): - def iterator(): - for key in dictlike.iterkeys(): - yield key, getter(key) - return iterator() - elif hasattr(dictlike, 'keys'): - return iter((key, getter(key)) for key in dictlike.keys()) - else: - raise TypeError( - "Object '%r' is not dict-like" % dictlike) - - -class classproperty(property): - """A decorator that behaves like @property except that operates - on classes rather than instances. - - The decorator is currently special when using the declarative - module, but note that the - :class:`~.sqlalchemy.ext.declarative.declared_attr` - decorator should be used for this purpose with declarative. - - """ - - def __init__(self, fget, *arg, **kw): - super(classproperty, self).__init__(fget, *arg, **kw) - self.__doc__ = fget.__doc__ - - def __get__(desc, self, cls): - return desc.fget(cls) - - -class hybridproperty(object): - def __init__(self, func): - self.func = func - - def __get__(self, instance, owner): - if instance is None: - clsval = self.func(owner) - clsval.__doc__ = self.func.__doc__ - return clsval - else: - return self.func(instance) - - -class hybridmethod(object): - """Decorate a function as cls- or instance- level.""" - - def __init__(self, func): - self.func = func - - def __get__(self, instance, owner): - if instance is None: - return self.func.__get__(owner, owner.__class__) - else: - return self.func.__get__(instance, owner) - - -class _symbol(int): - def __new__(self, name, doc=None, canonical=None): - """Construct a new named symbol.""" - assert isinstance(name, compat.string_types) - if canonical is None: - canonical = hash(name) - v = int.__new__(_symbol, canonical) - v.name = name - if doc: - v.__doc__ = doc - return v - - def __reduce__(self): - return symbol, (self.name, "x", int(self)) - - def __str__(self): - return repr(self) - - def __repr__(self): - return "symbol(%r)" % self.name - -_symbol.__name__ = 'symbol' - - -class symbol(object): - """A constant symbol. 
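
# A minimal standalone sketch of the interning behavior symbol() above
# guarantees: one instance per name, created under a lock, so identity
# comparison works like an enum member. The class name is illustrative only.

import threading

class TinySymbol:
    _registry = {}
    _lock = threading.Lock()

    def __new__(cls, name):
        with cls._lock:
            sym = cls._registry.get(name)
            if sym is None:
                sym = cls._registry[name] = super().__new__(cls)
                sym.name = name
            return sym

    def __repr__(self):
        return 'symbol(%r)' % self.name

assert TinySymbol('NO_VALUE') is TinySymbol('NO_VALUE')   # singleton per name
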
- - >>> symbol('foo') is symbol('foo') - True - >>> symbol('foo') - - - A slight refinement of the MAGICCOOKIE=object() pattern. The primary - advantage of symbol() is its repr(). They are also singletons. - - Repeated calls of symbol('name') will all return the same instance. - - The optional ``doc`` argument assigns to ``__doc__``. This - is strictly so that Sphinx autoattr picks up the docstring we want - (it doesn't appear to pick up the in-module docstring if the datamember - is in a different module - autoattribute also blows up completely). - If Sphinx fixes/improves this then we would no longer need - ``doc`` here. - - """ - symbols = {} - _lock = compat.threading.Lock() - - def __new__(cls, name, doc=None, canonical=None): - cls._lock.acquire() - try: - sym = cls.symbols.get(name) - if sym is None: - cls.symbols[name] = sym = _symbol(name, doc, canonical) - return sym - finally: - symbol._lock.release() - - -_creation_order = 1 - - -def set_creation_order(instance): - """Assign a '_creation_order' sequence to the given instance. - - This allows multiple instances to be sorted in order of creation - (typically within a single thread; the counter is not particularly - threadsafe). - - """ - global _creation_order - instance._creation_order = _creation_order - _creation_order += 1 - - -def warn_exception(func, *args, **kwargs): - """executes the given function, catches all exceptions and converts to - a warning. - - """ - try: - return func(*args, **kwargs) - except Exception: - warn("%s('%s') ignored" % sys.exc_info()[0:2]) - - -def ellipses_string(value, len_=25): - try: - if len(value) > len_: - return "%s..." % value[0:len_] - else: - return value - except TypeError: - return value - - -class _hash_limit_string(compat.text_type): - """A string subclass that can only be hashed on a maximum amount - of unique values. - - This is used for warnings so that we can send out parameterized warnings - without the __warningregistry__ of the module, or the non-overridable - "once" registry within warnings.py, overloading memory, - - - """ - def __new__(cls, value, num, args): - interpolated = (value % args) + \ - (" (this warning may be suppressed after %d occurrences)" % num) - self = super(_hash_limit_string, cls).__new__(cls, interpolated) - self._hash = hash("%s_%d" % (value, hash(interpolated) % num)) - return self - - def __hash__(self): - return self._hash - - def __eq__(self, other): - return hash(self) == hash(other) - - -def warn(msg): - """Issue a warning. - - If msg is a string, :class:`.exc.SAWarning` is used as - the category. - - """ - warnings.warn(msg, exc.SAWarning, stacklevel=2) - - -def warn_limited(msg, args): - """Issue a warning with a paramterized string, limiting the number - of registrations. - - """ - if args: - msg = _hash_limit_string(msg, 10, args) - warnings.warn(msg, exc.SAWarning, stacklevel=2) - - -def only_once(fn): - """Decorate the given function to be a no-op after it is called exactly - once.""" - - once = [fn] - - def go(*arg, **kw): - if once: - once_fn = once.pop() - return once_fn(*arg, **kw) - - return go - - -_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py') -_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)') - - -def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE): - """Chop extraneous lines off beginning and end of a traceback. 
- - :param tb: - a list of traceback lines as returned by ``traceback.format_stack()`` - - :param exclude_prefix: - a regular expression object matching lines to skip at beginning of - ``tb`` - - :param exclude_suffix: - a regular expression object matching lines to skip at end of ``tb`` - """ - start = 0 - end = len(tb) - 1 - while start <= end and exclude_prefix.search(tb[start]): - start += 1 - while start <= end and exclude_suffix.search(tb[end]): - end -= 1 - return tb[start:end + 1] - -NoneType = type(None) - - -def attrsetter(attrname): - code = \ - "def set(obj, value):"\ - " obj.%s = value" % attrname - env = locals().copy() - exec(code, env) - return env['set'] - - -class EnsureKWArgType(type): - """Apply translation of functions to accept **kw arguments if they - don't already. - - """ - def __init__(cls, clsname, bases, clsdict): - fn_reg = cls.ensure_kwarg - if fn_reg: - for key in clsdict: - m = re.match(fn_reg, key) - if m: - fn = clsdict[key] - spec = inspect.getargspec(fn) - if not spec.keywords: - clsdict[key] = wrapped = cls._wrap_w_kw(fn) - setattr(cls, key, wrapped) - super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict) - - def _wrap_w_kw(self, fn): - - def wrap(*arg, **kw): - return fn(*arg) - return update_wrapper(wrap, fn) - diff --git a/python/sqlalchemy/util/queue.py b/python/sqlalchemy/util/queue.py deleted file mode 100644 index 29e00a43..00000000 --- a/python/sqlalchemy/util/queue.py +++ /dev/null @@ -1,199 +0,0 @@ -# util/queue.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""An adaptation of Py2.3/2.4's Queue module which supports reentrant -behavior, using RLock instead of Lock for its mutex object. The -Queue object is used exclusively by the sqlalchemy.pool.QueuePool -class. - -This is to support the connection pool's usage of weakref callbacks to return -connections to the underlying Queue, which can in extremely -rare cases be invoked within the ``get()`` method of the Queue itself, -producing a ``put()`` inside the ``get()`` and therefore a reentrant -condition. - -""" - -from collections import deque -from time import time as _time -from .compat import threading - - -__all__ = ['Empty', 'Full', 'Queue'] - - -class Empty(Exception): - "Exception raised by Queue.get(block=0)/get_nowait()." - - pass - - -class Full(Exception): - "Exception raised by Queue.put(block=0)/put_nowait()." - - pass - - -class Queue: - def __init__(self, maxsize=0): - """Initialize a queue object with a given maximum size. - - If `maxsize` is <= 0, the queue size is infinite. - """ - - self._init(maxsize) - # mutex must be held whenever the queue is mutating. All methods - # that acquire mutex must release it before returning. mutex - # is shared between the two conditions, so acquiring and - # releasing the conditions also acquires and releases mutex. - self.mutex = threading.RLock() - # Notify not_empty whenever an item is added to the queue; a - # thread waiting to get is notified then. - self.not_empty = threading.Condition(self.mutex) - # Notify not_full whenever an item is removed from the queue; - # a thread waiting to put is notified then. 
- self.not_full = threading.Condition(self.mutex) - - def qsize(self): - """Return the approximate size of the queue (not reliable!).""" - - self.mutex.acquire() - n = self._qsize() - self.mutex.release() - return n - - def empty(self): - """Return True if the queue is empty, False otherwise (not - reliable!).""" - - self.mutex.acquire() - n = self._empty() - self.mutex.release() - return n - - def full(self): - """Return True if the queue is full, False otherwise (not - reliable!).""" - - self.mutex.acquire() - n = self._full() - self.mutex.release() - return n - - def put(self, item, block=True, timeout=None): - """Put an item into the queue. - - If optional args `block` is True and `timeout` is None (the - default), block if necessary until a free slot is - available. If `timeout` is a positive number, it blocks at - most `timeout` seconds and raises the ``Full`` exception if no - free slot was available within that time. Otherwise (`block` - is false), put an item on the queue if a free slot is - immediately available, else raise the ``Full`` exception - (`timeout` is ignored in that case). - """ - - self.not_full.acquire() - try: - if not block: - if self._full(): - raise Full - elif timeout is None: - while self._full(): - self.not_full.wait() - else: - if timeout < 0: - raise ValueError("'timeout' must be a positive number") - endtime = _time() + timeout - while self._full(): - remaining = endtime - _time() - if remaining <= 0.0: - raise Full - self.not_full.wait(remaining) - self._put(item) - self.not_empty.notify() - finally: - self.not_full.release() - - def put_nowait(self, item): - """Put an item into the queue without blocking. - - Only enqueue the item if a free slot is immediately available. - Otherwise raise the ``Full`` exception. - """ - return self.put(item, False) - - def get(self, block=True, timeout=None): - """Remove and return an item from the queue. - - If optional args `block` is True and `timeout` is None (the - default), block if necessary until an item is available. If - `timeout` is a positive number, it blocks at most `timeout` - seconds and raises the ``Empty`` exception if no item was - available within that time. Otherwise (`block` is false), - return an item if one is immediately available, else raise the - ``Empty`` exception (`timeout` is ignored in that case). - """ - self.not_empty.acquire() - try: - if not block: - if self._empty(): - raise Empty - elif timeout is None: - while self._empty(): - self.not_empty.wait() - else: - if timeout < 0: - raise ValueError("'timeout' must be a positive number") - endtime = _time() + timeout - while self._empty(): - remaining = endtime - _time() - if remaining <= 0.0: - raise Empty - self.not_empty.wait(remaining) - item = self._get() - self.not_full.notify() - return item - finally: - self.not_empty.release() - - def get_nowait(self): - """Remove and return an item from the queue without blocking. - - Only get an item if one is immediately available. Otherwise - raise the ``Empty`` exception. - """ - - return self.get(False) - - # Override these methods to implement other queue organizations - # (e.g. stack or priority queue). 
- # These will only be called with appropriate locks held - - # Initialize the queue representation - def _init(self, maxsize): - self.maxsize = maxsize - self.queue = deque() - - def _qsize(self): - return len(self.queue) - - # Check whether the queue is empty - def _empty(self): - return not self.queue - - # Check whether the queue is full - def _full(self): - return self.maxsize > 0 and len(self.queue) == self.maxsize - - # Put a new item in the queue - def _put(self, item): - self.queue.append(item) - - # Get an item from the queue - def _get(self): - return self.queue.popleft() diff --git a/python/sqlalchemy/util/topological.py b/python/sqlalchemy/util/topological.py deleted file mode 100644 index 0cd2bae2..00000000 --- a/python/sqlalchemy/util/topological.py +++ /dev/null @@ -1,100 +0,0 @@ -# util/topological.py -# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Topological sorting algorithms.""" - -from ..exc import CircularDependencyError -from .. import util - -__all__ = ['sort', 'sort_as_subsets', 'find_cycles'] - - -def sort_as_subsets(tuples, allitems, deterministic_order=False): - - edges = util.defaultdict(set) - for parent, child in tuples: - edges[child].add(parent) - - Set = util.OrderedSet if deterministic_order else set - - todo = Set(allitems) - - while todo: - output = Set() - for node in todo: - if todo.isdisjoint(edges[node]): - output.add(node) - - if not output: - raise CircularDependencyError( - "Circular dependency detected.", - find_cycles(tuples, allitems), - _gen_edges(edges) - ) - - todo.difference_update(output) - yield output - - -def sort(tuples, allitems, deterministic_order=False): - """sort the given list of items by dependency. - - 'tuples' is a list of tuples representing a partial ordering. - 'deterministic_order' keeps items within a dependency tier in list order. - """ - - for set_ in sort_as_subsets(tuples, allitems, deterministic_order): - for s in set_: - yield s - - -def find_cycles(tuples, allitems): - # adapted from: - # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html - - edges = util.defaultdict(set) - for parent, child in tuples: - edges[parent].add(child) - nodes_to_test = set(edges) - - output = set() - - # we'd like to find all nodes that are - # involved in cycles, so we do the full - # pass through the whole thing for each - # node in the original list. - - # we can go just through parent edge nodes. - # if a node is only a child and never a parent, - # by definition it can't be part of a cycle. same - # if it's not in the edges at all. 
-    for node in nodes_to_test:
-        stack = [node]
-        todo = nodes_to_test.difference(stack)
-        while stack:
-            top = stack[-1]
-            for node in edges[top]:
-                if node in stack:
-                    cyc = stack[stack.index(node):]
-                    todo.difference_update(cyc)
-                    output.update(cyc)
-
-                if node in todo:
-                    stack.append(node)
-                    todo.remove(node)
-                    break
-            else:
-                node = stack.pop()
-    return output
-
-
-def _gen_edges(edges):
-    return set([
-        (right, left)
-        for left in edges
-        for right in edges[left]
-    ])

From 9f12980948a6151b82bdedd37526827a0b830313 Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 9 Jan 2024 15:52:37 -0500
Subject: [PATCH 06/26] sqlalchemy version changes

---
 python/ccdb/model.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/python/ccdb/model.py b/python/ccdb/model.py
index 91b42f87..dcb32bdb 100644
--- a/python/ccdb/model.py
+++ b/python/ccdb/model.py
@@ -7,7 +7,7 @@
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.schema import Column, ForeignKey
 from sqlalchemy.types import Integer, String, Text, DateTime, Enum, Boolean
-from sqlalchemy.orm import reconstructor, relation
+from sqlalchemy.orm import reconstructor
 from sqlalchemy.orm import relationship, backref
 
 Base = declarative_base()
@@ -267,8 +267,8 @@ class Variation(Base):
     comment = Column(Text)
     author_id = Column('authorId', Integer, default=1)
     parent_id = Column('parentId', Integer, ForeignKey('variations.id'), default=1)
-    parent = relation('Variation', remote_side=[id])
-    children = relation("Variation")
+    parent = relationship('Variation', remote_side=[id])
+    children = relationship("Variation")
 
     def __repr__(self):
         return "<Variation {0} '{1}'>".format(self.id, self.name)

From a89ae84934f156bd70e13716997ecd052ed34c21 Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 9 Jan 2024 15:52:55 -0500
Subject: [PATCH 07/26] avoid python3 warnings

---
 python/mysql/connector/abstracts.py   | 2 +-
 python/mysql/connector/optionfiles.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/mysql/connector/abstracts.py b/python/mysql/connector/abstracts.py
index 9aa4ac93..17e39562 100644
--- a/python/mysql/connector/abstracts.py
+++ b/python/mysql/connector/abstracts.py
@@ -130,7 +130,7 @@ def _read_option_files(self, config):
                         config_options[option][1] <= value[1]):
                     config_options[option] = value
             except KeyError:
-                if group is 'connector_python':
+                if group == 'connector_python':
                     raise AttributeError("Unsupported argument "
                                          "'{0}'".format(option))
         except KeyError:
diff --git a/python/mysql/connector/optionfiles.py b/python/mysql/connector/optionfiles.py
index 43344812..0fb89b8b 100644
--- a/python/mysql/connector/optionfiles.py
+++ b/python/mysql/connector/optionfiles.py
@@ -95,7 +95,7 @@ def read_option_files(**config):
                     config_options[option][1] <= value[1]):
                 config_options[option] = value
         except KeyError:
-            if group is 'connector_python':
+            if group == 'connector_python':
                 raise AttributeError("Unsupported argument "
                                      "'{0}'".format(option))
         except KeyError:

From 98a272871da76c595d762eddd29c78b65c780344 Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 9 Jan 2024 16:03:55 -0500
Subject: [PATCH 08/26] silence sqlalchemy warning

---
 python/ccdb/model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/ccdb/model.py b/python/ccdb/model.py
index dcb32bdb..21b7f886 100644
--- a/python/ccdb/model.py
+++ b/python/ccdb/model.py
@@ -268,7 +268,7 @@ class Variation(Base):
     author_id = Column('authorId', Integer, default=1)
     parent_id = Column('parentId', Integer, ForeignKey('variations.id'), default=1)
     parent = relationship('Variation', remote_side=[id])
-    children = relationship("Variation")
+    children = relationship('Variation',viewonly=True)
 
     def __repr__(self):
         return "<Variation {0} '{1}'>".format(self.id, self.name)

From 9ebd9c00d4afd181bedaae68d562c9e059edbf8c Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 23 Apr 2024 14:28:01 -0400
Subject: [PATCH 09/26] run 2to3 on scons files

---
 SConstruct             |  4 ++--
 src/Library/SConscript | 24 ++++++++++++------------
 swig/SConscript        |  4 ++--
 3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/SConstruct b/SConstruct
index d64b1fd9..75e4b754 100644
--- a/SConstruct
+++ b/SConstruct
@@ -3,8 +3,8 @@ import os
 
 #Setup default environment. This environment
 if not 'CCDB_HOME' in os.environ:
-    print "CCDB_HOME environment variable is not found but should be set to compile the CCDB"
-    print "One can run 'source environment.bash' from your bash shell to automatically set environment variables"
+    print("CCDB_HOME environment variable is not found but should be set to compile the CCDB")
+    print("One can run 'source environment.bash' from your bash shell to automatically set environment variables")
     exit(1)
 
 #Create 'default' environment. Other environments will be a copy of this one
diff --git a/src/Library/SConscript b/src/Library/SConscript
index 0e850cc1..b3fbfa72 100644
--- a/src/Library/SConscript
+++ b/src/Library/SConscript
@@ -8,8 +8,8 @@ env = default_env.Clone()    #Clone it to add library specified things
 
 #Mac Os X requires install_name flag to be built properly
 if env['PLATFORM'] == 'darwin':
-    print
-    print "Darwin platform is detected. Setting -install_name @rpath/"+'${TARGET.file}'
+    print()
+    print("Darwin platform is detected. Setting -install_name @rpath/"+'${TARGET.file}')
     env.Append(SHLINKFLAGS = ['-install_name', '@rpath/'+'${TARGET.file}'])
 
 
@@ -74,17 +74,17 @@ else:
 #Read user flag for using mysql dependencies or not
 if ARGUMENTS.get("mysql","no")=="yes" or ARGUMENTS.get("with-mysql","true")=="true":
     #User wants mysql!
-    print "Building CCDB using MySQL dependencies"
-    print "To build CCDB without mysql dependencies. Run scons with 'with-mysql=false'"
-    print ""
+    print("Building CCDB using MySQL dependencies")
+    print("To build CCDB without mysql dependencies. Run scons with 'with-mysql=false'")
+    print("")
 
     if not WhereIs("mysql_config"):
-        print
-        print "ERROR. Can't find 'mysql_config' utility which is needed to build CCDB with MySQL support."
-        print "Two options is possible to build CCDB:"
-        print " 1. Install mysql_config (RHEL has it in mysql-devel package, Ubuntu in libmysqlclient-dev)"
-        print " 2. Build CCDB without MySQL dependencies (use 'mysql=no' scons flag)"
-        print
+        print()
+        print("ERROR. Can't find 'mysql_config' utility which is needed to build CCDB with MySQL support.")
+        print("Two options is possible to build CCDB:")
+        print(" 1. Install mysql_config (RHEL has it in mysql-devel package, Ubuntu in libmysqlclient-dev)")
+        print(" 2. Build CCDB without MySQL dependencies (use 'mysql=no' scons flag)")
+        print()
         Exit()
 
     mysql_sources = [
@@ -99,7 +99,7 @@ if ARGUMENTS.get("mysql","no")=="yes" or ARGUMENTS.get("with-mysql","true")=="tr
     env.Append(CPPDEFINES='CCDB_MYSQL')
     env.ParseConfig('mysql_config --libs --cflags')
 else:
-    print "CCDB is being build WITHOUT MySQL support. Use 'with-mysql=true' flag to explicitly enable MySQL support"
+    print("CCDB is being build WITHOUT MySQL support. Use 'with-mysql=true' flag to explicitly enable MySQL support")
 
 
 if ARGUMENTS.get("with-perflog","false")=="true":
diff --git a/swig/SConscript b/swig/SConscript
index fbd49c6a..827294bd 100644
--- a/swig/SConscript
+++ b/swig/SConscript
@@ -43,7 +43,7 @@ pyllapi_env.ParseConfig('python-config --cflags --libs')
 use_swig = int(ARGUMENTS.get("swig",0))
 
 if use_swig==1:
-    print "Building CCDB using SWIG conversion"
+    print("Building CCDB using SWIG conversion")
 
 
     #Setup environment
@@ -57,7 +57,7 @@ if use_swig==1:
     pyllapi_env.InstallAs('#swig/ccdb_pyllapi_wrap.cc','ccdb_pyllapi_wrap.cc')
     pyllapi_env.InstallAs('#swig/ccdb_pyllapi.py','ccdb_pyllapi.py')
 else:
-    print "Skipping SWIG wrapper generation. Use 'scons ... swig=1' to enable it"
+    print("Skipping SWIG wrapper generation. Use 'scons ... swig=1' to enable it")
 
 pyllapi_lib = pyllapi_env.SharedLibrary('_ccdb_pyllapi', ['ccdb_pyllapi_wrap.cc'], LIBS=["ccdb"], LIBPATH='#lib')
 pyllapi_env.Append(CPPPATH = ['#include', '#src'])

From df7a44179de11b3e5d7b71556962c57e068ddfc9 Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 23 Apr 2024 14:42:17 -0400
Subject: [PATCH 10/26] search homebrew libraries on macos

---
 src/CMakeLists.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 7cf4bbf1..49e3782f 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -38,6 +38,9 @@ if(APPLE)
   IF("${isSystemDir}" STREQUAL "-1")
     set(CMAKE_INSTALL_RPATH "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}")
   ENDIF("${isSystemDir}" STREQUAL "-1")
+
+  # find homebrew libraries:
+  link_directories(/opt/homebrew/lib)
 endif(APPLE)
 
 # Set a default build type if none was specified

From cf99bf9493adbd0f600b3393b620df0828d72f26 Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 23 Apr 2024 14:53:21 -0400
Subject: [PATCH 11/26] install executable

---
 CMakeLists.txt | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2f0d40b6..1d307e56 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -13,3 +13,6 @@ cmake_minimum_required(VERSION 3.3)
 project(CCDB_Project)
 
 add_subdirectory(src)
+
+install(PROGRAMS bin/ccdb DESTINATION ${CMAKE_INSTALL_PREFIX}/bin)
+

From b340a50974037d47ecafb475e44dba96827d129d Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 28 May 2024 15:40:25 -0400
Subject: [PATCH 12/26] cmake install python

---
 CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1d307e56..60bfd1c2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -15,4 +15,5 @@ project(CCDB_Project)
 add_subdirectory(src)
 
 install(PROGRAMS bin/ccdb DESTINATION ${CMAKE_INSTALL_PREFIX}/bin)
+install(DIRECTORY python DESTINATION ${CMAKE_INSTALL_PREFIX})
 

From 42a5acca43c4950951e6932385ae4fc75445fe5e Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Mon, 1 Jul 2024 17:49:36 -0400
Subject: [PATCH 13/26] detect python version for imp->importlib

---
 python/ccdb/cmd/console_context.py | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/python/ccdb/cmd/console_context.py b/python/ccdb/cmd/console_context.py
index 2496d720..50368412 100644
--- a/python/ccdb/cmd/console_context.py
+++ b/python/ccdb/cmd/console_context.py
@@ -1,7 +1,6 @@
 
 import os
 import re
-import imp
 import sys
 import logging
 import shlex
@@ -169,9 +168,9 @@ def register_utilities(self, path=""):
                 self._ls = util
 
             except AttributeError as ex:
-                log.debug("Error registering module : " + repr(ex))
+                log.warning("Error registering module : " + repr(ex))
             except Exception as ex:
-                log.debug("Error registering module : " + repr(ex))
+                log.warning("Error registering module : " + repr(ex))
 
         if log.isEnabledFor(logging.DEBUG):
             log.debug(lfm("{0}Utils found and registered in directory '{1}' are:", os.linesep, path))
@@ -208,11 +207,19 @@ def search_utils(self, path):
             if m.startswith('__'):
                 continue
             try:
-                f, filename, desc = imp.find_module(m, [path])
-                modules.append(imp.load_module(m, f, filename, desc))
+                if sys.version_info < (3,12):
+                    import imp
+                    f, filename, desc = imp.find_module(m, [path])
+                    modules.append(imp.load_module(m, f, filename, desc))
+                else:
+                    import importlib
+                    x = importlib.util.spec_from_file_location('ccdb.cmd.utils.'+m, os.path.dirname(__file__)+'/utils/%s.py'%m)
+                    y = importlib.util.module_from_spec(x)
+                    x.loader.exec_module(y)
+                    modules.append(y)
             except ImportError as ex:
-                log.debug(lfm(" |- error importing module: {0}", m))
-                log.debug(lfm(" |\\{0} ||-{1}", os.linesep, repr(ex)))
+                log.warning(lfm(" |- error importing module: {0}", m))
+                log.warning(lfm(" |\\{0} ||-{1}", os.linesep, repr(ex)))
                 continue
 
     return modules

From 468356e90765fb68fa15d900a8e692956c1cc08f Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 16 Jul 2024 17:30:07 -0400
Subject: [PATCH 14/26] 2to3 for user creation

---
 scripts/users_create/users_create.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/scripts/users_create/users_create.py b/scripts/users_create/users_create.py
index afb175e1..b5f45804 100644
--- a/scripts/users_create/users_create.py
+++ b/scripts/users_create/users_create.py
@@ -8,7 +8,7 @@
 
 #-------------------------------------------------------------------------------------------------------------
 def print_help():
-    print """
+    print("""
     This utility can add users to ccdb database by user names.
     The idea behind this utility is to recreate users list from cronjob.
 
@@ -27,7 +27,7 @@ def print_help():
     echo "anna,bob,smith' | python users_create.py mysql://ccdb_user@localhost/ccdb
 
     The scripts fails if no '--recreate' flag is given and a user exists.
-    """
+    """)
 
 
 #-------------------------------------------------------------------------------------------------------------
@@ -52,7 +52,7 @@ def delete_users(provider):
         if not user.name in god_list:
             provider.delete_user(user.name)
             deleted_count += 1
-    print ("Users deleted {}".format(deleted_count))
+    print(("Users deleted {}".format(deleted_count)))
 
 
 #-------------------------------------------------------------------------------------------------------------
@@ -90,9 +90,9 @@ def create_users(provider, user_names):
             provider.create_user(name)
             count += 1
         except UserExistsError as err:
-            print(err.message)
+            print((err.message))
 
-    print("Users created: {}".format(len(user_names)))
+    print(("Users created: {}".format(len(user_names))))
 
 
 #-------------------------------------------------------------------------------------------------------------
@@ -123,7 +123,7 @@ def create_users(provider, user_names):
        print("Error! No connection string given!")
        print_help()
        sys.exit(1)
-    print ("Connecting to '" + connection_str + "'")
+    print(("Connecting to '" + connection_str + "'"))
 
     provider = get_provider(connection_str)
 
@@ -135,12 +135,12 @@ def create_users(provider, user_names):
        try:
            delete_users(provider)
        except Exception as ex:
-            print("User deletion failed with error of type {} : {}".format(type(ex),ex.message))
+            print(("User deletion failed with error of type {} : {}".format(type(ex),ex.message)))
            sys.exit(2)
 
    #create new users
    try:
        create_users(provider, names)
    except Exception as ex:
-        print("User creation failed with error of type {} : {}".format(type(ex),ex.message))
+        print(("User creation failed with error of type {} : {}".format(type(ex),ex.message)))
        sys.exit(3)

From ccacf2c3369e116ee09248c5e9bd88c3d3b7be49 Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 16 Jul 2024 17:31:19 -0400
Subject: [PATCH 15/26] cleanup

---
 scripts/scons-local-2.1.0.tar.gz | Bin 400836 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 scripts/scons-local-2.1.0.tar.gz

diff --git a/scripts/scons-local-2.1.0.tar.gz b/scripts/scons-local-2.1.0.tar.gz
deleted file mode 100644
index 27429670d547d74bdddafdb90c82aeb7e3bbfc78..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 400836
[400836-byte base85-encoded binary payload omitted]
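A note on the imp -> importlib change in PATCH 13 above: the importlib.util calls there follow the standard three-step recipe for loading a single .py file (build a spec from a path, create a module object from the spec, execute it). A minimal self-contained sketch of that recipe, with an illustrative module name and file path rather than the real ccdb layout:

import importlib.util

def load_module_from_file(module_name, file_path):
    """Load one .py file as a module; the importlib replacement for
    imp.find_module()/imp.load_module(). Name and path are illustrative."""
    # 1. Describe where the module lives
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    # 2. Create an empty module object from that description
    module = importlib.util.module_from_spec(spec)
    # 3. Run the module's top-level code to populate it
    spec.loader.exec_module(module)
    return module

# Hypothetical usage mirroring search_utils() above:
# util = load_module_from_file('ccdb.cmd.utils.cat', 'python/ccdb/cmd/utils/cat.py')

Unlike imp.load_module(), this recipe does not register the result in sys.modules; search_utils() keeps its own list of loaded modules, so nothing in the surrounding code depends on that registration.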
z67ly7#ADR}wRX(+(KLF(xQy3cUJ@>&x7WIm1qMbBuIn)Y@08idJkkK8_jFIBCM{6> z{5_CIp8n_63wxUi&x9P$@>GjnhR@?7)7=Q6AX5s{ZQ1xwp^(qyFG0fqs#8KL1P*&O zF-#oXpw>ZUn8c!6YQ2R916*C4hq)7ubwB?ii;Dl@$saeU06~7Zptgjp`2h(Bg3=JGTHtBXW#q@83MM8aI$B`<@W#Vs$yHLfF|CE6#?m2( zJoJ3_Fqinmd1#a`T-tS*ruTv57@{D^2}L2=mc;m}&}c_)8ScWxqEQKO#Cmqt?pKnC zvY^oS!9^kXdNrYWBT)9D;Oh9(A7qRhiX6sCgpi<47s9UE|2Do8F6WFOFcP?L|Qz!#=2dNIwA&R7bJq8)2aE(&odTGicJ<=}u&Z=_m>P zHV1nMs#gTw1y=7uP3K)t$i($e915+MNm2|;XZ-tfAp*D0oGGHpvd^5ukDqbO0{LqG z-dkwgPbl|@D{RC`A(N>e{lI@ylWN>4{SdYWTruHblc7RTO@^aqX@De+LdtYHv1 zT`J^p5wOlAzM^?lw@-_~LKcH$nC#f0g9I`*2rG(L0mxn-T{4hbqa9)eE=yoGY?veR zQzN=TW>|!C6?qJ|FN=M$a=zntAHH*#A-$W<6GYtQ{RX-J86n0pL;%aBVzJs0BoN3$ zPj=4{WEUSz9?pkLW(WWyI5Lre^ZBJkar*bEJ&VFR!XanRwh4Z&q=fgz8NExQE_*+= z`riROzbk4d`P#6BQ_<1I@=NZuJ`bM*<*c?Y59-%=0x8*xe~WH2n{tBvk8BqTDbe`9foVCdIt zJpRR`DZz-(F=Lauu*<;CG=|2sT!imHbyX4?7V_I+QdS=EzHE+ZL&Objw?1?afTcAJ zTbi6mYXS!Y%3}&h)Cj*I7py{gqh4t23HDhx{6Iv8JwU+k>CO4E|5c!~se~8L<99B5 zOIT0bxe&!@P!$@#^@-8(%gyU4n%U>f(%Mw$pq8q0)Wm*HbKFa9t? z`B(Yxd)Ua;ve7h2GHE;l_T!b{?T0cao;u_Ue8s>>5!xDfZ50W85N*qj7K^o!M!sE=J zz0*$;uyJK_@S!nI6RwmP(8gb>mQqSrX zi>=FGjg1q%6f_b{1XQWF2UZvD)VrHi>w?`=ql#(Y6PX{PKWyDDHE330vMV+(q_l=&I4A{2e|cGd zwU)AZ3J~N`B0X)yAsBIz6R&EQQ_U7mO@Sui4DqDZ7|0U240A)3RtA1RIQcQuPcI-Ic&NJISd41P+PJ%}^vl4XPZmOYJ-cD7OB;ijxq6vRTqolQ46} z40gk`IDP?~kwpt(W2h9l5oz0*jQmpd$1ri%9~!QBkbN#AkuDyfHweDX8Bv&mEMb!- z7n9S8aOhH`WZuT}0E++YV@?@L)DxoGmn9?dqHn)mYa zaPmYTq?M#pAT)5gRA?#B?5G%`;18Sa%ydUCA&I2Zx_9cbBKCB2Zp?6cKTN){$()&E zMtEH?5v*}(;Y(t7+>bv#4Vv#x*bh*?w9t0(x|+$AIsI{(cgPQH(4-CZzgk#jbFa~{ z+FonU43@gH%b;qo9DHU%C*zFc`KhGdd&4n{n>8I36nxeF#RCoF_wQaM>>9xX_yoK8 z*d+&d31d50@PBt~2kk?}!@~JE1Ib?uX$=y-W#);scIm4J2_^~N_usv~7z{P^FnyQ5 zu{3AiFpp<uO4ZG?mlNundpObx;?&3$k_^6C<38N?3#*4Z={+5{*?`=2(yPIJ!l}i_^b3 zb`Od@)giKZZwj5p|A2W|@ZAkLr-&GgcdLfPgvPjkiKPjGg@jrE9eVfEM%jg;)~H3R z9uEw?8?B)*ky$E@dlGJ$+nG3{`CFEsS-hbuS;1|!{mZRoTC!OAcVAyn_pd0$PSXf+7gVa#_h-Hd7&&nf++Cx!9fUrm`%=M1AQl|nP)0X^;{%{#()o&ihOm&t() zc&vLFuO9=O8_UZ&X=CGHs~V-fj-pOvP_v_k-h_OHrCX5S4w-f<(sED?3Vi4%i;&VDTU^0=I+v6~CtHQs{(%%%yL#8Gx~=FN8GXO~@V;bh>)gO(37b zPt+U^`rAv$Lvy=M>c-G%nu9h|g0)$!uhTciv*DSLN;bU)*pZP^&Nc+v-Ce^!#tEA+ zC{FF)gkO14*Q@=Io^b_);xd_Pt*X7Rs{s>lfS&`wg>Tt44y8Gcj!0{(rSd%l9iIga zVfnW$*u%>|Y`<7_ukT6>6D$yeP2wQ}2Px@OXwS6)-#c{8HlgQbn2AIaZJ}UyYaJ&R zp<>%-T4+S6=hRuONM)=s7l4MBWN|0DKQbANlKJCTf&&qMF^{&nBi^G(B`YafHSlXS z`1e4~F7cehe;I|o3&dUSQ<7b;?-O((JqG#=>0x_@VKKb*B=1;(&ZFD2a*M?bZ)2j5 zRyg|*bps$BEV0|-_kuL{|7cXU-8X7+bVi#u zH1OB&i~Q`LGPzchu6M4h^qjVHqS4x0m)j$;qZZ>lxfI)%ywg#*n@QJsAg;66dqt<92qsSS3DNbM|< zQu^>MM2SsZLwt8k7~GGqc*0;Rx{pY1+Na~c7Y(2LE=d5`kbSHwk_yRsA14ol73XH0 z!?_PiD-uZH-xBZBmPGU)*#kLJH_@)2Kl=#022Vq++qJMU!tv7>aR6}w4&MPWM!5Gm zfHBx^!R$SV?hf0B8MvHhsULULGyq^!bG2hQ?_I zWF6th`*E$fU=vi1{7Z(3S>N!eOP_gdZaaM%AaFCy#D~M@N8(TPH*J^BT;TiYBjArr zgW4QtJsSHK61H=oe+FZBnx%%sFqQ=?s3`)^V4)yBpI{IkGxx5Wz8Am=v1S{!1l%*z zS2WgLn8?%PTpkiF!I-~y&fG)U6DgLJ&3?<%CBe0oO8(?9z%0t&4q_ru4QBH96yn-mO0_Oz!8YD96B=&WZ58WRLc(kBw*0%WLF~B|U$jbfMsRxqo)I z8GK`HVOo}tajt#KBHKzMH7yacNn`uyfz8m=%g+tF)f}SC{EORsdW9*u@0CdP@4HBR z`M769Y?$G$G^nL&aAK%t7GUq5g%{A=50skVwl#l_ zg}yJ_gKLtl+GGNMG($&=%pcK*1AjE}zAZc}aG%g)E6BOo;pTfUE6cadL82`dlLh|$ z!UoQ&3yx=wnt?{&9xKs#Kkx~bbb_>{fzE9>T!GA9L^#w)D2u?GW2G2f;7qo5W8q$Us5$&T;{jrBDn>WRM^%1>ZGr*mpI!Fcx;R3(uZu#S3g^zU@;$^9|+> z{UnC7eJ#_PH~W|ncoxt1eH^dkdc^t|;|3yt{RX;GgCFj(k}RW1%sDvDbA@b>x1cA{ z8hJ5+Z?vu(3&kAw(47AF`UWM3p#7KCU%#Pj*cT?Bl+*J=6;h#Rw2rEbWpML88mkqF~`}2%24`qaNmJ^FQ*}TK6 z^HVV(`KBME23DQ6xvJPvM>q$2B*_(--31ZF$puB$I!!db`;ezz(B41-_8euMU0zI4 zL~FqI`3HgSh@8h0GzArvxWtmT>+0ueX^lesS%)kstdEhfuc@nXsfCn_Y!jQzk@KluL z1|9Z2Kn?vvdc;~a9b 
z9*ri8)o6rQsL|-g$nxe7839RfG`W{7bwh?d<|uQ)pMcPZ6vOR{8P|)kJ;T4k6b*2J z450Y~)0MwTAJ{D!oUI|~g#jQiM1tKe4U3q8dzSgHVG>o2N$S{XG)tG2N_C4)orJ~N zW$506oVE-nIisym-%D#N>%M5gycukw98HQfSQ2#Z)>x)3yl$$%zFFlw!_XjxBiS>R5pl zMlSZyau;SNDm!lv%$WC5H>Z)`_=x~w|cKzWmTDga4B_h0jmss22}u&4=QwQ$}f2?(UYa)S<# z{-sVxofzKkt_jRAV3t3ag*{bi#|U(Yq`yslaO|11{i689egvJEe*(RE3qf+?=AuOO z2qHiu_Tz0;`V-t@gnnqdx3;<7-01qVw0bL*u-SF%Un1FDZe3*TzN};j2R@$czO}^0WbTKut zq3i_3FX6Gur?3X>?rce?tEvnN60Gl%g2om`V%Ul(j&wa{aUT4M4l)n z9m>}cNyf-tQS36zJ!v+UhZY09_e6y@m?fTM{k$FId?Xl7O;tX<$05VlaiLFQr9Ak? z;d6LPS!%krims?Dwia}=sPS~s#@l$|IIBNXV z^;2jD-P|&J0b3(n+}RM47ZOTcFLcgOqvUkdt{bES5gmZl^m%pCLyDyBi@Oa9fRYlC zpT{%cxN&W1MC{rspK#rguUMJczMcVV1RR_&2$fVS6mCwT=H&fkrp{_zkw`$o-+_4D zG}0BJ7WT$AjR6L*!4-Fw6R>cDOgfc%)IsLJ76cOo1K~(W4frY%!hoa7-ApJBcG1Th zxanm&uL^gsqY!;_;|{2x>zaTkQ9Y*m9%G})gFb?4lQSE^n*?N7rk-&6)@bJzh>l#< zaF&y-9uNT}L%5~i;h8m?AqF#u`^y9H2cYPD#N&fgzRahrX$i=1XDrTxiV5Zg$|@a1 zw)536z*}6l)TYOnU8n&s@%aZYN}wT|ftPPnYvpR>1>Roi_KG7crmRr+#s|+Q^Jr|2 zX3rE&-{j0OZXEPwb03(9<`gz~UtK`@A-SR&W<<_@?D;M=8?-Zv=8&gN5EuaU zKNo9?#8!C%$pUbe8#%djirqS3)3uugg=BDf_Ucqk00N!*7$_5tZHz^9|df zWlL}%o-Sp(#eI1B`uCAO*s0sGwNh0Q`GFu<3B0X#%*m!0spna!>Uz3Sfbc~9p{>ie zT!OP3rcQ|_zoS>lkNP6-jP$4)m(G`K{bG#1rtoE!Q`)b&@6T6#T z)TY7TZN5`kiO{Z|NS-NnEg3h)9wCBZ4i&gqVAwWZGbW6079(c$Qi@E=pI|;xE71q;H%#~$P)crWGZ0HxN$=C`8;VkWt@J)TrJlD-- zb1Kz}FdsHJpLyglz81G-)1U8oReIJcu}$M?5^?*{B89666r$D~6;=gMHuxJI(}2pI?2f$4D&bZG?%EVCLYDMDzf z8b-V3?{U)hjm?>~vOY;WE1LX#6uYHJG?N;*Kt)!7&3jP>w{?3vQCAa=Y=<2&qAkaq zYE@=4^aCLmnnbKgM-dBCppv$Ke9yU^K|Ra5b?ww(rIkvY()e|6T_Jk9C6sC@rti>b zoWh!#OWPJ(tayz>M6_N>9N%C(b-Vag)R%?%kJm7L=f5geQ1`o!08l^O-q` zO1Q39d+WG)?fb?VN@~FrmX#LgJ2i+6XS(!zrT_-wORQV5d^EsO*rWq z!3;E#X@<1mBxO@iPV!RiCe|KZxmr;SMNDe9&HQS8qZwWE;=WXVhoLUnnUQ;Z6!?>F z5F5G`p`>a<--Ibh_!O2jpN~r1$9?7g5t#$0E(?WJ*Rl=!WSwY$M z5C(mJXks?O51<;1_)_Sh9JJixF+49XZF=p<3X1Eu8xN`WDO=-*>^pd68?;^T9VJQ5 z6!92S*QfZm-axvOyJaZ!kB@K{o6`r_0_Vl}soObVc^o8g*j2YjkX%l%x{h9?bI<{C zZXNowDH4FUor;|7f#Slsbc)Y0>OibhXW&>Tj-3Ywa8)71wn@EOxdXtHuVWO z9}_Y>Sl94!fa?)5j@@VMdjNYY`^!25?Cb%-v|<7!>9RuH5CXAO z*-ph#amm*|%Ff9?!0bv!G;+!&l2taJ4<#0|$MgZLfT>8H0uC!xvw(?|hgN|0<6bGq zKi~`QoZ`Y3$eNb`>dUax5tUZ-cd-nUMm?r0uo^dvIxZsx&~4VU$~vlKsMtdGMF@Tv zr)%FBQ7J7qnt>7H9!#~Ee~+*h#VjcI0?^TtM`uMICQ073nP!M18j-57Kv4Wg5852= zroL?)wqJ^?D!oJ0RlLA9>ICQw+#(icpYuzRlxbBgsGs1)W$Fm%1j-H4zAPl;EMA3{ z89PL0>2%OYb8pF5p6Hz0&D}c77%UP~&^bs;9_D}yU*Xc4-F7|fdI3hhE|T=dv;`Xs z8!8|YjHYP?kC5B07#D721B!q1mkuhR1Jz34v;aR=ot^xPD~nAEosGEMwgaT)Tdyd5n|)Jf2O0oKApcHtCzn6|Phmc(w_%LH`lD z0JUA$A9!lE$ZL$8V3r@yK)B&v203g@ z;lI>FfZC-98=JXPr^gsWM$cvFl2iZ$vCMHxe3DJow&9QtSDNdJzt%tjw%dZ6W?(-s zJvU!?izrRrEpxOjWJ@Y?N(Y3cbK#OOad3sQy884`Nz(vrmJE+JBa!Ob>dVfVUR(?c^d-%!^h}bfOQsIm|6D)#lIGseOcLx-Ew_uC5i1=9MQ)K)-yBt+oGr9Dk z(3sV9Cc~9-M!BZ5 zli@cLHkTn{ojQLnDQexUR9%aXNEiBvzD+ZcY_ZBqG0>P{q8LZe5m3Z~g}k>71l_!a zYGo?2Eu;~USEa(c^l+QYo^u3YTzp&wT-eO=b7J&?t(aDhC3459UwG9JIoM#cysRKI z)p1p=7n!6kSC^`#i;HtN(01#QUQ7|!z%rX_zZZ0!)jnm>lf;ia^7no$qJu(`g){a# zw$eVUeaOc}aa!0gkInY!G{o2G(0)ygb$wZ8ccW##oE4iRHM|jV-zY+4GoFsnNqGd> z2*}tjXse8P4{0#W$GcV3N;9PP%PI0=&Eh%&>JH7HN%nWPcWJT{fJvRQ^;q9}T;|0E>(usWm7X#L4}Bq;O-#7(;=%## z?(ypO759c`k3`4sx;!$^>eL-;{x|&J>FAXda>tj8aTlnDik%(mlHr{p;UIa?sDNz9 z%RB!d=D>Z7`kwgt`@nais&MS)q!xu-@FnEG0v>GP8gk*Q=p=90E#o1hDFhRuaoQ?j z^n7v^?xuTP2psUf&2mlxAtjCz#(gvM<+95VJZQDD#6)ZbIwbULLL>#}K9jklSXxONi>Y8fQVH@eAP*i<(V`M1kv4y? 
zO9*2I2?)f5I~f|D3kp_&o*3!!Kj?^#{ZA{@+Hy7H-h6 z-uyd|z_LCC=bgXpdny@zZ53AAKgXi}_twvR(>xT7q?Yz`>ngvYb}AIsTh%-CTZ;J~ z+vH$dzP9Cp&u@3AnF{$Imf6zXEQ|Sc0FqVoYc6TaI91{f;}sb$o7Z#uOf^%03K4To zE;rW=1^kLRMbefQb1BB*hIbDS%V>~~E@4#{CYWar-7(wBsWvznvZB|0^c=DsdgHhC<0YeELdfen&X zddC$;KEREaF8C@@D#f{Qw8|OyphzW7v&yv(yyy$(0?zH>T!}%`k(kXiDrhAsc!uWz zG*2tM!Gi#Fy2>lcId_mtod4pyC<0eGe2jOBx?E4{7^FHDS)?CF(=wl-?+JB#WS<4K zT$*VLXqYa^L^gx2ZBPh(SVUq@HsG7kN1la{?uXRM6Hq#Z?nCegI=W8bZBNtcQ#!p! zhr|!hFWx9Apes5C#=fiDUXGNnm?`KQU@C-ZK3mQT!qf3ws6ds`^4gty=4R~Aa5Rc7 zz7iVP6L!)!-OYoQA+7MgH6pS69X3O+wg@d=9zg@tF+=1#FsL1$<9mK>+6k|*!nkDT ze;WQ^3jTXXt8lzQ9I#GjRA2^yqbGr`T!I8P#c`pAg4}%_Q#hOXl#%bCY6fu7;I@My z^b=P>2W~x`M0X68k@cj7*FcMonxoNBvvK;_R^S9Xb-a+L5to5H=uX_t&EC4e&!H`p znlrD)OaN3r^sA{S^p`S3*vKpbAK2&@P8=UT2_b#_ROUcvKwcu+qZ$BhX0e=Jx~6H2 zS5ed2XGX7R;O|%X!H!;VH97jG3cB4WMe%5a=T3Z~O()VzfbNGv5zl6J^t2TiDYLE{6kbAR6IdO}kDs(w z>nbY`taLL?>(mn;aEGVMYED5dejPIN1$TT)ix8gB@=rHHxCEMvH zOFw`+^$Zgchar7b6?z4av+I1xc&Xj}?tpNk%!O+Ls%Em(s30HI#9?iaQLj+i@8)ylGT7U2JchT`u3*WF4LmQEJ{cvZ}( zl1ag!{qFVIACu}Po6iY`WL)6L%!;CmlhfUQjK7xv$LGy6q({eBMk04aF%W&3r5p_v z>sPZWn`SI`crXK6D)xO|DcmqA6t+=uTDMVoe4284&mIYG%w5Dx8&X?eY;Tdu^b&wm za2J3w!&Q`9v;6EQXOx1BK-AJ;FPr&$WligttjsEc12WD~K^)F^4p{ji?aTCva8At^~TT)&P+5VL8vh*rq%en(Z?KGE9!=hUKoWZk8ZHkB2-lH^|eQjD&{*R|)L(DqrOD)Y`TH$7NQmVZAPy z((^;VEjgxmbNAUuJQm4yE0!(Q)pylz6c)KFE}pZ*XdQ`Ahz?{_`HB17Dk^SMMC8BN zxAE_Su_UK>GhXw1-tpiBkLIRyhDKoMe=Umz8y`*w`{wam-e&OkyVuVRQ+nLxc;Lux z9%7w6pL4+vTD3-!3KsI+c@H=OBbbrquD7RUy9mRW-Eo!!OcXXluM4SyhhaWG>4*Dz z9`nGC&rkeA03Lk|c%<%xX@33-tzw&LcNJfWVkrm9By7+n*gJe)YKC{5iD=eBBr324 zx5Z-FWP*+YR@sEymr*qXxWo9%4YJ#Pz@~t?X``k--rW?Dum?tPMEk6|^EKC~hKO#BdxY*Np;(FUC!QWM z{AtKSC!`xZnty%i&0A z&Q=X2rShe8;o*(UC2XFPj{r8%*KNLp;8Z%pBqY?2PNU%X2uu#{@lgLGN18e$aX<95 z0&n?0cE*R9v`XaioHZ?_7Lu7TUjnkIE>)p8wm&2&e4?R13mXg|?6S>gi zl{bf2OFY|r2pjfh74|;OPEAYmHUa4$&;u_ead7e zejc<>nM`jIU3<1qrY(D^P@d2Z7bFq@GO166?(o5Rq0psTy#aPwEtqSmxmZr}a@Vy2 zF5lai3k03~x>|mb)j|R`M`Zlc>jkkd8p}oH=cHBhfgWH6Mg(cZUo577Xc2pvFK_*c zU;s z&<$_4fV+Li@>GRIv=2-dZ-pt{FY~_I~F*e zw%nxl;Z`AxfGt(nqXTH2!`Pu2Dzn*Kn;U{Jwgem}s``2ua1@Su5TWamUYdhwPv zo^pDpN4N(XAIQA%wGVe4!)^Bc((paZAdCRQiaCDyP~Z4tE$y%-1$#}>Yy7h~n>OM*o#YLNfXL4yPEOWOn!1;ndBZUWIM z&0)J3et(lM*oQj$Jvf-%P2*0c%D=MVU8(^1AY!o>_AC~$NLF{{of#&kg=fm_&zfQm z$;I?A8AM8t??veJvwqTMXU*~GLxw?4qkQTh{?Xmj&r$&yRO(`6QU6VYHOQZHW1UrK zQnmv?NpvhN450j|LZOa((9ondv(JQj zl#1`$dS1T+mJ&{^eW+%(ZH{0mjTY&3J{b`_2Vm?CK{*Il`0mxC|G0V&RuGstfKPYj zZP&QHA*CS+5T=-)&&9SqfYCcv;CcEOM3R*Q-6>_R=nl~zLCOiTymleoCn;BQt4r#* zRhe^iG=Cf*Zksh-DjM=RyLVRPA&H^ikS?6#wAsqUjgmGhGV-lW)9h((fR9>|oSr1i zud=Ud0I?*YJn6W+%>a{23GR`~Ec1zE!t~7k6#ji)6eVz~N*aQSGBDBeeccewk!hIe zDmku9(GcC*(2o`u>lQGIDPYf_!3NE}Y2fLyFa8YxiG;>xE(SJ!JU|%~z6S<>m5M@^ z9Ru^?pPaCS-1hWRJSi+ZB?&w=0vTnKwHfyTTbVMzCkz>O9uQBy_X6-3@j5p41}zcy7?U>$Yc zXSvD`o9RjuLsJ$`v`hX0!W!1|U3;z&C?n*0{UHPaQKKlMlcqSN7n?Cw-w9FM|W8^~3|RHMA|X|$hQ+^0Oumyou+_hgWy{p7(zF;}w27Es>U>w$*9 z^_?5_PY3NiUx<2{0w*08q_|15K{{IU86h&3(-UDTc?q1cy5Q!$jzPkNd8;s`(&?Pc z@zm#t*oeOxqnMcL30Y)9TMICC$^|7FX3%GVEr9?_v8-clIs(V9*HYt6Kbjbfn zO)_$p8OEkH9Bbd6V@Cf(t?KXt7e<>ncw@|&*E_cuBYD$DBV_9*@<#3gzVhjrU8#32zY8>5$ zDCWzyJG2~f2vpNbxVZ~~CGHpL1vJ(5VY|uvLG7Pe`}i->bZl94{C%W~z#ie78?rj6 zT4dv<@r@l*og^oAy3mpm$pdGxtnrsX7*u=0Z33^z?yQU5cgo zc?`Cq-_UGwJToFBfC zFoqq#(cxOu;mo2|qSA{va;@-EVTqfZwvuySqX-JFzi}p`7CVRfwqgn@w1bggL8{zl zA%vw(P+WR|kq=L8CUrqtGhMHrx@RMtrh^V8JqTQgI#{TaG_r@aFg$gIB+X#cSrkJiLq*8ppGLFX}izu zF4hcbNcK}`B7*MaHC?)L8ldz;n$OemoJ{CitwzYqfFprU#E`4*E-j}t?`BBG`8==h zhf1@&b2>M>pg(UaH%B)4oN3%}oBXUCZYl{l5(`gFKG}e6sn&{N3w7bbDs9$Za3SBJ zH~2N1=J9uTC5iQD+wIIH>vCMpBNIKfOix@pq88W&eQImtL~E}Dex{U7Zp6m`A08UC 
zchopHR2%)zTMq4qv~+W8Jt|(^>QIkRkA%lbXT8E!NxSubRt0jpfs4#&AOKtb)T;+%FgY;gMO8*=U-Nn>Dz5z4#k=%yIN zeveMO72l5CB2+(kry=H_bx#A=YMKFXQ_FFfF+WY*PtDS&=H)s1q^sR|+tD+4qIK53wN z1A3CCnPifz08rxIOKK9B)7tV!HTjnU2o=y}m3qR0O*>>oE^3*^ss!(XMjk3Z)t`o> zfAr|~&SxLTm^p0Gd>m)xGOc<09?4tESXa*@qyvr}@VnyEN+{0#8VmlG5#*7)U z6I{M@#tPmBVoH*dJIL@dSezgRN$}$C4_vBGgl|XpB88Xk)0qlJx;_Q688{zh=IJAV z1A64r80Jk2j5=i)Ff3@07t8|p*D{M3a`xR$KtLqHFp|w6cHhQ_@jcFMXiE0dxWK>) z94epPM~q#Bq9J`@Q!yN1=)`f)Ch-rMO+pZ%FkzrB&x>Th2M4ox%@7Wj5h4YqEWyJU zk6mUDNpb>+!*qOclE+4pDWYKR6Z~2hensb6cJIWX46K;Y+U8Kp*v0IEo1hlq$3HL@ zW=iwbJR4Q{G#h2JnHiE1xHa-Lhdg)Fsi0MI8%_q?{#Z!YqZDT8+#H51k8jAdgN9`< zKtV+=q>+%Bs5}^$cDBS%(7#vjRckk7NW>%8$sp4rqtO62V)|?v%hKaJ2k(;DPQ~az zdNTrmpj5}^ON7y4R~{R_A0$#_FWAv!f{?cD6wgq7Ncq4wosLmsO;XB3Cd;;a>pv9NXU>y^GB{i2*O!GB*8Q; zEkghfFijj#p6&JOpe_#J0}Jfv8Kv{e$T!Y2iM*qu>HfB{d~14*%es=Cz&>UgC1JXAI`buhsva4gq8;9B@wa~nuak&hY^506!1 zOUzBrL0ECWD?P?8TFV+l&=eBTo9|4p5&snRDhlj2*zuL(N$NVM|2{vh5hr_YNE(b1!{Z zKcZ5z_EA4Y+@^gU(cfHA#IrT-`BiB9o;+>MZ%t@3%Vhe=esaX}(I?;Yl2gxj^^wzl z3!3bX9uaN6aqrzj7liX^u5n}#j~;`rPUVgzo{T31!5DFfb1jOwukqXPi>(f9-stUa z1?JDfyHmhWLy1_=bn_79Fbf7Lm`45g5$nX+IK_+x$GgK&*T@BE)cIY6j>eGzK)|vX zr{g)}@;gKxFHR^z2JDJxc|$A&OsWtTYFY<)aYRq1i2efSjD#YdPv>6VZ%gqU4s44v zt^6=?EcyWI^6AjONK<3u#beUH{A>Wv=}Dyv_iKWCV;@xf#Nraals%e5#*e zSSk1$#u^e~efE8OBX{-=Yc%8YdWkj3e^zfX>G{uc7=w|8M9IA%0tJ4|cIY)?g&QM_~BNiX1N;`5L)?A z?x1~W(>&m}xW0vpxu$vFqluC~ax)Z85Io%AJjYYx-<0JivXtWDj~edq%;Q_~jwhEl z0-bT}~g=4Jpj0=I6>$tS%~3dr%3QLD>=U z7TG5b32;b{k-CuEa#Crk8%kCazvz~6wy|J;&w)N^;tTFN&fs_^l7QKx14wz=5?c2o z_^1Wu0Q6LYI9tk<-=f1N*gG;lLvkRu zrU6C&Xb0I-S{!(H?{KjWXNF|o1x#JxL0vq8o;@0%tXENn~( zKnCn2>ETgO%Vc~=qks{2;7?&%uqdW9v1XhJ@Bwr4&IzJi2PE3$V^E5_0Vbh|Tj@odU2t$<7mws4HSoCNzA#CzCkAQ3k@NU^M|Yl0OM_M^eeY| z90#hJj!LX9xmr4pZfcXyEbPJmo><0vpJ&Kxrud!~wc`p(J-#b^90SIA_;FkEbGjJM z*J#A=xuk}DN`Z<#dydOk3ept##jMDsVkd8dR}~}Xt4w=lgcs(Ho=-sa_K0dyb8HsT zfjQN$>l>S5u@LiV3W>qZwQZ36hkWP187MN--^<>M*9CpAQJ%2#31M@ACbtEx= z98sJW-d?X(=~5fI$uk1{;ru+-(jZKzK+sbYyk{mlesaL41Ez(8)Zrcjtq9xL_pTvT zE;}xvi;aRxW!Nm$Z^xU-^9`oeYEjsmwy)a$-95UP&5WExCVpIWj_KQu$>yXGua6r0 zqgWmg@L%roK9Fy*ZVC+YMdmU|b!a8KrD^$(`4pEhElLRMB6n&4l7WoAgcjPDG(@h| z-iDS|AbKeFBGRBs?P3S#V2)QtcOVsVo?~)IJRPaG1W^$X^w&g(wx{E)0|rx~y~{g` zht!*O$KpvFMyo@zqf?7AbQHWV))1Bk{U5WUEG0FW)^JLOM-8fmFgOaty5DaZ569#y zH6GL?T*MLysEIX(f_8JuSd=Pju-)2~umJz`)l(-t0?t|oa=S5r(`y!L2Iu6SJij(u zw7yy3@i$FZH6Pn{eGP-Hp$|VO^Y*^JK^UDe#%uOKT7u-P08AxEdY^!Kaox_Agh=C- zEh#F={RdpbS`r_NgaiJqtV9q2iBgIPi*8^-@~3eHyz<6Ec*UnZsh}O&vmHDq3Ea3K zKCbS2EedcPMMv(p4v#7E5rPS@-EV0VAAcM4R!s8B-q)M(i&y*e<$OR+H zI^CUzF;tiE^N7X-TH(R!!nnefe2{!3XrPbKrXW0%Z4=qY8e{aEMroRRD&{%hr8IM? 
zR&jE-tYiX5{m&Lm5YCpPXQq}RI5CCMU2uPE`NaH3Au)1mXiK(ke1MiEtU;0>JeUjg zll2ik82!RhXr9pWp=&q6T3V^9-wgF`%XfxSfWX}Vvg+cBw7`Z%5T4p-(GdURn=q)g522vB~xZP-FJ`Cwg%{FyI!rbFkctw zeET4l!7m4ZiMBNB5Kb@Xg6et)*c2JTJF4ojAm=6NrA@`&lsI^#o_y?R` zTz;k@CTKFZq{L?>bdBOUb!~`feCu{Lwy)AP!+72mCBi`}Q3Z9u?qYtE#$qO94v=lk z=ORTf{T*F+nH;L7IS;RMe7kd9I&>>C?tMTUFRluX4m5gB$doB>0hp+$O9=e1PRkU} z+5y5CC8244^QOzy-=ZJhFlF!~m<-ni!!ZaJxW^A=5?V;0Hy4VH%DI>_P7C1aLKa6W ziJ}sr7_TBj47No{|1PB)1Rdoq1n5)AJnCI-P#;9sg*uWklD_v+%h2&>qb$+1Oi7XN z+P;^LI;9SsbHLyS5dSFY6*i0^NQkiJ8PgK9Sn~>#uM$;L131l_T03;imY>Wm%s4VcUwwWMhtksqJhM+dlNb&1HBRD`g%S&0->+!d;G|dHgmWHr zXjOP`dR@pWDYNVKJS|C;k99gPTB|aQq-E#BguNQQSI+&Bkhpo({i^8OCe(X-q760@Ys2L};(EY}heb;G zaAphKG0+ER$s)n`2J%qAmO~_UMS^4qiqn&*Rk$^zY`U}2GRnqbTC<+q=19(`FgIPRwTBrbsrv+!gIt#3xFJpFaM+qRK&lJsqY*o?K7OsQuevQMJG?TFT_M#2 z(3C%Z>=aUNsff0unJZg;E|~1}3wuYA_z?7&jo{?xeRvDUzJDa*qlCbRV#1NXuUly<6{^msM0!*%WR=iQ zTNI@)Amc{wKl+j@9S7}^Q#(Dfnc6w;sjv8CGzwgFH9_6&!MFaBTAdXC)On?h-1>H6uEZ+}|H0Vlp?59BCtkwUvRk|RKvYN`W>P&nLD%LqlTBTDW~fx_n;@P7b~Ui z#s0imW*9OP;B;E%7R&_%1(U{63OJoYXVpQT2Xcx97yvbg4@udfh0?)9y7KYBsWRD5 zL%=tP^B8`-E-QUt0q>clVl4$PV3ov}S{}%30en$(mEMM)tt-b8A|W6Gq8M)4+Z|n< znG;Z}n|HxQb#173UKe&-HLA4cWT2bl)IunFx)twgub|TC*fJrvo#DXLFdi87fj*_p3Vmp567+| z2rFpB#94lA*y^|wfR_#CAh?3+e;(+f*jgE8ZV4J3 z;T3UrW9fNZ$zX@5zwL7i_ZZ|Li`^aU&KCZiwry~8ZXTnL6ewYfa5w=`8YFa#>Nfu? z=|sg8-)Jf|V`-LJS**$&`L(&`MF-Q9Nmv?jMWWu)qY-4t9BqOCXA$}{gflzg_JL8% zpDAiNM&Nf1yj7P*5;fe)jAM_1-h(pH6Aw6^o!NO+?<&67?iy-m=g2% zE-@B2&>XaYE7yv|TU4VeTa65dLZK&w$eCt1+;vH)<{S7B%#bSITeieSH6F^ETMipa zJgjN=%vcpQvKO(|wMU-fM?;?59_i)=Nki;}A7S+N1s=PmVdx3o9h0X}1#D?dOM!kD zVWAs%e@BPoLkg+(%nnwG1>x6ELZsgjCZcN8HXzS>fQ(R+I)K*o`Ld(8B>2tvn-WjC zjcKFo{!9T@Zhn6N!cqG^&6fw1#R9NSodN_SGT|bxV6+Iv!R)Ysv#t>M4dN9pnZalg zYn7FtPdYdNM@_n6&B0w0tu0Pdf|8s#C6KEw;6KK9mmPeC9X!(T;G4MDTPhEu(>f&C zM-&JtD=@T0qmjW6qIL4B93-+P@fK+pC37kopZPo)XBzyEfaT-@kXEd5{xnWCw3DvF zW>9@iCNc>>P&z2>c5!fkwx}cSwwXzWxH>qH&Kw|0cuPnLtZTHVc$Zwjb)QCFk!zCYjLS1}VT)MO{ zBWq(^skN13g_KuQ-4LpyC!Uzx^cx=E=2IKtBr8vno7vqef6FrRKhLil6yQ-nL_gBY z(OZE)Zy;1o>Xu<4h6-ym;I9b2=TA$n(Mgjy3Tr0G1fcf$X$wZ~8s^(v` zwI?=pC^xB*JM=_yB2vI2NEt0n7O#HoVIK+@$mu6#zKVzyTU+gdJ604Hfe2mFs`oqQ zJGJe7P-fFl8eJ99PKpiE*MK#x&PWl1um^g_?{hOPG36g#24+LVwlDNmnb z#DXD>H+W3bO)_#M6)OXUlMw3x;lvZVkI*?en$DeGp$m?Exeum1Qfp)SIIHg!5MK1# z;BXY=DFnQ!2*f;}F$VyKKzYBob>2ZpbR!B+a-e7f@)ZpmmyYs)6dgjqFJ~P>KaZ)o z8rciZ_#T5t6qTGJZtB_$=(xCESLj&Xox)XS29C;wgqsKfi1+l8)0jnwY)ycPN8l!D zlOPaqo^lU$umg$)g?qC7S7jE&#C?WQtjS#fryo$hFlQI)U3@yTGQEbi0b%KPnbQO! 
zUN+x6_`e&`U)|(`qnZbg&Mag zg+E>x`H~XW4w6ne?(~PShm{Wt--u3^AM&zTf{z~Fi3UlSc8;2#4aMs^f(dHhp=WOB zTa6ava4@B*p2i#8cNeR=qNM}YIW(}{iG40xn!7SP)gTYaKGqW1ypN%ykfXC8OWb#F zUYXTfgT{aX=*{gvo8SgfX!Mmhp)(cKHbTyac|H~EFau+F)4ef~!j3@joQFhWwDCj} z4eSKLkA-1i7c7d1=Ci|cSPwl9HrsJB#mo$o178OUEaSfv1sPPVBCMF*Sm7rVkFl(xDvQen-!r{hX*PZxD(FriP}osjB!jT0s*t((CB*1#UlC>M+R2a zgJP3vXucl3yL|C)@og&~UO`sH#ux90TpN#J|6z4L{q5-G>4WR#ul%~7{`SDTeC%5P zcb`=Mcd`1%y8b992)HR^Cx}T;O8ZJ#D+14@S0~?4x&~{UMqxEdVs{4Jma(C$aaTT# za|5gJ$>VL^Rza-%|IgmL_qB0dd*l6^S1}RsBgqyr*ttBVCH-N*F;8vq2XK=dLUjgd z1XD{Ibw)C#N#6T!UH5HfBw;&gdJbx1q?vuctiATSd{*y z5~x6SQLT4reqwFvC^B^rN)F=;liE?N;C7dL~m}Fw8%;f93x_VH!*GyYP!IhlV?(&$t z>Lk!^cV@zbS=L{v`}g||y7 zDAgsjwfarJ_Kl?&m9L4emztxCr2Y-6P+AScSTsm%S#GvbxzxW|xulZDr~GR5!s32a zIlRMa=O5RMGylc%DPKOZ)stF1p$jInJdrReRJB=km36N)&qiTWJ(~61sPD>>?o@Y) zD_U(u{l40~^4k40UmhDPRK- zJWkL;-6`qP)y_~!sMeI@L^iq;K2`c&B`dLE!AWc0F%hBJB4>Il!YQ}MD%td8rq~T^ z!8Gl^#S!vt_-KgAc6{~co0W*52)QWccCxT54`EUsDgh)!5n~YJj<`#Hr%CRZ0d_>V z2Hm;lK#W{2N>?8S(0uEpGqCpcjAO#qOB98kcm{GJP_yrtL(p@yT$k_Uxb$%tNWMde zl0h&@wjYeD_KVXoBg94p1#H%Aa+Qdcj$cR72?Xz#7^_S8l|dFUw8&d^_!EFn0#G!e zj?;X08_jMqSL_ZqnE6;k#&iD=QEN=1bFyAS%cXWz4_!X`aA-zH-SR7jiG-f1K&h}> zW9$@?C!7OuxKN!Ssnt)*AjS|KC=a+hIHJyIIw4Bf57HephoxT_jt;70E;K4dq0=wo zXIIW5r>9)`@G(7bl4^n=iU9@2q_*GYLb8ex7o-(S_g}r|I>c3mS#0^Ettl5c30KJ^SS9*_-Hh^0D5;H( z`%j-#8)s2=I>t#5b5%-IZ@J4^J!wTne(J1BY$!{$ZP!OH1BFJ>-SKhqIXY+PCSDfP z*>@i-t#gY1AQ4ijB0X>o{izZ!P@s8pYYu{!O6@JJzEcvihOu6jhvp0$wIWe0=Uq-6 zEAlMeE9JHAqlf$Db$t7<%J5kVU=y-HD+(NJL5|?zPmYWMO@N)Vsf-&{y$&kGCSE#! z7%K^+?!T%rCd1wIs`)`A(qGsMcM5jLtG+BXu6P~pTe<#(d)HfG;XZE%XD$8eRvMN2 zzSRb@nlhBHWz~aJ?FE)6BiE6REHi4<{kZW>a!%-C#zODNst`IyK@kckgjX_o*HNni zwjC?i(Mks`NZ0Pbt*a$z+miT01ALVmg(1GGn;Md2KgB8?@gv$w&Ov0X^4%#l2)6!Z zW)E%M#W1!INF=_bkZAB)tTi*_+VNWAY0~oKD$WO^q@a5nnw^aCE=H#*SLTV%(IW4k zb5th}s+EBhiU&ppL-j109CvMcTd^+SS;e$`+K^{e6-1vh=$Q`9M+H4gjipWKUTiJP zCjr==1x%By?!u($s6vJ&B3GTfI`#QcRlP?Hy^3ir77Sr!bk2&gi&l8Ih+ewYf%3(|K`?o(ANLQf0x2L3}bw#+O9Le39~0 zZm_a*E1{nZ+(VtL*qRZU+c}1IsVhO@T#8M~Xb^0LHoGoe=5g5>LgK>iu3OEOES72^ z%Xb!~%~ryelR}{fYK{7kJMs|87>azMw-T)&=)*GcA!RKU|G}-ZjQ#Up*SGvaA?;F%6E!+ zAEBv+bCmID=W9-{73SN=d2nrC+z64&bB0Ed3#MEf9ZtIhcd9U$`~+*Pmk!}LgpECL ztHsYGWKi`wV^ZifUK5Pt_q{=$4re`l<8KFi5_lWCq5u-?`&D;Kh6%ngMLS4N79jL- znQFEhshr3K7P9WmN~02^T{{WzfL#m3kj5B*{mA{2$oQJgnwi|POjB9FF;a7(WBa?` zlGEh$HYfvmwX%hSND@UzEyyZ?PkY!FjEaNi5~<@{3j5U&XJR)-SrGsVW!5sdbXloc zcyg2J(b3-Pb>~wwuRw_k-)_vCFqpMtmf&my3R1fOnPQ2VOV#Eq`j?*Q!YSOR<&X~6 z0)bwXF7OqB47=nEZG|2c@;|B>hiRYjGf(&SPM?+P#t$Q6mf|XYKpiLbRaCl(Q~r<+ z%2;qXp2;d4vKeC;+Yh<5ms%pNp|+IULCtrnBu@lrc{%z?|I)57BnS%@n7zY^3WoVd zLWW>-ULh5dpTh^weB?KqMfaPysvDzRErtA@rKk?&v(r$BNzzT?7=U*oG3g$TZac!t z**x_il@ivY9Gj2!==kk_0^^$9saHM~@d*ALLGgRUUQ>Lx&nr zA%1GY&Cf+!V+wjmg*>zuW7p9ih#K%3KqNWVnNdkrpZ*oXOtVCbaE-38J}a^taF&o} zRV_ewJK4n;BVxC>UsOQ{7{{;MAO?eEVHV8^e&HfRheM*Yb183ge$lDtZ-zxz>Z4#R z+>l;>l#Vr)p(R}BhH_oFqN05Kjswj?Pr^>G%6U)o6kDLM$$?xOUq%ghi&C#~Mwky; zqn_dqipB%1&8`B34xWtCiAHkOaj1(Ln{<3EK%%=+2-?WE z6$}Jg%rDsiPLi5IL&vcmnK)LCSV+|ZFInfPcJ8z+X{=aw0#xA}jEg^%UYp&O75MCT zy^_l|s?1xkSmCoBH}`ysm+V1lLKm~ab&H!c(1sI)M)lD~q6Scaq(|P$T;=$zN57%+ ztk|M9?u)mAPG{Lv6eTsNQ1}@nMPJ}986Nq+5!8|A*;!q`3f!Fa<0cmL( z(ZY(_-l`3krG#6pqFnf*#y2*cPKsj5vq#&n>Y4*|XKpuVw@P*u;Hp*1y9b#Q5?bh0 z-Ve7^<;2~M>R?lm+9+`*;8D6~J=AQ}ORM&`e%dP%+JbCHC?&!5~IOsMua%k$J<9qmVFCm_BFQ9_l+qjpERygjI`w z*v}VvtdOU$n3XE@>WCm-BBokCFNro>8{2G3wV>Q#UU1xH9+FmQRF5M#<=86l1EX_&A|8M`Z? 
zl*n|hqd71f)JzY^Y|62{7jtGoyI@0!OGf3<2Co&m@3S2bTeJFzkUaJ0rKz3<*dvNA zMJf*fP>ANt5L*Ejc7p0@L9{_pg0i429B5;*mG%4cT!qsc6Wx9kz#g|M?!oGqM)l?- zT+NyBo7YQ)cy4NJF&nIIlA#b}L0M?`gr{B%G*+cCE6lQfmYpHt$30V^ZMazQoPSVs zB0dd?h>ZrO4cNYbudQeUVXe4SVaW*`epWk?{gFGd$Fb|1>aHEPS#6R{FDNg*Ll&T7#i7PZ=dUH5E{Pt+~ z?9ItRbo%Dm=?|x82d_fDtU^*-=F!Y|BXqZ};EujXFg&fLtfy7;I1iLmlB{$-(TK=u zRth(M-YuqCJWXV(>Pi!W_ecOXyS9zAen`@KlNL2ztSiYe$?dI9>Rizp3&m=lxqY_@ zzlk#YO|wVw83?^A*IQ6hpPpKRYUpe#Iqg=Dx5fybvhea2+v;~q5BTErGC4s z37l`1ub(?0d3wwN$vd5~-3iBRks97~zTBn{vJa|-QeL)iz7_Z$guC>WZ@%3zoD5fF zXJMb)wK^n{B^e#594WmvjQ-0!XJzRI%Z?^kyQ)!eam>%7qz*T9zg3ujl+;}=gJw0^paA9-MX_yx++JwyzyT)(-ZtM+G)wew88ECU=`o6` zg0fr!V9PCzaBu|VN#qw!RA6kjhmBzi>MM8`V%~2sZkO%K1MeGb0qiBPi5s1b5-5tq8KBxK6mASnn`yky)M?ukzH|g@l(RCmLa1EJeA`Z zE}^h9i$-#i+!%wm`axJI1_}5=;MlmJS~G=@T4v$QebS~o8>8$O_Cd{-UC&qRRdw<^-Rn=7nV`GS{I+m@_)wW)X$JeI(8DI@H#Ct`vlBgSUJ&4ed`X{2S|= zpIOB!-Eb8LZUIg9>h?pH7!nlnwP%qVc@*f%*>-f6BYu9<&-b8cZzD#pf2nl|QMJ^` zE3zVSVCm0d8zZklp%W!#OT*Ev3x`LO93Y#7b>}KLtKyTOuuB`gs33$765yRA)2v8I zrqeC+5f#+86-w5tyRWHrFe@;!mNpvm!5F~trsXNy72q}0@TX~h=yDf5haq?ZkR#Cl9|fO z@#?~!yV)s#SvKk6wW_Y2ruPHYkqjI`Dn9wKYI(Pu{m8N7o)d@>UeUrg90aJk98ryN zwXRxb!B#d?=m|H%&pH`O5(q1qG%|5=)M|oUkQ+L^peVA)VxO86feX^%ZB$&x&?P)S!!j6Tzqbh4lzzU{M@Jy~r_2kDw#Dr-SO9PUyNK#q#1vmi%%$8n)w zGz&4sb2YLEDw)QG4AYH1={B1$D21Zr5{@?ZgcQ9bKcjHk1a8Zi+F3;$BU_4e0Gq($ zBEwt=o&Z8EWWIf0RUy=Rgad{l8jT3iJV!A}ze0$iAC015_k=14&6C=^mw6J~ptlvY zc_qCB9Ren7YWY4aZ+=L57-D6*%Zdtuf#uFn8@CBc4IdV5Tell(%4%K|MnzjNq-K^ zgF9jR%yalFI6l((q+32d9RFCY!gAXmmC?T$mF4`})Ijs&L@aVksj!&vgQV-yDeb3z zHX0=uCIlll)t$;~19k@KIe}13d|2~JFE3Z4>=H=0eL+u!QrM?k{&z*mKKZd_00Hao>yu${z?-qkL6T~byTtk z#oX+2ZZaSh!{x)VQZ+12CmIfRNDflX#Oy}b$!Kb#Uq~#f$)Lx)5g+l4%LwPItDA$A zmXG1Hj)NKk36fx^SWs7Z7~+9hMaG658WIDB*`5Y+NEb%)C&y{3xNlm!f2PCkNBA;R z4#ut~wgg*1?h586mCm|dXw&wFqX0Wp&IK42a*aby4agDpfxyt1UqCjDLrIeET&1hF zR@NNKf{WfJx4=3>_4(Bt@ntZISfU{L_#hc-s@(YWCBT`-#f^0&A^m5l^`nesKBya5$@quQaLY8A9hk!zf-p z!%-9++@6D5O-xmz>R~BgK?N2@N5cvcq6mwjhuKM4Ts?p~i(1mVyr?ZgbA1cS9S9!m)ia zCj92|$}WnGonr+AbofXP-(4zpzpbi%5yKr42UE@mao*)( z$Y%PEEO@yTk0)P|7hi3-R^uey?o_N8`5=e30&`~4S957zyvZtd%_LfpZ!_sjB*;{? 
z3MXeWETl%J$Q5`ylYTWOFD$yGR;w+mitY3IzJ&k#@S3rx10s@W(Dy*eSIOKp3w{G+z7AKx~ zFd(%tm0+RXSo)mtWCD{s98%KaPI`Z2&2$4Agx2o>|+GSvUA z=Ik*ZxmP0w+fxO(1c~PrMXW%9aG5})s1WEEjrwCvRCK+GDAw+d{WyvIib zf6#xcR)x>$EaWJMVF1O&C_hgBf?;l zT$bjR`RhjCfB!uV7UJI{DxWsUW=@UIBXxkfC&iG?l&l&D1oeN7y(WG5wTk-SemQ~+ zvQkL!2e-{4y&P#FMK+pS5<6P>NP<#;H=5GwKZq_LC9UF@(D z>%=(D%V1nq(1xMW&MfTKjKL4Jwe;@t*7O6mJgnrP81{q_rraq_tSVXEi+3lpomrV3 z6f?;9!f2!m6q{sHIZD3ChA@$#2Byn6ez(OS6_UAf49E= za?||_pD(_8`qk4vY;Aq@^vRcBeX+Uq^bea`Uw-w~)*qtHPgDKk&m05oM$sQG2T5`j zPioUGd;cH$Szlx0Mh+C;>bIjOFgqRm_X+*`l>YsK{{52v{fhqmn*RO2^zR?>->psh zcZ&{hT13%vlqz!2K&JEC<8#DnIT0&!Plu<41bL`j3?EC*SixGhG3V6L=2;2$&|fLlcbBaXM3+Okgl@C<}3g?ND1g5jmpc--J3honMa9;<^K+ zz@^(_)#2kla3gHr@R;!sMvUfR;$UeDC5(Cog-mg1XEIn+_sz8ksZD=S%nRH>T8J4Y zoFYX=b<$}{<*-zHG|F!980n+nnz*`cnn2=V3GkqAmJA$3&|(Sbj*ujFlI2x#jhBN< zi$%Z2NTp7{a)KO#yElbPM;?_}$)LYoZp4d&==Avc*>}4q2hrha^!nuZZ-@H_`%z=} z6n-~4(RYVuFOJ`wMUdiT_vq}0==gcGd-Mb3{p;b;KHTfSe|>UrdKw*{M2D|lzdSsE zXNO07FW>AR9(@}S@YTV|-V6A-`|R-L;n@$J z==tH<5tjNKirtM~@1C3;?!9@rdlJ2VbMpH5^Z=T$52YL(9~~Y&KY^MKUL72r0XK96 z&!U6B!H?+l#qP_O)Wh93(1#OhA5IRxeQ_4OIDWZ*01uyGb6y_qK6`n<)gAp1 z?Y-PReAS8ecVF#(dqDY)p_~&+CN2E##Q{Bn8h7FUduNBoM^F%SX7Bju>;xWkptmPy zCi8cPrw5&A_vG*t2j=<7F;sv93ptLd7|3^YpylAO`?CXy@b{b31KXbcgWZ=<+9_st z6XWEr^?G@N7{u9xY$_UXrGK1EuF^^39ZS)dPrv+{h|S~8Kf?c`CtKT3x3)K*Mna_< zSQPAkE(PvFOpo@W*Lkw>I?oWk88Mr(!5p}LOt5}-ynh@$={}{EEXe2R`h<8VupY_x z!HWw+(r37E%fqbDZ#mqsaiQ{ss$szN8n~YGG0|(d_peqF}a}`}P2oZ0!Qv#_* zODm1!ut!jNrtN&ig*&srFOs6eF&#wwU80q$lp_UK;mXM)DJ@FkA|2h*46MW2osnj3 zQDe!dlP|tTv1YVlNG~BbRUR~Y1T%@ZZWE2z=>^?zY@A|KFi|CL^NrveQS}|MUgat) z8k&hAsA~DJq-}G*H873U%(~S~hs{pZ#2WA~)xlpT^sU~>X*+8ESUhTV zAGMpEwS_&PB?}E_(rmu;CUD!O>5|UqHC?;J7ka17VoDp2Oyj9&YA=`Re5K--cHp7k z0TZ*ZPp31k36#m8>}N(g)pfi%<6*LnxN1XQZPFTiMT3cNpk}I~JUGaM1WP(5#Se}I zmbWb#qG+p4=b^kD=u~!1f+w6n$0cG=-0KAG!iiNe^|TbVCN>(O4fKOEMMVe!)6X#+ zQPV)&!13C)wLRt9=vqXlFNY_S?sFrl-~JLg@9l2tU*Umvod<)NYG zW3{nE+q$cmrV6Dg@ihi(07gE^`Rc%7FsXSsZ;f==2!P~mTugciqt+m5*=?+$(^j66 z)Prd`Qq=`yCmpu_@)>01^*2J1m==MiV6FF^=slb1zQ=lKGPiNp-YNxCMx|>T^*3aNk$17I--aaixfK@@#%Hg0UnfzUq+GOEkg_GuHi&A{|+Q) z9m!5JdK5K(Zg&5XrIXgEjlbGRi#~4>s{$F(<(!NrQ6e@1P6j28yq$vO-d)>!{knHx zH@Uy{I{52VdksItfpNn=ntww)^bv2vfI>O+rWK0~4q|~4XA#l36MI2})|Nm%Fv$O$ zr$F3s$i;LaBwWJYA+{%mH4(OMz<1af<(7tsGG}RQu1u_(XITb=iPQ(NrLvqhGui#8 z#$v)Yq~NigWj%h?JDwK3eV2o3e%5q%8O>I+Bd>Ov?yJvs%$xu7f4n!ir1aI#KYOoG zfhceO;@9M6+V)>LnSRXO;`r<-cjy|A=F4zng8#u;2b(AtKTM)md~Bkqg{!0% z5xqbe3T!7J@flk*DicqAFNBzPaojlQ$b70w!g%%&;A`P;A4xN7l)77&sM2 zLHp%5=>Q5Mg%h5kw9C$g4;>)e_u~yR_hvsf8$bTIMN36vwKOj}VXlWiwkY3^nsZUE z$3H&oKK$|FW2X|WD@asM5hQwClg3Td+I)Wwtn2Q^^LS$j{OkFfmluyHR}4+Gd zu=-KdmuS5gP+ueVsyb@KDSVyhkG6$jV`WPMEymNG^8mtHYea44 zqCL`urCmWnfW&vA!ZP7lX@``?O9xbI#Vk}h<}!t;OWqpUUyLXfeHk&^E=(rA7KbGn z-UIGmR^zwyf*pP-jO{Hf0~rprdN8+FV{3?1+8?Tt=MkIRb@0|xbnKn7#-#idW(zJ> zBbZ!L+IwOlBqG73)N7ZucEJm-Z6&SYm<~GIA-V|Y)xmMDow~^9vD89qWRAD(v?e@@ z!LdMt9M|2DJ=b$`)YBIA;CD~xXO@crBBohj@@zZ4W4}w*=K6Z`PFiRWdqiAkD~fk! 
z6ldDZ=tI`d*36+SGP&*Ag1sdaOcC4In<9)QPccpUjJnHJ);bqa7!p!(GKY<3cM9w6`8xDht;vBiVbQN4@e-65~qHH~# zFe!oo#%De6nSYwXu@9g8FND2B7fSMdVT105w~HFG+VJu>aVUicij7R>O}0VTYdw`s zF%345PG`K?V-B6g`~|j=m8VjSyQ(mbl^ExYJQ*dh5lZ=lM7n)z{dfv&Sc`>($d7Rs zk+M}*jLwhoh9d!YyYDne)D|om5MI^1b(-W6Dl?|mkzR7JDbjy z6IW=GEjcg@oE17tOQ0pcOCb$unMul@yp7XrG~ss3pO!SPgF+Q4$_L7A{@JG1x*s|D zZ0cWJr~n6@5V4X1nFkp)k-mV8o$|Q_KR0+}LevEpG23njjiUI?_wo==^bUu=glR;JUQ?Z6JL zhc0T?!@yZy!tKH$LH-YD56bE4>HPVJCVhloy+IQ01t<0KTRh6+YTL< z81Dn6d?fX@f_Q(PoieQGumuCy(J2MCa@Oy*6->f~uraop?qMtbe8K>tPjx!u#Sr&l z=VxS`%y_qq>Qr!jP8qxaI+3h^Hp*WxFs%w8WMzkaM564JLgt?gR)Y0lX3|AT56F&E z0~)K86_{gKU`|9tPokPgkXP&!>D^)p?*f_NX3!$NR)=MQ&Kg=*Jr}VCMLM1$U`;l) zma#VNpiB$M@~bC(Wfd>gk8KL`HE`8O~zSyXSS(uR_a8*Hr(tg6YJ zi%Xd%M@_hLbiKB45E+V0$n7aO9ul+gechh3+i7Awd&yJK zYk-_}zG%K9XP+i=aP7-n7hScRO=s5KBeN&-v=dZ8{t&QDTjgBBAV%k{@%iQj9gwVZ z!=D#jMnxW%UHF2T%`x=;a$Z73?p|qURw?$DP&1j0E5kxVIf1ZzSw6| zIH(0zU3WW+!%*01hJ@h;SO6IYPD^6Pv8FBEa3eWZG6nKTGT}GWr%aNn&*xxV%d!Ns2<9}7df|Dy`jR&Tf*RedGGwbA zlT4&2h+U5HPD?{F90p^8v1JQF;&ga$9gS>K6J&{CZWr`~hXdDtN!E4pDha0XsK({??kbh!e=Q zxrc^ruw9}>T;k^{x==;TW^m3qF&t6iZIARN!(fGq3v}NVbbUY;5*Tfolm6 z@~c^99-JzqXG)=LtiqX~l#G2sz4;TIma4LbBBz?2Kk)^U10!g(!jW@a*cT9y0L5a3*|kBg>;za+(^t3=Rio z;)tnhnd9+vh8#TO2gAkAin`QxpU-voaKs!v6_M%8EAKa3#O>|Hq?{pR14lxq-hIfF zP;;DCh`p+Fsn>R4o6E+lC8p6ulf6tlsRCKubV4Bko0E%bw zM31~gV!4Z{LeWu0IyURAOzbR}Z2If17k|ls#iVQ^BG}Rb~LOMNZEQ5uqC+gZx_etU>Fjyy{ zX;CqKkx8*LIUB#aBiVJB<{oZ_VGrlIXQsD@rY5cTA zOsxyZj|lDxz8Bpf@V?Vtd6i(qL#@982D$El@XtV49OUM3VfpqdGEvS`BrE5s!D+4` zNK4uWv*~T~ng&(&-MdiGJS}08bUT?_7`G=Fi&=V@mM)Co)0IX;f-BH7tu<@Bn>zdiM4|wU1I8Uq{yz+ z!s0lG;tV&#(908G#;`QI;g{Ozcv;GeQsd~b{MJqDZ-6k;v%UD&=b0ooRvUKSs?q@a z&1+vok7{CV*%p_T8H+R3Ra_E2wlB)V!_7ZOKqa?3kKDKB5&_qr?dS~e=ue(RN7*}q zuz-KIpM15w`NwEu^UKXmN&6gD>vnWHgY`9uPLpJqj0Vx43;y!|6}RZ>o%XYCKmLYu zNWN#x_TR*}o!~y1l7pt_9j6Jpq&xn3_e~QqNFUh4O$_82f)xE}LHWQGtEtH#aw3pv#C~ zhqh&puO{=ygREaXzUuc8h@iK%)xDmLM^YMTfYHeYwqlV@j9gxq%Ap|lXB03Er%+s& zVb?%^^{Oqya!JDJL01Fo< z-4foW>Js6w0yW%ATa8@z=i^Jy(AE86&0Ti`Z3@BXUrm5+6(m@5FfDM^K9>8AOhpgufP1V2LF#f2LJEr7NpzyYU|6bKSW=CiprOL{+HwbEt`MzZshMD4hOjy z|8eu_*H5>6{KqGoUp@Ki9{=&T_}t?^-s3;s<3HZxKmLc{KUN+~cZP#}jR*`I*Vzce zAZm_b(gR%eH=^ECjaqwQsdeh)}7!Lf(%$Vc!VAQR{4%1E*+e2&AD0g6MJD87Z_Q# zf#c;A*tX80XbUQ%kPQA5X#j6btj+e4Gq6n8fhO?!(!~a}f@k}t0-1Mt)4}FB7WX;| zXiu}|2mpdn?0Sv!u4BK)Th226?_e??|2z1&;Q-?fA{>~J^%L~#{YT8 z(mnt8oAZBm{67)@=M=b{|HH(;TmH{$+(P-{f=8|K0O{_x#^I|Mwrp|9QvKJ@faQGkqRtDLtv6CiWlt$!FJL zTJuC&P-1F%LS*h+Ax~$%35UC~t5%s6Uce3qEU_*`Zdwg1Y+9ztN-XfFWrM3(VOvMV zm@RZsGsC$%(25rhXbsbCzdl9yK|6#G8=D%9?T>6sQIoRv3+Xks) zQA?g%T>f995mbDA{RaeY^Q+Y_#ryR;f|s2##kjwD{`)_5_dh|}EIvl>r=P|5zb~FV z4fTIpU%{9A``>Tz0e*nDwodrk+Ov6zj1h-!XM{+?Z7K|_M9U>A(pds*%L};2H48z( zyTNGJYlAt4Inr=o7Q+is1`)A!KFeU)4F!u2<6q*PfvM=1@G; zj|V*RGt{JrP{sZ^#?rdTp5e`8AdvqEiCSnZLcC<%;;n~3!CsyIZI9TtY{FrX<3ccO zfh%F;TF5Ym))n1x@vbkSg>$Hb7^wSO^Zl*){?>edYyMr_nu%WZdc!#|Y6)C%t=c*4SGh~;MKLp1#w5=tnf@rQk#L${u*dZ~hEFliZ;Rj}x=X~ueZij(LE?-Qf7pjLn(B@rGnQF%fw2w#g)y!lMqMOWn1eLO>>ycd!;XdKher_+RPh zmTt#HPUPs;UMEhk3r3R$dX^);GaWv7_mQe#;bloNY0jc}iUFx9)=@fZG930KNd|~& zO!r|Lx}GdPXJ9K#3Kb*sgl;2zs!OP&4XPbaE@>=sw6lE9_kE}3KH8>8BHZzCt@^y@ z)Z9C@^H9Kr%i(!xJ?VgXgY34O3OR7wPEU4e`n}vv2Rq10;k?}Bz?wyfwaU$J z(@{)?)K7&Kov;d#(YOicS_8u~5VxHdciGnV&0SH-5x}nQ6L|Pga8e166%o><`z_ia zMwU~|Q0yh3t<)FHWM+#XP!xkylP{3gXnFN zObKV!g=;51)oBdu-qQ@nzaQh;S25IZj=;5+1CG)`3Undkz(I%t>ta)9z#OEEb$f|8 z*_O{GhV~%>);{Fv=RjywO5J)_@-ne&qA+!J+bit`k<7Wzt+>D=o`@oD-UL*pM4-EM zyT}y@+H$+&!I!>R(5(QjF8vfJb*uSm|I4NmxTF;JqdFI!N)<@j`adY<(NwtvUMOjpvP_2>YY))`*VhQ#U43@xDe8^F!agf))n3P5}+ 
z9ZSeK6T3fgqt2RbM~lqCOAb!7tBeCdQXE#PYLs5)F_1RmtARa*Lk7c}(o3fDr^w^? zkEs>X@YW3PDwvKc=-{Fi`bjs;D~`2BiR=Wz z&qwjq9u7IQKx@|Iwgm-;GR5^lOn$5R`RUm%FvmS$h_pAYCfybEr~iwD`VPQ6t*M-_pt*HHWbsOTc^w4ESRNcNxq!^? zYN8sb`SR@QM?u;tK?PEKoEC`^94s3sNRj09wFMVgv!b`72c*g=XnoSQkdPg!&KdD# zR@)*A0kSos2T=>Us`GZfiSd5dczS7S4b5YoR{pd~3!p>&BRS|P>S(4(hB)IlC`w^` z^BF0H-idgWkBQ4zVS~(QAcNiA zlQypHm*4H{yO;4<@_qTed5mdbGjqqw=}H5#_Bzk5%IWZF9*;E5|O5r<#+#akkyl*RPl6d;Qwxr0-JY%XIQ~ zN#>V_M}O7K@?E>TLhjQSZhrGis~@}~1^Ft?*nf4r|K{a^X^QxP4ri z{)@_%%S%4JfzT6!R`h_|ZVh02%Tc`^9S$S>&Ea20a2=tW7~XZU(QxtSsIzz>k1%r^ z88eFgn~V~}82T)4TJ%t8;Z{WFCuG0|PzWzS%DI$zZZtfs~t3q0gsJrxkDuFbo&6W ztE8PnkS0L7MZ2oY?6Pg!wr$(CZR0Q7wr$(CZKG>?^Ipt;B66FX++@c6@}AR~A58>?5_LXx@DF$lRe9u{jlEN*B~1%kY^CN9`d+wuq*l5;EEqH5O0YQiBPWNm^yx8po6af2d7olev2D8q?i z>XEp{oox;+hNnVY+`rmiU0;AZd^jn9ou058DiKCq4OSP)f4!%d$-SqkG8E^bbk&{9-*_LuMR$$>3##gx zO3uAd@W?Z3E~7MA+r$8pSQ8sFxCELPQtq|T;X1ec#Gt#J??d{rN9~Zz?dBUQdyv`C z5=pRG*xDrID&3-<@{*k?U~W!8cNQdi7kJp;{@y)NI}&bo!wNZO1U?QBu|J!>C*B+h(v$yPD)L3C9s<7H}yK)m=77V#0)pQG)3rTx@TemCuG-ySY^b#8LIX0$q1)wPF3B( z5UqAw0ZZtE!bDc6P!WF2o1@o5LZJ*~4=@YO51BKb`_lR3&gmriks>W6vwBE}ZzwXz z;HHow!%A$IR4BtyJ+LG;D2d`i6Nb_98sn@%4C=LbPq(};3^?1Qpf6&#nri}EG_9u* zp?Dk=t#Mj_xE0K2lm^>(==FkZ>?mraywyzh!L_GvZv=$V0&tY-jiCz6(LhrvrWrsf ziXHVO(i#F_V)n3^AkT_dgi2nk42kElms}zk)pP)NTEIyY3rV8Cu&|?Sa6+PyCkb>E ze|NGPqtc_3>wSn^#7m2ti*GTqC9lx1MF%nzMj&V08F@@(=jC_l_MMxXZ+Rwue7&Y8 z!Q$F}Ok{_;ICs|wQL8XmCd(&a0n*r@tkROJn(iEdN`Xi%bOp1nmRR{b3GH6U-FjNw zqz1R{k(6GewZXmG^-hUqR-HX|SZi#ut2-q#%gz0$vqD#2?n55jAA@!wU3&vLo-&i0=x3__x==8R;nRXjon0I@W~S4$_<>3+RCc)1NVZA=Enlunc}xwJCJ*JTdQN0xM}s-WnqZ2lib# z;UEfRj^4E(oyd~+kU?gUP<$PAFsUUHBoTP@BW<&>fBIb3ZnKDYfPYwGW+E#unzNQM zck3SFLHe*pg)3_3q|XT#Bg}!jxv2ZF;ZkU?)wHdA+uDW)b1U0rw1HVmYZj^| zV5#Ty1&p6`#N`XFrh~C}5T>-T*TjZHY{ooU@E#@xLt~tw~N7`V~0WnyGYPE9AN$P9?f^4y0F*-fM*Ia0i z3^~Bn@o|#07@*A22yD0&nve)&J2NnlBlC!OamMbmhEyoaurdu(E_lvT^5Ck<&P-a{zOfcMNS^w>~ z7G;I1x4P9=qu<@v-5LLsuedUWn&K7%&N^z_n*FcN7;q29W|URg!d{WJ%^(fVr%AC3A)xGzz#z2SaofM+ zozmE&2CtiXIH=z7HhP|*tI`kU4-;;Z`5AAXQp@COAn-N*;E8Zk|1vcRq!)TQ+IijH zGtT0@c#@wiKstT-@OUfEJNW~If5t9pE3tgSr}dSwsBg^M8OFc4`lgO>%#Q0${kA3? 
znt4V>%~h6b1aEg`soc(lSLeb6bi*6h1T_I08B4ATXlK)$x~laKwow5 z;e-22*IIRZU-%Q$y4s57`F#B3N2?;Vlew^$`^Y3Du_&)8i6YkeW;t;^1RUQWB4eql z%T9Ap5W*xvS3u#rc@rD1ccyeAcc{+MAuNCO%S}luXn^n`6(*#{+rrQakv9v`k0_;J z2dI^nCAoe2mZ=lNQ%3|D6VU-id?t=g!4NCXYq#gyWp&UuiPG>t`RzS(U2-WRIiKoL zn;y*FPFRHk#os+2?h)v*uJBr8lzp6vdcQ3$PF^nu_a~cwQ`LR)^Ofskz*@X-ph z@CDhv&dLYf2{jm~XUyB~g8Q3m0U8MpldFlhMqUHKz^*m6Og6BTgBXa%IX|o5NJ?PQ z*gq>Uo)~v9XpCIDB!>67h$K`eN)2OH$}0-Y_MioBp^e9g>~2~%8dVN{Bd9`c*yi1c zW_>EQ*2s%Rxo}F>DDlc=X(TCwWeyFwC)(WWm;y>qs4~ZqEcq0?9=T7aOkS`ycT;Y~ z=rwg%BGtFXr|dq3JsqK{;&i5s2}#1>0Ztz*ZJ>u~wC>74pYp_tu$e-NH>77wOsV^N5-IDxW^cy6Vti;Y9>p z`wFgOz$Jg5zG1Lxnb*&8K3nJI%p~5ulDem)xPOc@+pL# zG_r*q;cgyHZO+T}*2&g;bhLUoAN6qb#If&IzxZ!w zunrp2#q};5r;&GG#5Pa?E}kzJ*=BR`eEs-YY(85KD4UL(K5ZxPuYKcR_Rj2N%*L`} zac+96su6b69akvcC$}4eZf(HmIIxPa#veR~=A;DI_p-yyLdwOlvni9S3FQ~XJ@9E% zQ0Jrrc;{Y4|Li2ao&=P*d%Ru!+iN!+&cgR2QCxTjCHHrg%%0(+2dRHn=>rYV(T|Hi zN`7w5>Sw_jD%fI`vA$3BC>hc&n^5oe-{_a_j-9+|@RXnIz^D|70es;v$w!w>MbNQ> zdy|?PPS-*QKEW#JEN>lQ8$k!kw=uiD@9ENz+qlxVE|=!NZnG{QpSOoTDZTNWvq5Ql zuez0bX33o?&dfr~deljFBN{0m6yI%HZiD$>q+X!PBffdCFUL6GC%dnu{hz(wK{=qm zk6|Qld|Fz1+#U}hK|hb62S3&}JBL4Cq_4pf=2w!|K*VSNHy%uJ~)%mORdY&NMOe{O@_K> zKi(R8Yw^Zz=+O@kymjr7eN^Brj`fMXYu)ZCe__8MaAwgtU4U zm*^gded5BH!Advu%$q)so-5FRTJkKkyv}%5t_H~?0xTBl0}4gEzu+e%J5ZEAX~*{P z9{kn1vIp*43N|Rpy|XTCFs|LKn9xD_Vz-{JJ}-N+81pZUS6wocZUXCcy45#>b{zcV z#VClEkRX6H(3!Nhv2(i}?wSefH~hBF~Ft4o!*ek@@X4p`JG6yUmoRmbK- zoBo0Cv?apURJd4L3LOX2y7*)9PWaOwITpblJFoqD=n&j{gT1r6Q6~U!%H)t6&o| zCfLeaIBp5YJ`(qWo{fk7smk! z{6nzic7p0)G~_>0X(pC1oWmaGM*mPqOsN|R&%&xOMChy>EWHZd&8s`pO8`Mug4PEq z1^Xw5#ysI?7_SJIek%*{r0Y%~2Lg%cg zC#ViFe`!%fwCSk*L5&!Rw;IEfGAYf5UkRgCA1x@u*_NU}*-6MpiE@7{6Km2#t!c2u zv`hfVsHs3`A=HaiAHQxpv3^kOzO0-MJVohozrt0Bxj|z)D1V}& z#Hee5BSK|9-VZ9e%ny!MDzD+0h%8ugjT8YYkAr9&m%((>TWQ)iTLkYy%e=4U9dW#F zyeABHAE@_U-KD;RAdll?;t_QPjIU%#fC1h(Xb@~b;8yc4Rc>=P!&>DwMOvBpP%*wE zQCZL5&1$E*2i{-zNR53x&MwUK%i^uic2QdOgMJ$oWmh*1xIBvOPcw4UN`8A8o3i)4 z!db~nKW+D20%rv_0+X$L*I>iVI}Mf@XH9&er*_L3V#u*cD()qJ@O68{P0p}eQH#V$ z$+r@7VFIpE#bw!pYX+_Wg_>@ar-eluW5ul87=}xm3S~1}G-e7Sl^U0y@7LqrhMDxv zl_AIOfnq!U+xqLD5VI~(^5L#y6X{c>zk6(EomTKNG@r8gON5^Tivoo0HvMJ-It7xb zj=L^YoVzT%vE&)`TULxawo@^K9PtBBaOrKprA|tjM&;dr3?2%%87{P<0oAR-ZYyBYSo@1O~8&EL|+>d{O<+Uq5oyOpiDLgqbc^=V> zc7!@V21lD~Dm;pH*|Lzk5m}0vO3YvVN#+s0w)KcgcMHH8p}=H6Yo-=lrq5g$!w4_n zwc|t9fx%0I8aiOd&?DubTDyxStx@Vbg}X}6$lEL1m4nb!QQgto7=itL==o=ge-=idYDa;IBdBAQ7lEm3p4gEethP4W?_8gSw<_vr@P~V2IQ?On_J;y;b^&f$g zo#JgQ;v5IbpkvFiH7T!IR-I?Lw%=gIo{qY#GpLNFHKT^Ld25r7kY`?jDmLEUMmP}N z58@1~n`ftbdm=mS34x%20-I^KI+$+B8;26DXN=jYooK~M6ty?GgFW*eOc$_Fire6c4-(gOPhX*K zO?5}1Tbw0k18Ffr01q-6;&)G9pk2H;Rx^E}U6hsA>^oW>4p2np>4H0q6gHVIO^GDfQUgu4I_I6ll{})zDp6$aGPc1RbWL(hl8eXWjGZW4W){@)W{}zu!RQ|IE-Y(?cgA+ z3(8?%=zj;@zlqjN2p99xaqJT2BNE}cqv;Vbuxt|h!LupcGQ}hf)o@ZL8*p2rSE^h< z?@cV`pr;x*=jgJk@v@SEG7;45*ij78bU81O6{onSZBLF`q3jJiO5_|)GcXS5uN`-h zpM$r>%`6zE3X2_(W$vtUxL^RRW)Fa6Kbxk~h-;-MAqvaZqih#v;DKosk&dN0(Jn-s z*m_IBeXN^<2*(KM{Sj7EM~h=EEBB(J@qDzfOL@Ly-HbSD`g4NjNAP(n7#eZgmZoHFgUzjn?iw8JnfO};XnZP8d>wd5s-5HUw> z8`Lf-BP{GB$PH>}=}3KJqrRavb0`Ph6!S6?KA;NXS(eZ3iEHut%CrKk~e%I1vaVMz8MrK{LpQwBV zZ<<`za-ZC!sZP;5Mg_mz{)`CBXu{Ig{KpA0|3zAn*5x8Z7hg(rb|q}P&E)!6m@HvN z2NfoNG+yd1qQG01ze%=U^7k)IoGF#2Lj~pQD9*^HluW9mPoW_(Vu)jlXMt@jj00PF z+ik*Uv)_T6L6O(MZfH(z!DL-4-$LHCI~&$0m0xnB7{*wIQT4UQo7x}r-&5pQbieXkPR-}@iTnuvK#1;R<}gD4kKj9eCZDgYqab{w zV$K9k7#`<9r*vS~WLzrwS`v5faSCLxk&I09Q^8WcW!u(;0KHF1Ec(4=GQ>gc__Yb@ z>2KhjsnbP`_WU0FiE%GU0ucC}Qk3+&(AjW&lAnp{25_U{d^dbd9ctd99ULXkRyk5j zlo&4gaXH8*bUvyr?&m@?Os1~Xt%#2#)f`GyvX_Q7d?g9KmgNX-7ZH~b$r;Z`Z(Ezs 
zyZU!QASFx8>(u$>os?O#?<#N((wp=+A!ZGF;mdh938%g^JL@4iQA-B`*H0EF{sNdb zY9i?V$+AGFh6p`lHg`l11bT8hV9Yvx?~rs2G!=a@_g_kGnR0LeSx7~Z8H5i2yaU0-N{hpyk=AVp_k6_CnrT(jF(M!=D7PeF)tR``U2GBvTy__ zE_b1D^T`#qy}h5=Z63Wy-PZHpvR^#6!g8C~Xc9D{G5GyMk@5f$h&li=a`PgCEkb}| z>hE4f3b-9G=xk@tKm1?{w&7u7KWxp>`Dyj$lHxa7AdkN+Kr5Ij&{>0i#}iaF>jLE{ z!Trvsc%jGjgl#V=9%xS`yjB$M%!H|@)|ida#%c2A;uPk31;#@TR$x;?T^5v3kWxnW z4fSLog$%K>T?t9_hm8bp8euu1l!>~u$Oob=uW|@{Eoee_D?CNN4sdW0r;KZ14SyiI z+c~7LtQMwXI~I3P2B{40G<&BB?Xh%2=7U}&Qq~D}&>RNdHpeRZwOOM~&mj?Id}C?4 z2)6zaycKBVY;l1Y0YjN(;&2z~-`U=JuJfCR)tAUoC@}w5Q~5wvs6z?FXwzsci8;`2 zzRE_BOv~ce)?iRsn-mb*mXH-58-<~uGa*Yeic%>YFt#)W#D(c*KaId|3(y9CRBioyf_L5WJ}9&GB><~e z4q~%;Xq5clD)BVsY~q1=^%2DgjF$~4=}4SwE)6Qqxzmh+eep6ucUlYzW>viE02K7vLtn=G4vXS;t1QzE>z@bh zN4SkmCk5()Yw;Y_Igs(Y@7L)_m|Z`;%-@APtWq3z=LOWZEUc4(mN~Oz-;OM^l|;7o zs82N1X8onmjael>k{QL+Z%wS5zQEqUxsKUvSWhi%X3*@_Nl`6@9Um4yBp#g3iD@RE zzh7;6xtgUOx~1u;Qb4$Mk`B4{ESp#}2YG3ZzlV4EI)i`|T>=T3q`GwgV?5k}%;Np* z?g@#p-%GF=d1U^0i6huS&CKH*+9&WWD1LVj&A(Jm-{ z&~4Ax;T=4a`h<`UYTMD&b@51+Ul!3lP-4iNj+TKr}jTh@OYokvj&R>b+$?8uG{NG zndW$KDXb_cST{tT;Ttr;koq0Z{+6WG9_;Z99PHFqCt!NmGRTxBF9#^U92j|!<>+nd1aK63vVJERXCMT zHQ_@jpxUfZvuD^I4%o2GEhkYahl?i}EP|E($G7ic*mqUmSZbg_<%}}PD90cfjeao( zA5Umo5S)$}DOX%tHmzi{)O}=xSol(0A`~!T<8=Y^4vFD6efX7iLM1Pzh?GPWx?#6O zb1FtCd7~iOhr-iDUb>Bfrk21ph724;=WXGI8f`4W9A2}|BNYY5u)j08gg)9 z(=M(-hzJkPlnK_6@3B^KEn&TuWU#^wTEvw3rdI z^*;m$EznKThB=_=meBL8m?gct-}o9P2bBN*G)R5b@?ZaHD%X)*pFYjca+tTCFPmPo zH9OdFN>tbru_RjWDj~UpgcX>3HHNILoex69kfSm%0yr-NzP%(9B zOa559P}!p+_md}QW>bWfGOj>Nkr@V|BBtqA>>PybCklDf)zIZ6o_M@M^mgK3A~7_Q56873}xc8k7} z6lE zjmADhO&YZU~wv;vD2gC3$YwDI!V-?ZS4VvR-wk)t7Zk_&!|BSKhtYz4drZ;kCu zG>D693g$XsB&dz~#4iXNX%4yOC)$CZPa4KP6j!Av`2$vFw9e68O{d@3>&qcc-2_WD zG83V~QbGb0GHU<0z@sKJ?{3O;ICjQ9+qEZ%S54Y{p=7ibh0K_s!)-NQVYH`p8Zg}l zl?*^k%b6STxP($ow_MYU*4cZK7GUe#>3mZubYd$^dACXOLPi|J)IkO4Bp^=1!HG13r<2C`5_q7pI0u#8%!1hL>jMp%m#ZZY~1 zxRt{PrEKmv;?~b^-FF-pSFnOJyrX+pF2OK|--!ZM-qn%XU#(#>X7y&E@8z50SKVgy zXeVtdVj|LxlTkz(@IRi1lYAj={qk0Uu{;K5L!LFQ0j8qj^8#t5HPdd3K|%4Hz_+Zs zoYy=I)L6k!ZWn_qjEp;iLE9ta9WoNA)Om+t1I8vx3lFykF^fc}Ct^wtIT)8gRw4di zQvHO@&Qih13jJF5{vNUIM;c`XqT_ocw6qc3Jm{T^Xxa06X{jdYv+1VJZW@3#1rE!} zkC?|X;Q2B80NrT8$_9WV5gU=@A`KObEzEJ{mM$! 
z*(OCC8un1p;$F9WS<&<1HY{bL3T32X=!=1LCI3X|&)!Iqe_`CX+N*e6siJqS7AJ%P z<#}pG%WAp1qHqs6fJH^)BoGA*Kh~=++W+!UDxiS3Y5GcDBlHS1ZQZrzBAkd7&&xLj z``zXMr<$MxcUlM+Nt4jq3v^Q|ML)iR3l%Zdmv+kG)yd+_ygle4iv}{UnaB3?;KlzB zi9ae8HH_nGoNqP4{V^v%)*z5dWIbXfYx(L-kKmT^E3D5Q$zc`pj?JDPA=eVP>GU|@ zx#ZUkZ8ew7FT_r7UJZIAyzY(fZ!L=KQJF347ORmC5_Jb)-8^y81Ov8cW;~Y8cnW*v zY>=)?+$bfd(@&{c( zA4N!6xlp%fyw2Opz>=%}TuY6tzl}Fg;=D(>xkU$Ekf??o<1ozZ<em<`_^~I<{>$+0ys!42wfC zn2u^X1-eceoM%Pe8CYps{_&ZWEQzO}ki9a}#ALU$YFAUzLg{5~bHyD5L*bDajNk&a zr~A&j*2c+a?mt5oDFV!erLx8z+TgCD~LR283Du^>7l3@df{;UXxBlD?>SxOJc^&_|q zNtOITGMle|H&y3HwPtd5UEZ({ZuE(|!s&U*`xWSxe{YrE&6U>WrP~&EUU!Fd&s2H( zw99!p&9hi3Gu&Ywyc`1_gexT6T;TNjEONA&;qF=jhr@ynqGIL3lhNPEE1Pb7Z9evD zuv#S^l}VdmrK7%*$KAmFN><(w0I)BZfJTuq0hQ=Q_!I|TcC5%7a%vrknsaWcNWtn& z*=KgN*VtazzyZ|ikn+^9)DmraGPP$B&6qHn!=Y2C05uZd zE$O;t=Z*hPD!~@OT*ej{029g&h7=9Z4PrEJR62sRj#!Q(2%uL+!wau%Wvz4s5f=}f zZ0ib~P69dDS|@3c0NaCtGkuf&xc$4S`ssam9@^~oBH~xMp0j6W@|ZX+f<%=UE`wPH zE)piF%ZG0L!~X7I+9ET#;mu3FA!R@Pw3wk&z(f;0{JtVGI>1F&PZ8D zVret8x@AvQ$i(%`pHk>ViWgU(#0T9;Bp6sLAenBJQLf5ZwGLsJ zKWg=sB+WVGXp7lu)#p(UrtAt&wvAY2DILol8m((xx*d<#JI@oZ+vQ0bE;-#&;8*n|t9pzU&QCYoYa7pj zUwKr~3KnD1Uu92H%q^@Y84~>Jy%9Y}k1|wv2CO7F|4=hFp2C?e=WF@8Q7^;Exopk~ zsR@`>>aGaDWXTU$r|xT{^t%I1)KCkgF*c= z#{!5FA{qeXB$jv0V2niar6}0{7r@lCQE~Mo3h>gd41i|!l-^+_#~V#djHCOSIFRA(Y_y0@fBCf; z2VIuBnJ*mGgJ6D+K)(Q9o}nO;}u< zWA<7W`16@?9?uzBDJuq8vT=GiS>1j7Ny3+u#ZXNZEQA?$k^4|M%hQfl!#t5b+>iyd z)MxusbL=i@`_1sPN$>@~jlj6b_!mrjFnA8eanT+(i>?Z6>A$ymHoAuQ?&X!85?QVx zG%c;LH$d@ZgL;)+wclvJ*VOzE4Mr57SO$z@=CruMo(D=mP@lUEDpqW2N zm?}XF=I$c$jRgT%{{(+Lq_gOyEQ&dp93fboXULuS7SK04DBch81fz7^`Dn;Y1Aowfelesa6nvp%FdU zeH}x^C^SpV>L;)SblQd@9y=!2Xz|#RmHw&()5*_*%C^|XePr0wf{oK+wPrTNlV!Ao zC5iphiJ5(L7XdTL>0Pjhv0*mDkhOeV?FA-E!)z~>L9ruzG|x`(8BlPIt=E*H;DA?g zH2B8UJUZ&>{14mJRjua3t?%lG^1_@=e=Uxcv>yBgT;R&A)hR|TuF z+xyC!RHZbb!`=|fCNq_T&Q_gpJ zMB*K9tz*&nr0GhB`{V4TtV^uHFhNSI{&p1WK~Nkl(nm9p!092|GxH%vDAK7(x-qkf z4n+0V!^C>m1@Sd~GI(^Ih_5z3V$6I=W;|MojXaw0C5d&`9$7B|F6W;WUN(TQF*7*u zPktKE=GEqiQ_+ZM)TFsnUeN&fdG$uAY8vW@psj!_dzOX4E9p4w1(r4}Xz~jpgpf`e zsks7$@S>EEII7G(l&qafxrp&Z0t;?p%dkE18Zeomci)X#{z8E>51Rc zdHRQ;kJgO!cA@pi_r0VVJs%5$Ws`w(Qd+hyqxGJBn~9Y;B@&mYt|I*Y#qCcOHCu}p zO5T#qjZnSH!|?A%pzKiq23?LDMpQ?IDn{J$Xc1DiX3jZ~U;Co|$M5qzf^|4J?C_#E z9Z4xF_2yZHBi3QKBb*1n1(z|+O(BcXM{X>pP{sL!^$`x)1Gn)>bPiq%yW8AHYf&6? 
zR!kX;E*Gg!nki(QC6i#%Sfdo!6QAJV_3)KqeTs1Jbqtb`vkCQsw8Y@_NOO&wik)L| z&V6j+2axTUBPzg$f-$}c`9R0hy6I5mC3G%2>M0i7NDGa|uI;5&?v57;h65L_28bMf zd-hZ0&cd8SBrftBAN$AT?YU51O6^l?z<)UO7VHC(8v7hIl&^5!1)3~Y~5+)i!5;UH`WScr3`3J`0(mI|&T z#=Yjw?qJ{~&s*k#!)CkYYHbk?wMh-k+lpB4yY!~kQXg$e^n%?bae-yC_A_gwBmh{Szr^7&rTBlI&@YS1(S&^F!l0a`R zjE{pvF^mfd+%Z;QlRpVJg|iCgUVx9a+;8cCtNC)CIkCrnv6t3h6gyQt4iwcIun_xn zHh{cOZ28-Bz_cU3o?A+kX;V(cFG-OLEoWUAr`+05F{LF_cV`>FzvL&Kef}$obUMn3 z&`&JyE$M3Y%vV(De1Am+ZGQ&=cA{!}yRIo8z>3e3Xlt5<1l9HIRyh0fj#G+z54ISc(P=%em(MqkYg{v1VtRraK z+eattm;Mq8+je3hERl7I4v{t4xuX$X!ETMnONR(IglUzAp(=1B#RRsU#dfam+WkF@ zxlswT=i34V9|hZ#wwqH_$dU+ZxZ!r5R#1GP87Kl(PVdDqS;D0!JnZscxYEZ=AMw~O zhJp5N_l4~XTi%k8_ceqSUuT@myZ4JIy1UzzRXy7dzx!#mOg+E*uJCuC>43$xhWSp{eCsX@f;kz~#1DcwKt-@j;{DdsmeXy+i2AMF0v zW^N2OJa)}J@`(`r-aaOBP1r`nn5#h$YKGJDqs!cBC|0f9jm@#P<&BZux9%jxODar7 z!iS;@Kvpge=HmGJEGlfDD<%231Odp;*8;q=9{NE>aV}zM?FuWo!45)K@;n4X<-e_( z=+5X5`fQj%7v!#b-ZF$Z%%QovO$w=)A13{=^V2av*;TAN1enJ@oHkWlBhB+^?mU;+ zleuG9RhY%;Ljy*P;}4#+h4%WmuR9&ZZNo^>iw~782~<#B(_rEPp3ncmOtF1He@K_w z&%Yd#P*-utP2y1vKYvZ(@!pUD0;_TH3dJe%A6+9wgA7eeXpASvH2#lh8kj*A$hL^r z!E+o4Jd+R|8GRwV&6+A?YmB@CQQ8>?R&Oj;FM||*!9Ji?EL+aA3-cTpy#TBteN8-B zdBACU$zp&6ZgrJ#@C%qD)5E~T>6PuOGD+y8q7c#;T;x~#XMWbK?m=a+s(aq5$hZNqI-ke zb0PfMS?k)%ljl7uKLbeuK=pk+@W&O1>Ad^tZX7%$B8T8?bn|w01%~IG+XOHH-a7{u zgJhe$9Ni;-KYksdQ}zXfV2>)p9C2~ZIohv%uLlUfeNLYT*v$UzzmX!pMsDnkaB!R^ z%$#|&7;#OAHqM1Txe}1bOs;EMU&;*${{qe*1!NIk!Q{1_$>f_}sbXwjyK31GiIC&Z zLR{Yz-RK;e^z}fZZ=i)7IA<>JF|aZfUY@*H2clu6T5hdDNJ-qq)v*ZQ>Nu~fFWNG^ zoKO(YzafIo8S_SG@8c_Vy(WQgwN9M6B4afAs4Ns} z=)MoP*XGzltg25n0#{mHiiBFTVNz8_649~X=YYk@-FTpefj%@8tvx}W3u$u>p2v_g zYmMBFyW@8OrC@|MDQH`<t6sCI$n9#DAw>@iO?Wqd1rfwXd zW2WQ{oQRiz=A4L1{n>@ZrEpQKrjePKKj*_z9>=bW1;n*?=^Q0>D<0gN1Vp~nf$3tV-(tc_a__3zLkX>x) zE|D$C+5s7oU88R7qn@pugK7Axuh!Z=h)HkB%yg*4(S^QG2$?$p4>^E?h#UzFp(~qq z%??I0P*|(*Eq@uB&ZXJFN(_CQ>$au4-|6~R6`N<=k0q$0zQ_1nGAeD4nzP)G?f3C#!}1FM9{HRoA;;M7J6W1sv__coS#N?> zp|yd&4V3N65ku3^eEj&(!n_!Jj4=gb1j(6LR*1pC9-d|q9C^dBvW)lx8%`6}Ruz3@ zS%-KcH<~ht?jc_2vh@4C!Z^wB(Q^ILe6mdXIGjP5e~##oU&~gTr9Gl^IwEY8`*xOV zi#sl$m&LA=U+b-Fg6N9BQxie}1Sgv9GB<1^Vr9iwX9enM=PWHG2Og)%BlcucX>p)S zgZk;O7$-I1m|kAt|3=SfKNSqFLRIzMAyt1fT!iGeK_{zbFF0`4wax?s{t$+8Xv`(M zOAiF4*U058{cG+AkAz#@^d@%ooUG7Cm^pPwPC5HDJCG=eQ~Q+DXQ49GypOI@;! 
z>DU|$9dUcf#fXE2#EN0|56A%;qgeqD z%`RM@$qaj8mhi{cBAB)C9wAEFm3rCepDjT-;MI+;Et)qkP5y@aA%Xq zg$M{`a2(K*fAFa#6UL~+N;i}3N_7p07nnHfwLqe-ggSRf0Wn7xP^moJc&(i^IokUo zRmsK&{6uuD7K|tBgv$I2S;V5Z#RNeKxM=w$nPY5E3-HUvvPyvTI`D;YttG00SeP-V zFV#87x|7-0q5i=)Px+MDbM_G+f#PTvgp(JmR!lgo-8HMd8Dc45%7JjkIyEB1H;(Ru z#21o0^T?qZg5ue;1|qR&3c;_i_GkgZ12n>pEV+GADq7A;UGuH+eqL8Xu-)5m20wT_ z-hDHg-6TX(*ah`zU2-0a_bBUHc>Y-soBMpNS?P-yC{SO%YUTC_Vt+1^270ZK2v;*o{Y-09P@WAn?( z${i!fB}`La0kH}z5CzNS=hn#XZxFIc4%o}?R%Jbzf zcOPM(^lncaH>!4fUo|ym8^*aR%E5VaHBWbUmw4u|yk_K@q9~`WR=2=GJ*0gaPXcY; zTcClYyxD0W(%@#iL|Pf8nc(#@lY=xivWLsujALKdWzLl5W0Dk(p^T6GdvbRvW)&?L z1tC^-DC6HlEggQ|{_q?f&KK?``g{Modc<*gKAf44eyfEbCdjq1moemeddTPhm>NE0 z)*#2u-c%>+1r<@-g1V|@>Q|R`sdC4Og$(ZJnqv!t-{KlAVg`HRfJA@@MC0vj7}L^| zvyR$0;bj{sj=n`VruFVNsC=Q}dwG`t$Z(p`_)J(zH=nzzT?N(h}0PA1eKX<3w4KWdEjF{4#ZHq&tV9J&65Xj+tOX6K%lzRI?J90bFq%y5MlEJcGF2s@o3FrFAL#Ckw2Rtg=@Abf8!P9>emziC7}}R2#Mr?5 zTMP-CneaGY22!(Z{iH!QFzVu2TAT*812US~rqVrWWFmG>LcKW6K_1fG0=hKtsz{UJ;fC7#JjD-yFVFgjQ>H&3r zBcF(j^x78OjK|_`i8F{IJ9NJXyicGT<9#nRQsuq9mSTWV?L!l)T+?d&3mI}>zFrYw04xSy_CrGg&(0pgbd%`Pl5R>K{CoN(yzZs>=!1oeN^{4c(^=`N7}87 zSZc=_Ro2OqbJatRCMm3xjmFnfPgV9*meXWKyf^HefBM&{71IhOep z86KoLHF|H=&UEkZKA&}7YB}k-9TrP8nO{Nnv!6ajAQe+5kQ-_IMWZ;4k3nuI5FZzo z1k&0r?qg6R{@ae*X1$q2nBSe;5H8uG?qRW(n6BST$V#Ti!E@TcYO!!KKn?-ioHUAS zYU~GB4p&Z@w&XXn$n8LVJ=LJLBl3nw0ZL==^95_j1>^3qJ~9t1Po&w5&|Kj>izfRi zj_ZK^&-}~<^^nR^Pjf=#x_V8S>r)mTU}V#;zOJf#0MjlC3XcsIk>ZW5CWwIN>(XlJo4`Yy^t*XyLw+6KZ zQ_E)p5`&@v>6iCgcc8o=5m1&IroWNw4q1zqWynG0kU70x0nE0Ni-)}H{`X=HsP!1y zJyg@Xlwb}j8tO40W`HdI5gja`#sYS+jq46nbVG4}_6G~+vShutQ0RM?I{ke+3;ic( z20G8UZ6V95tT7!8=@U~SJwOm`WH@rB2UOImLQ20xwCa4wnvEfva>z#GYEerq9zI7l zgNZ5xcqUVK1hAQ@u3=ks1e_6z9{^-Po4=TyR3V04pGE%! zo{g=W#Num$bi>Nib_Z;Sfh|nY4=w!{`{iT%*t4EiGYVixT=y?9vv1G9s-t<{B zVXs8xVJ%<;vSADBY&&(4ohvm2Wk$*i$40?XH6p-vze_V@ac^MnW>>7nB4w4%y-<=G zS;qt6OiH2++6S$c$|9nNZ3PIhf(TB--HA-nc9_hOS1c7sP-Bct!g(W-4olHj6<5%r zDKQtOoL@&s;q3np7f?{kRXE|aSg91Y8BY+yF2roDy^thhfE5b!SDmnf;Rh|b8fr8s ztQ{E_7|;jBpH$c;6_RMmQoBn#&KsO(8&7O+_DykGnUlq+Fc2l;pYy~G z4T~T3(NIV|hs$yT&$8v9NMi7TOFy03%z%~~+1%rB7^sGJa>$6|?)Aev!)m0V?68?T zgAiGrI`vnVHIKQN6hmh&Z)ZyG^>fH4yIUlljN=&5U`Uygim2V01pElU6hQXQ;@P~Y zXf=K|jUg%@=P|oisak|WW$SveE5Xe|d>CwwfzfsU&tkRy$M9N3-nXcL)ITHtbo(fa zycDu+Jvv5ECK2ikU2rX2A8BDx3L-aFRM;mQm*)ZTX%7an1u!m8&>=y9bpoTTJ6n7P z_lrrOV`ZK(Wj$vygDXQHAu4j%er$U!TPJpdBgHO(i``zaD2&jk_@WcD(s# z@!I4Hu1Hg-#DC7wk(iCvF(dItc#%WIBp4x>%9}VJa6h;b#@9Dx@ND6~kD?rRmU9FV zV2hZt;Nw57l`U~_*Qb5bT~fI3IM3tT^DUraPojs>XRTIil%bRneJzBCX@%vs+ik=_ zdN62T{7JYdQ_?2lZ3A~x#q8MWD(WN_*s;pF_PGsbUAmyFm3!qjDN12%dN1I%Yfsn4 z>EVAJ96#>?QE*ubt-KO%w{hkinaGZi6&OzHe8#}GJbG1JO0X5V7W1YY;-u84NmlOC z%k1hok!jm67)=aaR9Wd}1(EyMy$rV`LylRfP zwC2Wl=;gBU9f>-c+;|=k@vu~{CoEe1o!^P;H13~Ew>WY2ay#folU=VQL#!BATWAj6jhqVuSJQ6)sYF+3FS^m%On4Gql_p{e z?S_DglyLoN#o~E5Ye6wi3}puO{~HRpDq#KBkvr?lv7AldXqjO%&?ie^k7z&{XUK$U zYbmqV?gXV--Qf)`(HmHw%pZ}Ci4@~rt1UcR&=Dsh?b6bnD93zmTUg6 z#aQx+s_9_2FOfO#`4+~4v6>b}f+F1f8_}T=iUFJX7KY1_=#Q$^bIo_%0!b82rD0w+ z%ZjqxyB5YIT+KH~o?CyNlVYo{bMmrup>wZuLAH++pyOx3^^N$oJqDS7^vG)2rGKSc zlDQ#abUv>nG&-)nA(0_4#ZUQPbpO#AEhR60%R7*3qi6J!AAJ=vJ*kwdSx!fH8{b~c zwiTv__d?sP#^K>TE`2>6PMD#m#4R*eY@>Kdt)B2{`5a_nVoOPnU*Q6FdlfNWVWfEb z2YW9`lxOOJ5-P-aryMMvWOzCvw1bJhYvY_g6te~R(*>JQ^_-Z)p&ZO_LxSM2Jpowd zQ$GxN!weRy4zcSEu_;mR&cG0pN^hH@uL{fruRAhR zW$O{OFfG+9Dw5AA!-vyuU$P~3=wdn&m?gVi!`u8|ppm(4^>*?D3>WRkgb)h&ra>ie zL}{K{#eiw(NHI2-Kscc7OlD3=i|3rG2E^u(}d5V}WEoT@=Y8Lte z#0Vbw4{Z)AdZ{Y$J(87HC{JR-sQ;>(?*DO0YW({@hH;PvtRy#e)pG^>Ukqsn(VYv7={n96JN82;*OCx_XqDyf_-L* zK-enWu)!^ekGlrjp_dC^=d5<1M4}=Aob;A{2^;n8T6HH;Z{S?!VS+075o!nSY7IMx 
zYtL(KDj?-u6v4Lqkkfc7Mc;(6tueJU2fv)J950zVUO#js+V%Uda!w1uQn`Xdhru7zGg)@JLe{LFR;RP;*5{IsDQY%( zqX?{*8y!LCW5GIDsYf5smr@Vhfz`);Qz!;3!R0QDx=4>;zu83XFh6s? z<-9h^Y!%$0UrhA?mP_z3|ALV7w__w^bd14!ynJY%7tE8bVq>eKIEtg@-UC-Kb1=yl z*g8PzwYClTS!gwhVll&*6C8^!2X`@O&pyMh(Xwsj?A%M3M-s`QnuI7Io|NGaba<#h zsg(iR$yOo!jd)N8NBaoYTm=h7r(ylDy-jY_(i9nOWYQ|FGG9P<#r$4^Qjlhj#a$>J zk0X>`G9Nj~8lLT91FC8mB?h;hSix;!(4gPPf9(Ve8QCAfa7Y=@Qd5SK5MS8k+<02- z44Y-@w}D+W*~Q(TbbV5aDitiY9NQ^Kb{QK}!k!r|6e}a+LvZQLbx))n1|FAN*ir?8 zW=)I0iU4KS2JY?g!|35-#`xo#@+9+e+6$QD(hh@US56=~rm2?YWoFwo-dP~>2_(5y zQe3mBs$9owCEc<>D_V-1bIGDSMV!F$daSgqtu;jomRBqUFTU(IXr1xB59giWHBp0* z5y08r?(4&|-IshJnPigor`1gQ$@+D z@)ki}SRPcju*3G|Z>V=UmOCTw?_ivG=%&*@&Gm?q)U-%)HZ;|$!iEpCzN8wf3@e>U zEaKVB`kyxYf+O2(mk$Mf=Wv4N5%;IibE5X#!JC$UIDDs}17;k3Kup*TET@4}#l$GP zpqXW=Qdqf;55H+tbwtExSyn!RJ><}GXEEeK5dB3C#AEGmIW!YRyO6Bq|HJ8Ahs31VlMpr#;D;Mbu)JkpIQbnkMF60Be}Z2-gt?`IxXg!Rw_-%FU9b+D6Z78pTueeUMq%1BzCt zhESXu?g}J$)Eo(Uh|A`@K|ss>hJ&mmw}TeH>h>ElQd*A9+kf$aK-+xcP z-oLLdloNM3H5cq&sN7L)yqQ>Ua2+&UVG7qtacfNkz2au)iwOVD3ivX++4=g9uty&4 zjJEIN^xc2%_Nj~iw@dQDPZ9lZN&LU9r(b;ibSsSi_jGIPKK|ct@iFoLc*57#$htY1 z0Iv^M-FQY`Z5aQGwJXfnB=3{{e8#SAx4^#Sd?gkg1KL*;??O6Nik6i#=)era@F6!k zptyOrQ{&QrkOgeb?WA85;fbWDXiud~qE1#Rb|2`NG4$+WcarE5VbVGJ7OB>ugWTI^ z@IvBYIBAd+k5Jc3qU4y+tB#K&=rAxxw_bQHa*dOy;N}IHsj%>YupI30tqET^k^1p; z$&+FV<8RnUH0DIFHVr4BaBP!Vo{iX)l4M0|yFjJk@nD0)FghK8{`I&5*~Nlz$6GO?BSR{A@9WQQ-*-+YDpsg{(d-0l1 zM6|)_43?ke&MS@c<-HMBi#%!bDjeQ0)gAuXwKx2P- zyT(Qu9JEf=PrV-QgkG;@`CANs7napwU4M$qq+NPE(%S99lci|nBpANex6qD>Q5fk` z;=LHZrE5w~@+4iFrWEqR*q|tk>1@t{u%uwN2+r_dfiB6@v^4B~X{c`_5D z8rF`}`vwsn(^MENY`h445LkvHU)a#9*JhJF z6g%{XrD=1AT;OGJongV%t5|X3q+t{d!tnx-4j_d=dd_^iRT|PLSXGD+Z4wzx-Z|qd zOPR8tnn}XOlJBP}pe1Hgl<9}I1|AgXr zwefduQO>^NAgLOrl4T>jv0d75o!ItrCz#CV2_C2&xRKBCkMX}|qUw_dIr_Pc@d}he zC%&*t%!p$x0W*V2$j0c1p=8B5NrBn0`#2;m{wgTgg6Sd3S|93INQ60ek*F^RT#7m5 zrh*S7mZ8{{qavNW%SP{<9X!L3>_Bc-BU1J_tEEz1Z?1$w84ZjP2~$K)SYP7)!R>1n zk=EB$gV0X>>JfVqQ25 zrdcHUJ0t~I{7sI6bVd!0^UHKb%6|i!xyEe=A`r8tZX^!Z81U4KZDK9%Ak`$h@d3cr zbgfb_v#3Q<8(6onDmSv>#sCL_Z9>pADjy3PD8Yim5@6FOr032EcBs+flSDz3gv-)c zxI8kfKoZpxDUjB9NS=Ev8db(jBaTgZ6Z3eHf6OF>j;`Ir3@A%Oh|6p~Ws<6c2NTJ` z!j}X9r+>eof4`)EzoLJ?rhor0{rgA!cWaaW-J)&8kYdl#w3ZKu`?%rval`N9hTq2x zUl2EZt=B_z8C*KO9_qV_y#F}Ch?PkwAB(Xa$~BHx#|_)Z3zEx!ATat6UB*VgFv+Eb|jYM z>Pz-E-JJ>14LnXY4;L=3620U-+$n*?)How zWrda}yC>hi!L2Vzyk~0*n#U@W*o2s`Iq) z-?rOpGO^i-&aJH@YV1a&IUcxXay0UWlfOpa>@@mwGzrv$r0X3d=c#WznV~I0mJfi@ z@Sdaf-8A}U#~-1xeIuv)Y{wbtaw6NKE?Zi*xGCG|qo)_`a+&Cv_UNhiyxO`lD73Dq zt#CCpo85n8>7>Q^+iPoh;!iE2CLC>IMAK^i{rtyf{ApbCm*4XCENFtc+SN5<=~$UH z$0dC5iUa;VSZb`swvZIp*6x%7tFj-S$VvM)hK4h&e9tBzhSZ45`2+^0KlMUt-Y@F^ zxJ6yrrkZwrl9e8&ztiSBHBNRht-(6-x^gd_7W10!4(8*jw6!X+G`A(GiDw1Qj9Lx+ z{Oy!G(5|m;-Adz=G#^+y&LkO&=wU)GhKiNBpd`F#&L=t&UAi*LaKL@M5Z@LtlU!5| zM^2$}4Z4J_BOM@@yP%MFa2+N#5_>|9HaxIqP`Xj{|4`pJ$QG1BuBr?$ht#HeKJjB5 zu5*uu`uoD+yjDE4-MvTop1Jx%CU}S~oD_xXGo3tgiKdLY13Ee-V^Su~L{ACKVlujo zlDI(c%ialvam z_h&ko=B*O}fIJlptRR_lSXdw_02eF89=?XHrS690Cf)UFRi?7>py;p(60H>A3www! zl+7pPKsYf^p&}}#Erxa;K`E8mvBkQD^pnvD3M$%u*(i#m$L*EwOzY8c-Fg^gZs_p{ z?MPvTJ0MWymTq=78|{_GAK44S1dhp-ns$H@j!OYnGh%(0Z!G&kPLv^+gJmai|5}Q$ zRd}%${4FK~(nfT`826kX8<7eZIAQ(Qad=c5}5^cfq#=a6oJ&B)3@zSEKCG*^-QlgbHD`yYPw;U9pa& z_uir-YuOTdD`(JC$yzmzuAkILjhF0pGgPaLm1x3|=3tu7KKpD@JMiFIBrxDvgX7De zui%oag`4xQ%y$u^dY3cM?BYsl9h|V3xJypi)L4&}n`xmZ9q8}SYPQW|ZwVlh? 
[GIT binary patch payload (base85-encoded binary delta, not human-readable) omitted]

@76j&)sW!i3t*WCo&DdN0BvLXko&=L|g7+cF zu&7h;mGE$M4TMdf#@$#cGw4*wY0uCFk#t+4Ep;UpIw#ehn8guzbF&1pM zFdXaB2$A1RC>U1&;B8Cys`E}I9Tn;w`0dXwL% z7nq`*m3*7{UBb%fTdQc~Fukx=bIGOQ=(Cs$kR51Au{M!aW&j;W%S4D?+gvnk9?Vq> z2t_4eug4pLt}Qxt+IIbfGd9^geb~Q%Q{ZZ7_O z>6rB1_l+*QSnO!yIH$H{Zn2}RH{820Js%b!Eb7zT58j*wIC<~r;%C!7=VsNVP)!vl zMnDQE1SkhbuX*e(I1n)&Q*JzeLg~Ax%eNO=t_qO-64k%pFCf({wp}P=rv4GE(K+rD za^b9sljT+!+I<%CF`K0*)}DPJ_vWj6J9eR0!6D3nzKZr87RR;EGtkr#X-mb<0w_D; zEmSwlYaxBoKMGbTxi?d;hI%QGl$J*t!Q3r8z2ZZ|y7k$6*FM~O^QQCW{r1+O;r^%@ zb?IyXorGB=+#2f~b-Qs&MXj>(NEg**v1h+KXh07Wz9%hyHnbq@Dva?{xc2&a4+i5) zJ^uy-gVyv4sBYNX@wtLK=W6(xDVhQ5&HHz+NdbhQbmi%F+CRNNIOIwjQ|)8Sg|Su~ z%mPh0<7H$t>Bs@#Tr_f3|FDmhQ)Hxs^D-KUGo=M4)AGQfF&C{%tZ?KtE?VAYQ|=I2 zmvnn@uC5K#t*yF4(5$(_=^Y3 zv~ZSkyQd5p)zux+&d@MIM);_jed`vkS*+#^XeZ7t2tmM1tokzwYfKgng*=6D2aeaW zWvs8^r%8UGXQH-Z1UQW9l2zUXWOku_isaaQ7CIDlZWgPQ9ajd|Wr!@P@JjCFQECNd zHdc~q&gOkCg?hQPD$4T`0X1icLQ8^0jz6mw{z6JZJiB_HH~^t7VIM_5#W<^raJdBu zVU*D&DpA#Krfyb61RAhn-BsHv)9bNyBaIR%3aFyul&dnGCJeDORu+mN zx?0LxQN5Bi2FHBDUMMohD4gkjQ5oY z-OT6;+^wNkVN^nvMp2BdQZzcb3&pHwQOYDjri=jX?6$WL_V$S@QB7&nH^3HZY{&7q zPvT?f9Zp7gVpy=yoRIZkXd13Sm}+X*!2(P~18lXKyPXWx4$Em=UuoN-3bG^xe5ZI7 zt#&>b!c+`Z{mRPQ$?!Bjo22NhYq<5}DO$QRC~t7;OL}1lWLd34L}O_!bxQaTwc(qy zqZPMa26YE2r!lF!9M}e`P=jeOV&Hkhn?m1UJnWb38&7;8FyK&aC!~9IT7o$MAFBPX37qLyPC}x zLn*&m31E&*R8Rw)T0&)WHt8s`j&=uZF|V?Mf%e%qF9uGLIW5>=7>0A~Fr2{uWfs@> z`oo^GtW!MKd~AfeOrK^-=M0EP|6vcOe#)^!1!UIgd)YLjdDDlz&RgJuB?*Tp>uur|t!oGG@XZyC@2Q zz3tt1?cL7d?#Dx(Oku8BGveyl)qJyfIA*69A7g0xB}QzbWy5a5`OocGYImv5uZx}W zXL$9EKzIP!0;DWS#XJ_c=OP?3N|swjosR&!_NPt0cfo$(cLUnC+0)eF*CZYq?AvN5 z&$kfnH_50kr*ITAB>(qMpQwz@{}kKLSHlv4_-8fzPeVlbr%%BCdwz2oD`CUH5kKNu z^p9WO!9ViPyZt1K8qKM9x5@&k^7^*aN-np2w|a6Ri?XZM7>FK)1wXlg;>HHiI9!I_ zgn7p>%isvFWIe3)C}2?cOX8x+SPWHWHGzq54HRn%uC*?RCn4MTPjHBr&Lgu z+4ezDqMT4oH)oI&-DFeSpTk+UsAqD~RYYfMho~L7_3rhDt=GH0@)f~ZvmB`G&8vzs zlfUxhiBGMO+f6h)!>}W&4`v~$OZOH_vKWT_3P_mR@5*UF?4hs|;bj>2Q7&{wUZKFm zkCCvryKK*x!5Tpw~i~>H8ETCQ)n@i*1J|Ktsp8 z1e7=!biykP2##3_-e4K;n>-Nk zsH3WFs#quu>Q=~{tJ(A%N~Pf*MXj8tf+x2|aD1aigu~tug0OjC`5(+5Y|qv3I>)iK zd@a?lvXyZ+v=g;&DQ~BtyZ~Mr49cMundD4VHJ`%5zlyK@W?7b|B~-GLb*tkhp#pcX zU6DN*mhU+hP?oisf=2wKNDbOXSqm8AEu<+7h)bDxHQ`t;u38wDWTK$a89-WE#<`de zBN!M0*5-}WC-YI2fQ%mDu7d|h{>7E>lowY4JBl^nxhZsUb>wGqS=FLn+=}`I4f)!) znH{RxM495_=;f?kO~glpBwXf>?;Ji+8dbBSajpDOY2sP@4(SxNCQJYavd&#+mrWSi zf;!2C@3l5r;|K4kWX&JT^0dzj-7q@z;94_M2kZWI3v3ufj#~_ly9T|Y*B(dC2QUT~ zQdKu-S%_OJLGU_86(Ylwat(p4`v2%iK|HR5Lw^2e&iIe%Xz-=tKR#JodsY(vakY;B z_%%LN@xNihIsPMj`Ip6itiwOn;UDYpk9GLRdkp``2ifhyKf1|{+$fIX-)C5N6*b`f zWt66IFS1_UF^V`0sl(|Q>=wzRP}S3ph4(1(NI7lrQZ530>>TXB?YusC|6#x7iSE{g z;|N1m@V2oWE@Mm|RVwem+j4oQA{6=ol-ErDYaP1HU`ZfZT@Lf9IGDY3?A~Xs+M=^VmlhMBL(+R)OQl^ zEMoI~+)9h0yK)uSON;|!V3hD?pmfVx#&R+iTf%S4f+G?4s0Vq*3s(X?gN6}d)a!Gtaco7$HIp7x_KlW9cD9SaPegh|SwCQlL&gyJVJp3!M_ z{o+~Y*;DTgNVnHsc9Wsuh3No>6dHu1f-$ilKYQ_vjBtH~l(ztm*QdUHTJhF0k|k1s zt;e|H*!IT^`snHL)RNL|)C)BV?_0$xO~(Ypw`ap=`QN?DC~FgCa;Ksl{Ae(?<6{Ky zf20IPah-N?CvH-~ycticK(~A-Z{3WX#KYUvo;G^f92jDW`_xjga0)d%l;jEvwA&X2YdM zj}|2@#bVQ3i_!ezO{AHo&xKrp;s6H_Z z;NEg-;LW}0Hf~W%BAvl{LLOnN4$9H>;5t=$FO-{Lb#rE2ZHj_ygo;EwD{Y79lsJ&| z%nq5W!~MyaLPR+lH{KkmiN_h{%S`K*(B>Rqm7;zctz@)LO7TA}Zvw(WUk4HaDe-J5 z@v=mZeGVl#ppdwKimfIpYSWklJt8!#i`YBHiTce-uUGM~$M*8%Q`*p2KQ)1ktwaBc z)vr31jr_*-$FHvJGyvI}vupA|JI1<$dx%$-!K-%g%3L_eHHGYKeSYaq>P=mWYaQ*! 
z7{<(jY&_Ura+rdUmYK#r2|pplhs3)x6$VBE14P_F$neE6_u>|Eey z_Gw6#aw6g~eEj#3*N)Ns1nBs-++{b4?KK0r=w>SrMMf2lX7{V(tEbRCr*qzN-^1>y zE}H7RTHd^Wt?R1l91gXgoRw8${n`G(uDCt>y4om>rc`*}KD26Zg(MqKZQ{c_Q$;l~ z8BTA+s-qj3RtL_Hy;uKm@dsj5e0pQmaTG<$5P>tT0LO*P7-PYoOwj2JHAv8C7N4MO zqE$)Juw;Py7VgymN7a2L-J$Q+pj}d3#hxWqMdZEeO51OCx8CL2p)j~5Rdi6+9g)6q z2%?5)X?q2-7T`d-6J?sEd)o(UG@2^cEjzBzKUbS@f2}l83;BKlYBj|NKuc9eB&>X|ae8`yQzT$Guzr zr@xTvpG@S;4sto_;ot%TF%+B1gVqy16Syi#;OSc3TV9q4Th3s{PLk`*;_zp8L6*EeZ#$hS0qx^&-iJaRaQh|&4A+bS)<7&!jrE@%> z`Cq}ZHFuziEi|3fxzj`9EV!FoOrPn70e3oD+HH6bAO4O{{%}c!V!X=$T(VIs!4Lyi z0wd3JkX#j}??6f;RC{x84){AklIMSvkuSR3QRmX@G?|PxsJ>lN8KPRfZ%+mTU~r3N zl{98F!>7b;^H8`z7eJw5oPt z)|XVGyW@1n;JooLYgYAWXRm#Du=nzV;lFm2F##{D(pr1l*3#s_L-~P$rDt1Rn_q?k3)`!FQ z&?`iNaEx1%aRR+U6jZkH5iJ~c@uo&9?Ou~T##UP05>;yN{Vdf~r7XUjS`D~|S9>4r z@$aWVw5~sAYAi<6W-ZI743#bl6i-=_jPoSP#G`S2FVqG17DrzQJ!jH^?oy^f)~e#s zG#T>E65fb%JdHM8C${*?Q>hmzIn)@BAlN+=`+=^RSoKQx#Njx^+fHQdFbhNfQuK?g z7mtQrjJ}|5Lqr<}g6u_vH{|0aA!E*=O4rx~$!!abv9*T@C);84pIN+4$qJ9Hz_WG+Z{v+3h$Nhx*T8Ul6R# zz1%uHW~tf(A1bH+(ukML;3&37sh6wR9KsbrgC(5DKL%nrJUo~K`>G`2zOmdQA29Mk zEx|AI8t99_JI8xi4ohk(i-Hb?baGBy<4}%UeXm5Yra%)I&7r{km|C58K=$_EY;EuQ zcC7}w9+G;ySqXsRY&suirIK!C6%dy{b{MU86+uUN9rPP0oa7H5_QT;Bo`03~iUPMK zZO^EMv6S6G#nl~@D(|;-Ero~KwO;@9zJ2Ih4V{5hI;KjBq)rJQXFJB@0*Q>7hFUKs zi6e#nqLu6{ssQMh6CHzU+j>tMQF!b2(WSS9wvwG_J~$GWrevELRZ$9X zvgl+_&IzX}=9+Zl%iEPqo+MH(+t0?f^NGQ)rDR8q?I5Bs@C2?OUnnK*@7`i~(p|*G zxrixNVP$%YdOhR2&Z7MeHMX*DRkQ}u>nrCNSUWf@j^t_gfGwE@;f>c%k_)Q~x(Cz; zO!2DQr{>SjsRQs>HB4sNQ3lKh;fzry!GvU3++PId$u|!GI0pni?sU)~uhVH{QUA0h zh_lz&sl9s&GZ zw!A47uiQ~}Q7|DLX`=J~(XI{w$!`Bd?LJmJg&^7onxthvCN z3#_@oeeZ zD&kzxN;Okpl+p&gsmG2XM{wC?Tyy|aI?_x^xpnt!)>YFeIgKy~YLvA0`$u#ETK z?H;I#JG=Y4?{?Y_cUnwEhp`L7R7|<8(VEm`Wcg^2rqT|utJ3J=%*}wH%(@GaVzFpd zY7CAVfIZl=08Znlv79i}-@^5|jN|h|s?Hxy^&Ud0-Ytx3SqRnY@Tt|IQ_I7qivPrX z;ZpB0RO-wyskaG{YOm;vE5x$+LgZo?m{6k>Xq5J4cW6c2!{eQk`9!CfhYy1&k?-p8 zMhAu@_5xdTVkv#4Fr{ic7o8lfXl78T1KS@{%r_ho>1-$~;fV?$ySN8!IEp*_XpdS$-my zN|S@uP7JGn_&<#uH|uko{ExVm=7vXqF`8YlquSP}R|#gza@_E|c3^m$<v&~BEo6=Ak|5;{&;zD`AKSvgfISqTZve&x_Z zy*QjOwj;xH<L;;WO+ZeQWlIuPnrzA#!wO$qDy@}-nq$@=DO;Ple3&xi zt+52Dxk4f>p|Y@=tjotT9UhYbIpi}r55%jM<7)vo?0|knkf7`p4*>gxF$BE^MLb? 
z*ug$iv|%(!M;IH91p(Ods2WGIzufW!7|`@(?kf|sL5Q3@)3(f$$O{%>Rh{@HVyzL1 z7rcXV!L1pF^7+rCM@ZGM37|kI16s6xk=x=3^`yT$1)x_UCOEu`n1==67-9>7UK88H z=cAH(YSuBpI_w;NA-cCQJ+xT}w@882=e?ULXcF=Y&I zTsMym72y8nD2w7%a;0RMo=yoVP*DXpeAC2&K(9vVjE~_b58itF2fO&M1ibVlRHiS1 zqOEfP5wW>w2cW?1g_H_i6jiAih@t5$Lj~1!$t;#_77Cc{dIaqr=Qb$?ua)$fNY$+r zle%X%y${$M)E?)l8v%E&=(lr4zE!L5(dP63xNh*imHK|}JO0dh|EB^)-02c<`u*S9 z`pW9Fy#3eH=PUL7-`Dt5-2W{s@CBWIKxtv?{EzjGV0|N4-w1v?H-fjm53~-lTVD!R zq;mY(=y$mE57mhpomZ$qRWIVK^9D9`i`22CWPpKzbP!)gc9mbk6bYJ!2TL3Xj6Ah+ zR<^K!+j|Kpo*~ZAtiMA6SbUw@#Bb>zC%>(!U+7u@U(G{ZR^CWzm_ZifACOmk)M(?T z`@j*rXq&5S_nIl?P>6@&Sw%XjL6Sxi@eI{}^wka{hQc7rTRK&_i%hgE;I_wntnqn?w9McnnmRRE8~FzqNm z;wU!Xfe=ZL-JFT0fYelN=ohA};NwWYI$AlV;EX27V?P=;6xuM1%?eEeKGm4z=hayt z7vCiuSB?-HENXe<QHQxeTlLK~qni4uXONtbU$?WB{u zJm3gB4@Vib@10D}yubeGule3nmM{kEN>VyBP<}1p3yuaw7ls_qPy!d_!k5w6s6&l< z=iw#AghlRl%zMO0#!7vQ>RISdl;%JI+i?n<7M`rIF;1fK6=o-0946@?>@UjhFk2^+ zlRV0SG57kWXy9#0dMuJw`8pd{0i5+Gq^h!2Kn513WNS&n0*nf*;@zR@1Zq&)Hds$8 zXNp@}7TQHX*pSd}1|5D5mr(rTXk&Hd7-Qqx<%)rkE;jA=k1m`7nZ%KVj$gFAOSzic zFB*9eP0;D2_(#sLVi$u-0hbdGP0B)@>xEczDXDY!PE%e2OjnqJh!t6EkPj&DZH(8|Cw z!zNPVw7}cu`vwknk7G@f$(hn7oP0mNJW!1xi9 zyxgsY6=qbe@hdV;aO&E+Bg0CxNxQ2UiPzEi{m4m{ROl4Uzb@7AWez$xDZW_%CABs^ zT+yZvsEb0AbF!C0WOTx_(`^AJWmZN*wGY!+Iy z=SiJ(Rsn08Y%tb$l^m~S5}fvvuxDRCs;f~^VI#r8A|3U-fg_l9c}91LbRUIjk2Ya> z1V}eJAdF5h=^0dSc^&3ZJEhlEScTejs_@+;Uvwd4Q&krA9xcT3Fm*f?7XK|XqDF<0 zSt%*^Wf=F77vJC;Dry>yIijYzEztLoL0X zE#81)#fz$VS%Ljv6ge`3LwLDKVgC83G6Lrv8q<$Vk>Yc z0_ot%&p$VbmfN^k?pu6|swG3{P~R!D9NWA!v*m{kWhav@Iw_-dOMh8$r&$5h{j;Gz zytr}-i$#LY7wT3%{=tkVm$2GSvRIbG7(dqD)nVuJohhTppOKq#EV(Hc9~Kf(+9qH2 z*-bBnYrvx!s;yCOz=mb)6P%*t3JcS{FR<+>-vIz9q8T+c{ zjIaPSNT(D1H0NbF=HOJb-O@Pd&>92sDv1W9!-qi4ianN0##{zfXoD!E6tp2G^^#*j z^D8;tF;&SS7?NV2kx55xNl&IqWR5wrP`+41aw9j7`7Db|mg!-h`$W|zGjn*AY9%>z zIh1&a_gI=aK|}+N;E|bQ_}#h;Q}y@aswMj~+cd=+`DN9#DS@i+r%jhZQSdiuVm65G zsj|&uLU@X{dRL)sWl1#MO$y@lHb|pEav3$q#<5xcN{k^Z-iqO5)50{~`SWX1d}DE% zV#YCGpX5LlkDLfy-~ivWuR*CgAm-YT#bbwdA||kq%iQCK_YJHd)VZKoW`syD;x(IL zh7v00@^2Q<)iR)8Z8PErHss9h$UQG-7jMzo>|T1i*zy*GUqLQp9G3UmqT;!O!ZLHS z4hmCT6Id*;)sMrh;VIr_GRkF=aLTNk0Dt36 zR}d;MMl^xt`jYopadVNw_%D_fV1rjIlB7{-iSwvG+Vm+_0}3ccL^gJYfsz@V)4SmN zAj{J~t}b#Io>hpWbASbO?^{{Vs~doV@RwIYbPe2WW~~ERXRBy>@bTlv1-2FBe75jc zX^Qb{0CTD}^u~yIcQ_fS#|_Vs(<%(v{PA@3R#anc4*5nzJ&EmYL=QA}L2-%|^37%i zkoXMK)pWxw+FUCs!JDti`)yArx)S}nv&}dO!|0~N8BSI~Fk7?KOmexVY*J&LQ5Z=V zX%{W6AMM7|RXW_)atf#smcs-D_W6QFHK(ghwH?pHjC--x@~)dGQ+*N+b;jqqQjw=j zxJNfJ$`8hPv%Ed;n)Q5}uAWZ>MHvB2mRN%_Gse16{L=Dt zdYM&z0zk<5Yfj&w(Zv}X^jAb_TD7JV4j`UPysihH&a%d)>u}1UX)6Q8u*?`$lXin? z=ukB+_ab|z62Ci3jPi{OYynTa>Ki zQ@&-2Hw)NFvS=(`2sdVAZptOBDvV|>@W$5NLa?eyhLCFeykh%&hSw}Fi!;VFsG*** zO0ZxoZyiolNEI&vcBjykvpED!LQMs^hMCuq(-2dt!L&u?;a2oiT#~dOrYw2V)gcum z&W%YF-!Tu#c8Gv5$<(w{sMlik^ezJR(4eHHB0UM%VKf3Etc$bSzY#$(D|SJ+WHHSy z8wALceax~nhz2Ldq`Pzk>U2PdP`qXg$w~ACC4bxEn*`HgQbgr=9Cpu@lZc(H3Ug5o zlfu>!+F&#|Jo8AdcqL;!HNz!))85#DcG326F_&#+r(8f=Xme5S!^Fw~CMeDA|0$KW zGF7eV-lN%kN5$B6(8Z>O0K|AhOhQ07q&rnIu-8Yt;$EGZFgGtL8V$lkeQl&!$msYLHtDxLQ_14D%4P zW}%FsOBrCjq<~`dvTXYC0JV$6wLq>6v8s)$>(7__+#>!v-M!w$0DNZr_a}w;@6Xrj z`0rokGbR4#*>5`h=h<)n(&(SH9e8aAUfY4!cHs9O0$v?y-%SwsuNms|^WwleVLX4U z!+3t9pWE?&cZ&f(oBzw_e?+dK=KsFNr;h(#vw$@VShIltn_0kni~;`*@PO#2_f0Z^ zbu9O9@H3VFJ3ECn4F~MqL%-8!2LJc;>FN`g|66_jY;C>f|Gvh@<^R-#1K$TfmnE1? 
zw_vyjC$1{WrWvo_eZW|9_`v&d{jq#qKn@VytVX~+0ndosWQuo9=ttPU5%<5L&laP= zttne!p{9J_L|!CfP|?2$7WT$$D>2C?1O-_GDsMc#*dG|}2ch%%Xu-_|?WhI(6!2g@ z=vl4Mthw2mo2|Lo?~a?b&gfeQv_K;({YaDk3fpU-kXt<+$$?$qoOt6V&Z&JqFzF%) zmj7O$3_DcsOG39JlpbsWas(NANB*nl&oRcts~=bB4@?d$L4LAMe^#E@UZ2D`qlNuL z1%`qTsE+w=!S+wZq7R%$wygsD=f2rGgkHS$kGX%{n=_yk*c15kWG(N3s+%E(IMTou0*3gs=Jab$zvlF7PQT{#?~~Ivr}5`8`b^=OV3UWIzrBr2 zYqyK3SMG=VSH}WTvr@*NhT?Ml_;^7xlYp+jlN+GL8%n2}xOK3<3l`^#<(c=b;COWb zZyEMx8ud&Rl}_hie+#Bj9G9Ti57ZMTZliuUhH(v4d}Zb6bZUq7F@+{0#5CqY3QQLX zpm5S8UM#0*1MK;^BEmpRq%{4GJH(G4{N4fNLnj&*(3VF(2`tuzB|7`tPt^fZjpPc?b%;w1(!%X7s0kBSoNoJ?h@WTvvdk%<_HGl-Dyp1jI2y>HY z`puF6mG+_4`x6DiY>E`1dtG(`Ky2?8^7Ha>WC7E276+iub91;>-T&oG?o|G7aMkVJ z0|Pjn|65&u^6Y7z|9k%QNzMO#jgQO!;e^kr{CyjZ)8r~jFEU`NdeJXoN@23U2V;+# z8?3p(nj5US!S9Y6l=Ju&>fa>j2^Dv}Dlwtk>w5(P!O1w+X^H~DUv%ulT zk&-G?0mVX9@s%h>6y>Bx2DXqafIkj!$zrR!%c1(b-h}?xSz(HR9L5+AyWwen5)Hd5 z_=f5!^uK$az?fT{u=C0elW9I<&KVg=E=saTFW_}TId-Y`0kOV77t;ylM+hl$ht#&( zKE(qIjv^2ZqFe_|2$n2PBe6*5AoF(1C^r~|y0F|#DJf8D%p}do26S*q94$btihKf_ z04E$U)Q}c&%}I*aVwe|>-AHjnRR>`>Zai0$s1SdtRRjZ}z~<_=q_S5?QIRO6TLRpT zdwhxHT-7f`jKa9%mlS!pP+5IUW0z(eMSxZ@d3UtDdTa?v&Jv(20=-vC%h*Nq5Uu54 zjU2tT-{v>mP1Nx8H(Cga30&MN^shtrVA)i?^ z;$W*7yb1AFoDi3R!yRLi(6Dw@0NM#soLSz z&QO8jmzI_|j+6=_N!@T+{u7KW74|4Xq>3GtMrX*~0k1mLaDIWB$yF8rTQd+MBP=r#Ad*6?cnhKewf*OdrW6Lm8)(ui+O!#xo5vhQ>VQlk^mfbJNXYXYVA5TpyLxpuS z2!y?ZU59_H>`H}{2tU1f>)t5-^!MF=P(YtIowr-tKkdESb;@%*B#b}lt3&?NCnGO? z`eb46)2DqYEt8h{Ww1II(T&@OgXru%Y%7etdfk8?wp7tvxV9LotvF9Ome(Pi2qlq_ zFks@Q9>F8e|Mba+KW@VqJQW&lSe*j_p%@a00uay9a}FF=s9u-|^)d9&1^eHVD82a^ zL#zU1)i|6>is}`6NGQfj*m*$_@hXQixVi)|diOP?ml#T#+MqA~FK8ON{PD%J3Mgt5 zs#sJ2wOCLB@8)Kun8@A?QKgBoKe#kvnA}y%f@OWA>p zcvcnc*Zuh0l3?f!CK%$1O}A}1{3;rM8SBK@_+dHJ>I(+d&On?+r3!@@B*g*e)}Ev+ znD@eQh@sbE95)su0R2LNvtarrl(~LE_lc59lg#W4$oNBwHaWd|MWn zzB=Am5^!?ON7op`pVEIxz6oG{Q5%%Y7Qkd5GuP{|rvpjnV@natHU@+P4nvq}zL5c) zL?FIX7V9+W!+^rE-J(aTPI$s+qE4c2HDjTAiIGjGwE)IYxR_Ca6k04jIDLdJ+s7g_ z?0}QwPxyOt*usfzSL5({q#H94^r;(x>E0mgS1g&g?Kk!sV?&gRvJW$_%>|J8(fe+} zU82G{Rltz90W7op+Exq^vJ=qZmg>!M9{x#55sO#=A0f1&$o|`u0Z7kwn>=B;O+VyAeJ_xhUuK6=D;;P3ImeO-azGD9w8%0)WUB9evW;*ZMyN@3~w|@qL5wf5(ZheH0&V|f^_V;4yKfby*Z8>izs`g&EVRdP_;H3eo%&e0`L`&2Dl4@q z^{}{miql_KTiw<7to1!>eb4$m-m~V+3-+yt)ZNGb3md zS16A9%u0epGb4A{5+)WJCSI;u3!j;o0!hCmS}i6#@KV_m^^@enf_BQf7^){30L+cA zd%0DS{(K1t1R;iXGeBtG(d8=uo(#z_$-~rK8PMqqKABESS|Hp^NqWHYXK*Ou-A;5(AqW;5e~p{}@2BjCx9@j8yxIMfJ!<^*Pe9$bcXvJ<>^2*p zt{yejlJ2HBBsdbppsk@nwcz2~rrdMGW=i7-^@4EN02%Uo%7hL3mZ?rm`m`MYC?(T| z!aM-X1A)_t-dtnMBHCyg$duJ{GK~L;SNG#+V5MfktG*m;OJ&b>8uCs7g!h*bkPEAS zY|aqC6WZtah!sh!XyKcFIyf8t1KE#&I}r+1u(wTjWo)X<71F<6oK`~G**lmQj12SW zI>1bNn91dl>Zi^xC&f&vk%9G8FD=PKEcYvt_>z(s4O-G+_Xak%^d*7Nt9iFaAq?L< zJIiiAuX-Xq*Lz~Brh=euw$J}{9VLa@yd8zpb_6`C^j4|&8j5xSOFL5E&0(g`8p?0u zPn{}|N#8rx!FkkC?`a)(=**DU$E7#fS=7mZua7a; zlZHgOY9=E~Z*DN561%!>i`j+~US?Jeo<346@38d8Tu#X>d9|~Zz{r;_I#hGb&^00k)PzaiX0QdK2g52LP1xk<0z_!ol=qX0!DFG{w ztHI9fFG48ov3`*UZtqsp^Ol32mNTkWt}{H_YMy(&!(?}O$(*L{K-r$vcRR*SWyQTs zDJ+We8x?1b%u6uj5+OrVzRhw5Tm8kyjYYuU*5OZ!1>Cv!5PHWxvKliR$I`4Y2Z^VQ zY8LMpGXAbF8K{HiJ|WnmGwK7M=Q)PhA}vnyQXTo0P-YOnkMytX+RdNr}%BJ$w@~3a_Lz2H0+XEUvuDa zYWc+wWh&h~tcgPSw)w)DS@vC}LC~sO_{@N8!d!#M8gh>FUJ_;08j#6&XqXaUG5|4; zEtXZZRbuus$s%u?I1@dP^3ZG5L#Ync=07kOJqs@ULb$dW@JqwH5qx=L-EDOqX6lZ! 
zP@&ecZ=@bG87D~N z?rvoCDzW0Nmf9FOYT6vzoKw~sql4KB+ajbRKR6{bD(Dyg9S?nFQ7)Q27#iLHJtRR~ zOYQ;#`R*Lp$`S1ESBwEB>-M8?py4>gKY4GFa2+tv7GX>+Kfv)YmduSoHZ$`s}Ct5hbVe%OtS zQZcK5Ss29<`;jr9L?O7G^4_wS#e=vXru0tpGZo;SAX*l9;lJhpFDTLAbBBOmWMfLk z*Ycc)xu3V;ZY+ZF{P|}S3xF{XY0q6xo7Lgl>#n)T)7n{90)jDKdwxlE$E#y zTLwUB|4L>HAKtQ$x0$UUS8%pWw^}ZMatd8aj?JvD-@~k~&p)f{MVzhQ4rA;2SzVto ztLwR0U7t3q>t(aLe%o1Hztya+)2y!Bv%3Cyv%2!+^JjJC$=tKL@}z*Xl_$5tm;(W4 z6+__P+pMlUDVbG#c*{QCW>#08;H;W)wOjyqm{m+$RW++8^9C{M_f;e8mPjTU2uRzz z=?kH9p5DB3k8ktbJt(Z_>*deg#af%t)}K3krrQ6;_Y(eTcKrXfV*Jmib^h0{@&T&F z_HTi8TNBS61?qkK@tOz4zl*p}wLx-ikX#!i*9OVopFwhb+qh3i!0*p@m8g8RPy?N- zir$%bS&Z-Eu{e(l<>Ri_ynBydC8EH;jSJ)bv{LlOdwh3qyvNhM@eI`4`w({L50*-k zR?8Rvo}cOQ|DQkq8vOsW=Y{yMPuJ@FUti@@%KyW>&yxWdR!7b4*W7;1?bqCX&F$Z3 z(0@91@0R3?T!gATx)l+2wc@RuT6$qYDRcNz;f&Mn%BAf+We1*m|6HQAuT-D2JuUXw zbJ7FoF>rPO9E7p}jBc5{SFDDVY0R`7V@{cZ-4WiKPpz%qSDKF9z88|<_s)IM^$Ug; zRFg2jQ0$+WN2p(RZCppCHRT1hZP!5YOH6u<(TL7IoGW69Ie7j%C7wT@3gG$kT;(|h zB(v1MKOt!<|NrF0voITcwg@m&|Nm??pZ|Adth~NL16@MvT-!1C5BpJs3nG4VyGpC`;-{;K|PPq zkQWhg(V~dP+U*73t!4!l7hpxeMIR^1EMC!xecgy4IS%Ah#r%l10WJDBY&qPlMZU#PXb{yG; zm-|pehzZ5Z<@i_eWwgAlb?^)jZ&ysmyhq+*FjzeHm{d`$C3T?S3pmymH0WhYb0%jr z&^)2fT}e37hh^0w6!YPV(K#^E+qpmhsuY}$Ms#uzt*6=TIa3ROZ3c6IX3nk$& z7{Pgf6#|Wd5oQkjJ4A4yUIjx7O6Kn_9SU$VccrUwW0;6Q4C)g)vxNwLO z3xt0LH1wMrcgTy&XNx66KB0wG5vs9tN$FsT&1MDSPVy+@z47z*Yh%r|=pyM-BPk^; z8H8~B9S(lRGuewyCTBD_0l@hPV-8*d#fhbpNNOl>Bg4W$(pnxI>B8 zYpbj-AWWg`7hDd@%N(eMH4e+mz_g9dTZAcE8*)(7`iV1>Glwdvu?c^bj5vxJlP0L~ zIvT#`XSmlFbz7TAN2RUJMH8=(0hAzxJj-l=RK{t+4ep8gVK640Q^rjs1yjK79c}cF zF%F7TwiqjP3C?~(gD6#yxTy|jo@AJ#sH|bHr;%0yMunbKkt_+vG*wwK7bn7a$63jw zxBZRd0Qt&MqnH@W-9oCa8~3a{oa4>x_=r+}_F;DfhXX|oz>+>@{i3b=lVygfS{>YT zvxDi)-S;SOtmYgu(|c3&5e=JSd)&cE22tetn!c5V9oxpRYbAllKs>Zy*fFzs{tR2x z%$HN)uDIZh8|11KUPu{-<^#!Pb5NHGoJ46mgSlJ5l6w6alstOGbp)X~@TGXMi&_m3 zYI36>ttKuPWA<(96i;Fh@_aVgAT+p@rdcf5Dp}!Bwb~Sadxo_vPqgFI{;6OxeXi1< zcw3`Uh-ta{z%RUSdmrKGyOI+&V|D>~D~hcVT}J86Nz%I^V-9sxd*|UL9+(~+bD_z+ zb{-DTqTYYu_bUv+9baQM+o89*5mdWTh2RrYK3bw zkPLE*7U>Uu=lKZOJNXYpH&rx7Ff(Bw+Vj&`Rmv^qFjZ0vc*pr|d*!wxA`1f097`^a& za00DW!?kL-Rt?vx;qOj0tPbPP)(!7VqF1E$j>G6E>|TUt5=!PW>;~J&&%`GKVDN+P zU}Vg{#~q;l7DI$*0WX+mYu=^NT?X?hAiveQ7 zXzuX637h#djbLPxcpOa;K{-OEU{N4Z^KUDT)`|pNGV_R!xruk_M#o#rX=#3 zx$oN&t85%=@rh!(YNjyZ>7PG#6nEb0ipehksluVV15fpXSXLEB^?ilDHez zQp>D9#V-WzW)2wNQFBc-iBb6nUx8WkvgYkguI$ij4M!M?1$AC)}Y~-mT1>k!Mc> zEg!1x*YxWfLa@`Cm+KBQF~0H6W0V3kp!dTVWbS~~o#@U`Lg_;gy|el#i<&+A&M2U*RlA`wNNwixyiFpSVN11gP*e$?>vitHC-8JQ`_~ zXovZCj$`oIW4P0w3E;>x?giUD5PS$x;&N)wrhv5PEx9Cm*Nz;RPgoUYSxXyKC4cU$ zUK`5{YR#L!+|8DJnLjTH;<^JV<3lQ$e0}yu4w||6$%q`4u{IswJI0emy z>>SP1)}QRVx{S~*Y(zLSoVQdct*$kdQv029^rmvD**!;lW}vLSSraK)i)vlXBb{dB zJc*ILd@SOHxft4@bX?=ra;~%Xs^3{dTBD5; z^MQjw$qNohux^)J{@Q97L4Amh@yTzQ{4~U-46b*_!p)>t+%8rpDAN?DDjfbQ3x{CSV_9mN!7%c1koa!W2B}z! 
z4(~I(@aiXC$@M8OT#KWzEfS)DDqUO-2N&36>p%XO5%uu}Ov;Evk?RzH4mHl_c+AU4 z!Mg1+<_M~Cw{bghgM=r;OEVg<@OncuJSPi*OFEjW2@vkk@jkOlxFBEA?uN?4wsYDb zr<|fPvRqsI@4yVELdp1&=Vj=PKs0N#^akML*D(}{fKT~FT9x8Hw|qke|{80dpY z-hL-3++M5eT+8SByzYt6qt^#c?vj1NDxt2(>r81TyzuC^uYn6tWfB6{1)}_U+V*0_ zDDJ$%k@Gx5JGzo`1iin>zA5luO4W{*D>k)Kv@0EZcvR^L@>*jL)GLa}R=U9R$0A9i za;**BlCNIm%t#}5w(JPH!^TH93%a_Wa99S_CkSJWB__`&5WHC1zwZ(d5KT=0dz^&1 zXKmG`b)ld<$Rh_8pxJxeql$hw+%|7Fe#xZ}nfHcgN@Rv^#l+_CvhJ8Z$<|k`p>!5- zioQS!$gbDVd@AEsfd!9Cts#FzTK|zyb>O=}?6rB(!`?jG`zyQvuU+oDrOOo@M~8sZ zX-*7>OGD(1v~!+9h+Fsn!kHC!fcn}REmd;BrLbK3^~NZ%=RRn>g_SLCZw@pQNG~&5 zpG~}f*c*^}aH^#~jeP71gmZ`mLFResDlIe5sCezf8#Rrt-T1sJ*;ATmm10*zM!DBv z7|l2sYi9RF-X0^&u?x`Ud{g4&39#DJcD;G>NR0Vs!8^~38U0BMdS2wY+aFg34B5>F z9Ox_>+B+{8pknoDQy?)O{lsSCSWf0k3qtt)ZFzrJoJJTh%?u zBZ@G3#3#8sNL1TE|7&>gq5NLoZuaB;erJ7s;i>a$rt#^C`}^bPEqqTP^O_xz=eo}l zPxijO!8p~4H*w$N@P9GQ*A4-|od~;E2YM)E6ZlER)rL32UMa(;%;#(dzji+JUWs{O zA(7B*>8ol6sGK4cNtKbW(a~E3MH1H`li-Se-C%5zm9I#godEU(gc5VXlcJ76U_2*i ztR`q?QZkSu^tSDV1p`b7ilHyNg-SO26-7|6sY;{@!B{BwiI5QdE5++UBKQ=)IAs;n zL6jH`dd`94HC*5gRuU`WzUCQji~PF;V0aUyUC|E>86ucjQ;7BJW&eIxzg1P@@BC6vNGD^gX@j5oUa_C9~_7&|^)32_RQ(Ix;>-RdZcCJy zTc)tCP?iq!bykVXf|ka1vlby*98I^h%8WMf)lCtjB%pQt?bnG<=RE!E?+W3U!9^72 z>fB2bfh^wt49rthsQbJfe!)}-eTN=G#mLz<81jN%9lD58xZ0wszSa@T1PAUP9?A*l zdUkLPvO%)+!$tIUw^=el_{3tv*uJ&=s~LQhH-K7l-6GhJT4d;H#7fD@Q|Dg{NjUq7 zC)LwfM;+yWE=C8mFJ#dyi8yMQ^A1~zlpFQN6;gw~o?1{`J)pgHKH*)6`vNKzGyD4c zBax-ES4>tLgqN~^XzN!e!~wM_Zw$GQN);5L&iItpjmkxiWtVcb+xz#U++@qnxc_ZU zOVE)+iHHUAW%5~FYG<_`=e(=;mUx^3oM;5yv@0ontc3NS@x)eCBt=V;zEzJDtmOPx z^xFX)`{YPLKI5WdO9k`X!lghVv?b9a;TZ;OaSNiED^m!a+qU0kOy>rZK`3v!e|=Dn zGRpOyA4O<0;`?2B#>Q&tiYN&I2QLlwm^bpibkL#oEWbnA3Z27v#4Ju*E-DZ`eWm-1 ziFTYV)r2q#peX~$woEXZ%4s2oX9LKkw6frX$B+FHlXD%d7Hq%kS`lGKQK&xP=k+e@ zNK|XB1c+sa$@eC`X4jaIJa|T>&u<1X&-MGyox6?>ou%SoK;_oW-4T3mdLtHpjl|&P zNY)Kq(tpmqfokhPE|fZR)7=#*E2k3Omml<1$^Avwc4k|}uqHB_Q{YDFz#%lY#=a5_OcE@eAwJAF9zas&ovh=Ud4 zNh6^n_Xr_OB+-a6p)3Bjcog`tzp3=~vzOYBa$LY5zu-H@)a~q#3ufWGR6T4Llx*7W zSPVsl9Vld~i+FU7{Td)p=2UO_v|Y}4AZU=6E3c(ODzQZNDmnIv_AI zY^Y?!-MEj)Vd(uA+Qw0sp|@-Y6bw_)lYY;KpsMq3XG0xZwmFqsj|g)Kd6w^(bW~M? 
zpmtHxHYiac0&N1CInB4iyfM>t;b2zdtyM>VpGRzfVUC-;N}K0H~39mII$|_ zR15Euv#oGVS;4~Xq8Q$L*|?TI4RkG!BzWQ};X?GJ-ro@7X<@p&)W{eGO65(2q3>yb zk|0N*GhuN0D6S-qy9gr{3#7|5Lv**fz4{P0xs$uvG6!XBnA9a@mlaZOsa=__Lre1{ zpE95gS&WNZaUfA7zu2*nWIu771>)07>Z0^qdeX7E1G95tnxDy3K_d85u$OawQ9_&$ zNf#b$x}{b0IW49h`U{moYD@C*wr(PpB6(ilqNpgC67gikCD<~e?QH} zK3?w%TTc))_ybG29uB3>;(il(O_o`S>VT8F#6&2LBtG`5g=q)bd43(zUZ=*{M=USq zu1M2{w9KvWvrCDkvsgo^>7kK!1U@O&;}YR^23-}-=hUjyMgkMx6~awEwS5WK7wo9i zG_VM`>8PVh3GGc9e3#Zc6~=CHuLMSX)vo&rXn;0le{hqv?hw>h|1viz!968P~_Pq zbSK2H#-m)n-gN$8^s1l1UhhQCCB5{SwG)-p>d0T?HDiBs|119X?;SUdW*4uY>slWu~9mav3fh^ad`MlS0X`GC_RB&Q0fz!CI!Nz9wf9 zE?;kYnxY$XN!CBkq#Q=@P3UAsjzVt=!r4g|)GblC0GOD9O`$^&7XRA{nQP;bv%$@D zE#3_kPRG8$bAjkfyj=h_O}G?Bb9{=4rvTaENZJAs<__PY;)mW3!?Q z74@iFw9!rX$6GX}IsWc|5e*tAL62wWH!$+b_G;TAxYsr3Q`Y!9E(xD5E^#GpkO0Qwgg!HWf@I+D`|KNB zipx(TSEr5_yNUQ~o-dhZzy$dKN6q^261bGrq zGzbY>fX-eJ^b|}2y38h&+5&8T$Yk*&)hJ=fRiqhD(MDWl1IfqDRpk7&rx|kNs$s?_ zBzV?od)h$pqF+f|ZFRdPL`IQ*4T;9Tc58Y^)z%^jnqKkISF;;lxkuQdc#q#dFSGc-VGhK-;x;mBZ;_^IMYM)kXF_hkY-se}Xr7V{f} z#qSiRm+asujgxCvwAwFqFA2P(+yQ1?te}FD@8XXI*;5%uucvn5JpC_R>LXt;SsW{o z0cZ)m^FzJ*QnA0{Qp2>a&4>`i)~rYH*tHwuzk|9p>9LvLFlAu<{%B+-(lW6OC?t#j zrF5Qcsb%lO<}i%BJXtvGJlg*E@}b>Und-&R3g7B?pW2wSmG{!(#_{(?-GVpP`YaBz zhExFc0E}0~OI|WhU(q|fz+Q{KWKVU&*XtJ~J(4hNjd*I?aOjkd4p%T|zkB_-rWkV` zo4t#8?Aduw%^^n$;Kn)r>%y5|876%i09xB~PwEF~{BDW`yr}ZS1A6Luz3I42tYS0M zy$NM7AJQcAQ7+?@Gf5U;a1k{7N&D^hv^z@;x9y3X%Poy5$us<7#4MPl5}HkXa^2AN z3;u#W7YC?~t_m6C9vGr{KGI`J6NV<0;WSKu+>iH~Cj4UgNBr3vH0oRTuk^jS_v^mm zgN#U54Eokf1LcD)GEq_BPYE_;HxRDQ&jRo^@qw%e#h-MlKE6;CDfxY-t*A5GY`M^A zJp_qw5NzmzvZ7Z@QpAJwn2W^E)m_8H=T&khaNFetAB}wabo-d6D`h(HB_#zqa33C} z*=*+cA#edfH|L$)34ktNNpIj$?eoH+wV}}U3?kC$ zeOrWF!N!$Ny6~23zFt6fv$Cos#VS+rTuH|g!`iTvI7xQ{k zDE^57(=O&1)yLKK7V*uO=6RxY^VzBKjn>7Lf?WIj5+w@pXvs*~ZLyFgL)}`>{N1e= z3|?mx2l>Umx3)cvzPz_)FBMIK|J#q&@3%LI2~{k}NETQ}b^S=74DbLc zbmWJ9P-Qc3Gh&v9IgeXFi%~uu@YdI>s~hMv&+bFw)r}K{?4Q z*RXt7-|fN&qye;w@Rh?`awgCaNo5}$!BP}X=`f7Rcz9w~Dd@P3C)R*y|5@69uxRv zSkk+wta?cOvdmj0KcB-R%AzBpL@`i!914Ca?gENC<5y<`5I!Q7Yj$y;p0>bc`kH90 zN*xVr#|(JaqJf#qV3AijDz$uCj9K|j$0`@b;@4Jsl)UrF-#Yn6Z)4%F#hqO|0ZDEU zxx5;T3xC|OFEk92Bi`_DPpRyWe+w?!-ATw%GLIg>;&;P4+6KuHN7CW!alwpI)WE-* zf^(m}h;j4^BACGMX~1V=DI=gdR!c6MRCbw5S~y&jD2hC{sKsuW<(13XENTUC5V-V=zO7Yd-Z|V1N zQO?{C3~Sm-nL=C;RyN0e*m5Y!(&s0EPqlbhkbGf%(+sTmn zH$<}Sl#K4JKM+{_ew)ZNzOz?rJ57G>%84JJJlReAYF4o_mu04q6KoUoKho0N&bH}V zKdXv_|B(tIyP>=5gTmC*B*47hnbzPOYlE)Lm4qG?Af20K z(HDW2`qMeH);ZVv-UghE#dsY+NyhX?wyf~a`f0wShoX7qV?~4%dwuvzTuWx2)Bhm! zH~)!O72o4u5KhD`iKZ!m-uf`(q!%hpw9u`TKv2tRpUNsEV(*;(#6+bRoa2h=uo|f> zjVtV8x2Fw?-1hUoCvJm9PxGk?;yV$+@EC_25J98ffEv7HimX2lalxYzk6VMkE>&eS z9(99=zOpcP9~N*H98hT!PUd9|8jIZ#ToQEpb$z(r+}gkFqEo3|@-hJJE=TBH!Z9cS zmZ|g_nl|bUI{n^Sc>*}+EL^=~gFK?%asTv76&_GKmlk^{UVu}NHSxSTKe05{O5t%_ zH?^~pZcgqjqyGej;u5s@vy~n+K4agc#*Po@Sn@Qz`fqY8;7#tVhe1Zk&PZr;xuvt> z79faRZyuE9*k`89?*|1{yWt*&YAy7Xi9%*CIi%RES^R7TTRTq?VxzBW7c(|cfjhrF z2lqRfhd5BTqk&nK;`AhK2)%m$oE3>sybz8XCRURpV&#(3nhrHlk1U1NI65sM2cMe| zHA;yc!s=bY5!I&t>(`TinOQne_BNJ_jV1MiT~ABsY1Z;eD#fE7ofxp#F#D% zHO5hsh;6_{ebAXFhR9rQEtVu&7;l^XWS=Z*o!UEslkY=j>FV*@N?B8tS479Wtiv#{ ztA#f=zuC(1oXtAB3D=nQS3*Uy2`7dqxHzTNABY4;!Yw=YRUR{mW+VvD zP@v1>E{55F8;`rLZp)g7;`HiK$NSgSEzgy07mZ)37aO3jC1v0sjtW*o44Tb@Qch(! 
zA1@{Z$4&!?%3o5Q3%=vDv%5yDY<-hO!BTLOVy%;8j+Pi*jOnLU>W)q*ZqGaZ&+*L1Ie`AxGPy=4fZ3dBJ;d}XG5(nIv zQfPm|+F0ZLHwAuViQdiq3kQ-!IBunfs=0ZG_Tnshi2T=<$(h1Js{n-)$ns8$@AkTH zad*yr`=ZV|iEH0(9 zh+k;UtnRRK8X+ANWGUGMV%{-&0ffLzAgR;K_`uB76<_W2^<+r94a4NseaIP{LG036 z*Hi!DTYKfBa9;6;*6JhG8dVXz{Wi3^lk*c`<>w3Ku zo4+~k^sxbdANOIp`+z=DUl7~?Q8sxq>wG2?FtZ+3oF-NI(uTYS8`!K z(2MP;ax*VZiuWrl?|N2@>>j9?@b~|Uj|nJ5o_M3-mRBdR$-Z*3o)1pWq5=tTHSJkK zCGTex(P*PhCGlNDO!Yqa_HAOqfdz5okCA_lUHSvJ?r}3# zdoL=w_02(w6@JGQ2}!YZ6AFrp%&&wSr8zQI_Ck=);5fPRUeaBN%(gT2t0lC2)u5{=O_M~)s)eEJX+!t2tK?XTk zyC-0(NGmx`o#l%LX6Y4kS#1GqgkjN%3rLW0v z(!Q)J&7^mtZwiIDsn#EcM=P(G3}YIY-rEy^H>asjoQ5|}{4)sM9L<$NTzop@GPCs- zcZ(`H?BPE4)?7d(txBh?+d0!CVD}IkH*gDkG%_7#7@#NA-aJBMs#nzizmFM&=$#AMcwzEfEc`lVehV z9!g9Ir|01IF**IUZ;crH?}UqzV(yiC-(G_sS=cuk}O95MB3fIH3y-2c!>}uEP$2U*Ixvr zB$2yd{zhub%s2bC5DAwIC&QvBcn}W*1bvZ`SB44~gH-JuR{Q~+Eu92aG$*_~;t#1E z!u1hfeaNwHiQsT7NY2eQEVMvzS@cI9ch_^-|BTq1A=k@U=Q* z3CKrufsu4IbofoICX+NqEqdD&KoAE@4V4f^9)O?+X)L!qP`R&$)SI}sG z@mELuajOICUN?C)a=Z;$$+DFOS$dqlj^Co6&m6Yc6wfF&1e$2yM152!L!zz_eyiZG zuZrHN3#v_@wt*oS_4qr2Yjsb1J)YX}=NybPgiN5oof|VX{Rn8uAE-AD8X?%ZWdy%Q zF*;^mZEi1wOT!>h%jF4r;O4f|f5{n3($*9HgTxX9rf(Dpe#{+2_kQ6uSnDtejtWvC z%s-meHZh_%9*PDze;8u$#ic~!hbqw z&n9_2E+3y^zIQzoTek3*PcGmFIEY_dUXS5GcX=SBgyD0$gZ|Ksz^ORnJB^3~SRe9w zyaSMGe1Z?0zrxz}vd4@n-aolq4w8o#x_E~KN(E{j^3dK%imR_RI0gEM4I=@uR6jl@ z)lC{s-&3mH<`wmdY2UL7My8f4w)&U*YRz9a8*wY+X~H@>&?C~80V*>cnJ}MDorh;* zfjbMwi@8Fj*t})gLb+Vjwo`jm5lNc;Tr_eJ)4;8EC4WKwIbiFj*|c|kJ^N-7I=;X^ z^QV$<@X+M9Vizl{3ZKiU{BhU@UiK{;Bw~2?xJ}ib={z(!Qm?f=jj4yB5;tbYBG-f_ zrz$VfthgGx_4x;qle9-S%Z0TKlcVq{osY5$BX+hC`A!Z+NBpn%mOGExlhs6xv2o*3 zlZ!xhJk_nO%ckV8^y>%#jpoyv6%d^rXUjsQ@V=qB-B^d2Iv-x$6}LUZblh>*GF?U1 ztt!d@H2HZN+~3+qfeyHb3Q@a;E$w0i_bmmh{@F5RZ1cu(-E)56Z&?&21Xmnfyn?Bx z1il!k1Gyn26n`-Cx6ptSw$KGbjMUYML z-t>O0tl|Gv-+x3^5{DVaQ}1~@y8jlrSeV_)m^~C2g+=){L<&})mc#JW`eJV`QYDXy zF=qad`TB3{dSq5))-G>52=ExPH?5*YC>)sA{5dpR9Gt)cTv z^y6V$u-0}he(Thu@I2YI&mEpN)_ka zHXf_7jlZ*!WguZ}G!Spg>1VE)iFyo1gSwQ7vSO#bEQI|{G?B>j= zdnSN2lPbM2|8wt{^)kwnIo$`0u!8ews$ELwP^AWMR@v;*>Eg5AkWAV?vc-gw$^ped z+=^YJ$W)rSW>~pB>H85>>V(<>5?X#lSMNNb!k-07vCTgTJJ}_Y+8=-2sC6#Bn&^kJ zU(WOonDDovH#_S!T^nG2p-vnBW!jbs&*iWF;I96V*#m26Dj!SV_c$aBR(3hV$txM& zpwlnt!r2+iA~&o!&!U-LyUEWfCZg~3dg&jGAS_UQ&qC`O-zKt@fEW8nbo9Eh|819U3oBP}hXEC(!v`Y6qTH;i5Fh0jtzqF#Y5d*pW zFp>8@yH`VsLP3W6BzrG z4=zxHy86T|YM+B!DHp3sF;bCav}G1)z@K54L!uGpN-fJzRLtMGGTs6DJ2ZFVCM2Y@-yQtv51OW@uEsE9UxVoU<2 ze0BXi#1^TZ;YPaPbCVT*i(dnQc8Fm>FFR#*yq9XOVM{vlvh%g}IAFbt_Y3B8XW>MC z`Tld~z3~SYN1dnijd1KE_l@xULHSwQF-g7mWeHz-T=!q;=7Eoz>DIQ_C{X!_rw)4P zP`Scln0?AU)zaIoA_J7LNuRVKP2M_BjKak8teg)@EQv&MAl#UpvNvNIz_O2gO6pT3 z#Ty@%`+k2ug--Y(B<|bo(-R~=399j`b68JUngk~l>UZn)@f8+ywdHC*q!!`l9=En; zeDD)ItPNS~hVYd2C++z~wY0?9!pl$J1-|?GvirS5cM13Jmg?6mi#|YfONldkuG)_Y zU6!`Ps7ng8OkD?MHX)}A6e~V(HEJR8Xh4)q4qw%MQmQnjqM?PL8F1D3?Bw%f4?bL% zN8U1v0A6?lP|(ce)^>af+y`2*{kY-y?Ugj5dopfALZ00nJ*7&gDP?EH5i;0l75;y& zHh|CZX*$9c_LzTcl4DwuR$#&0Fo3x1hulVaSkfw01spb+NJ1 zIvfi>qt4!1tb}v$E(wAA?IjX6F_}FXKdIH&|m1$Xk-u;aLQI zL=@(d zj3@ML)z7EWrJnI@fYXZ&<(y|)J-%p-NZRJmNF2tMk`BZe(C>$DE=fimdgprSw*@NU zpwbP>@(H#d!+&a8bODj(Kf?~?8`59@URk3c?i@MJl$kw1VFU!b_lwlapp`-J#Pu1> z+)KZLH{a64CE*UR<@@L+NG3wTQZtcW7U(`Pk<6+ z=TT?W(Rx7Y{Xshg{Z2wR^G)GN9VLRu@cu10Am=C^?%~^a@`dJl#QEh@2=KagB$n*` z%1)gQO^a(C_=jlzDuCVH|)QPU*Ebs~Af$qNNMK=T70UYS1m-VYh@0SMn8XD{-8 z-tV>YeWgFR#yi04b|FsCNqdji0r^Ih%a{R28!2$Ju+LjXLOeO)dr z74}SmJ-sgcT%JLmPR^Acltk{2KiuNS?xB*Ph}s&%d@5w0NKh$K$$1P`r6|PAUV1fN z6;5Jan_ICQb@0iAa)3tnBPttPmm{uDuaPmK2BX`gB2gaYmS-KBLq&Qg+?`q0MJ`KM z_F)4&_FKct=HgtwAFSbND5>9R?V2O=ghE-J6K4Nr`i@em4sx@f8&;@#sF%%fsqK&* 
zboG@H>=DO!CTg(bXJKvwy0dblcsYIw15^RENg-W^t7vd+ zs(PI6AyFk+a21?dZbe5A0a2yxZ<5ff>D^Q(W31^JUsh|>a=#?GTX~PoqkOy6Y@Bfa z$S0c|L2CUHKrezX_;o{qz+gpcolhuOp|MJ$9}j}1R{iFsEX{dzLy-Y+T8Po*vjm-g zdf?V(gUvLqG;rIZ{W6ZE)!-|Pml&Kgf8HyV0ROrM4>_P>B<@SfvCfTMM(OYAH^n0t z<+X`prLmBI?-wIQ8v*_d==BS-c_Q7W^-?+FcUj=B^^6o^Zz;aVH}Rv(oc^N}d6TiV ziqrEhx=^GpEr<^F$uR8ErKt6d*bT$B6jH^KsvES{iJ~ehX=NzUrR<}-zrI@blyF%; zF1`K%~^*5qkbD&(cP}zq3YJ zxnK~o{jztkQT15N$hQAuAqB2u!wKCyD{Z20NdI=S;CU!kNacCL%wL!uj^WdBt|i(R zD%zSEvRH>=48~@Bu)b<^%_YrirS7_)Y|Zw|q7<(?dyVd9Ybp$ZQwrM&N7V*ut6ixg zIsf+!yK|A&)S4^QGX9j2tAWeDfn0g50jGNN_-wH(S_iM$m+|s(#-@}p?Q;vWByjLkO%HbFyy(pV0GRb0V zG|E^RB+$ON`Enjz2C=pz8bLA3Dh3lFDya;IVYh2{uZ@>&aw(R?uOqXep4op$Iwn{& z?FiK;KYG#X9CdUYt_5NgcXz?qF_*C7+N0fAM&DO)DyTay{92olrY`^En&N26fBslF zQFcSAdwe`iE7~br>5gs?MxrcLg0TM;b3qCTL`?^(N+7-l1?oC+7M1C<))~+W%erss z9lNLvf_!37=eZ8!BbGD5BYXGHu-Y3#ncRD3^Qd(1QMtWxTY3tr> zb?Wi{ywk12=Tzj}3=5Bk(Du39+T|q42c{AlXO%qJKVv8jgE^uSLH55KEKEtm3w(Iz zcDI(Yr5w9a6ms^0tsE3cw9vn)l_w>79ZI=aN1<*s9jfhbOtWF(P`g;_j?SHg_ea)4 zsAL+xc3V_@ZHe`^-Pab7@#Hn@1-vR3Lc;Dz%6sV_aA!Q?K_C25?Qx-C9N>EPRD_Z+ z0uzglk;jsF1~W=KG+XsQbr`s&>>OY!8=F5_*-i>#4;UbbMU~uM9!w*g%0rC=CZv^c zt59bLG4gRqSW(4QP~RN=e~>pxoH1Z>3?0KZVYG!Q-=M$@4_9`m+2CAI-<6~ubsb?o zw_U@A-l@VDUYhI?oy{p2GqC5)AUTfo$x<#>L)_pdWZ5dMwJolnWyT*0eof2G6{SVQ z)9Q4p`3r4c>8@c!1anmwG+8ahBHhD<`xc5BUgp@d#F(04sGw6+TVU{k-LhM%HT>!# zu|HGO=`v#~x8hh_9_7lTl&+r|*HW2fH=odXn&rJchRIX!AV7;ObglebGWEe!_<@99vR3-YY~P;(nETZbXUgngq>9 z1R)Hj?gC*!*rRZtHAcw1nC<2-m1(#_zO~phcBe`jE{34c@KRz+v}G<%@obq5Z>HsY zXn+ItYjFbwOyDUC*c8H!m95QGv+!5+BLod$*0yET6e#^5a(}W%Kxw&4-EJ~oeA;($%JeZ4{Cx>H&0uH!kD zASu*dGXX&|y5bX6JLrTr|K)?7Cp#aXdL^+RR1AAo{j#H;kn`q-}OTQLA^M1t2pPy|#?6;c*VXcn$y z4$&>nvr`%)#|9hIg}i)-72YqWmwr;rXL4vH;^94^K}SYqy>ZN+Dd_ko;-~D9)Rity z$wl%Tg*p6&dpX`(2_L&AQ$F^t{ZB5m(&Q71q>?(P?&aR`N>Vy7Bx_0N_}ceW`^)5~ z%o6~*@xvaFcLeD9zFbTC^I2-v44C;L0elM>vWM;Xr8i6c5jcZ#bm=W!cPMw&slZFK z94Y>A)+s1_&E4ly4R+R|j1uapTe4O5v}~b%0d0VQA@%mGTk?esX<7gHXjdYM9Kh~{ zeb3VU&9r-4y%=d;H$P^uGcwxZ%3QvqAlsC(7W7LOiv)M?YF|g0SG|yAd!YT_J?Pjcn|FEou-Knrr*0oo`g?bM}uCy zeOR54$VmMbqW!WGxR6T~+WoO*^<6P_F7c7$_Gq`#;w2i_scuHwZSd#?-O^)m0?J1M zQrnMj#JjX_c1L4Ua^GIv9rsPC|3O}*0L=SO5P>&pUl_%O3kmXmJiCR=j;ve@d(Y2- zGY5|KMt?;j7$kCU_v_U_W9B6qB(j4Tv6X5>I)+6PNY6lVVm+9L=YW)HL|jd+ zaYg7nmkZ4PC)wcK4ra0g}R|%zX5S>25D|KCasls2F*ZcGr z=eETK9!a*HwxZF&d;bo8h(c=`j~CgPF%R+X5f=M;GyN%7PowP%%^*NeUtgDQ5a4^? z`M+8j^q<6k+iNZ+s?k|2{huuJNojd+ZwpU@IO;+vq>G*7(DFfVbtsVAtDe+!Q)t2o z${`IKRH-VC1%UJCW_E(}SV@|Te}89H|JGm}y>i+N%wm~D+U^f77@)9ntph#bw=sZrur}bB|K~B?`D}hqB>o{WVL%+;MxnSD28YUh{;A$SI<9*e!TS zIm;*P{hndEW1)vdAi)38Y1B0-}iS!gXJTxnvqL)&`XW3&oL%Rb4i+H8yt7ztszQY()7+ z98cg41yPATgURpBFz&h3&-;*I9;O#0MebT>VX}#a6T@Ffv&=985cXVLTnN$oM2YQR z-!ZD9TB1!(?{>A(_xl4v*)-G^BxRI;ig#{upK0e>QT;eY-W+Nt)|@#7An@hzWpxX< zWtjm8cYzSYxTxE^{s_ze_lX(6rYs1_52|0kPSMZFXcY0aY(N6p3AO6aO+ z!ZT}4s8{3^8NLLdf9HHazC*jmZ~oYBK2f#G($gAcp7b~6lp($^dXZSqGCJ1Qc^L1N z71;2kdKsxQy*W-)pzSPsUc5ih`IU_LwP8E~WWN2b#v1>8hG_u+s{HM*2oGfdX*@qm z`kv!^tN#-Ke-OVncx#s3=*{r`P0hVl#){F<-1GL3?$Na`{wwzhCs;Cp)p1UkK@u?3 z1hm+AN=l}wWvbiE9(KzO?0>wCb&G9wdl^bze?LEbN;Uw!mYtB$1`q&%@zC?1m-L1t9v};_Jf3iI}QulCUyj$U`{cK|U{rEX+ z4FPw~K3(|$AFT_&p79^3pnLzxEU;0Zo&J&bLk-)F)_>%^_E`7iP$%Qvm+RriW1lu8 zlISU!=;_RObkK!6NJ`KE3{9tEng&=@kIC5Dd%;smd?d~PWKd=AvijmT=xn@Dx#{>FX;|cO zOtH?06#Yw{>lI<`rd3wn4SC3l7JoV!E@m_?&6z)e2Li2x+pL&Tsg>Hdlfc52z`hmS z@LM}`@8oN(l?PzvL_V)(`4h16dUz@DQw-McZLhcG4=%Bmtqtxd-q?+Ix+w5wxqqp! 
zmhJheu-$0w2EM-$fAV&OxfzK9NHsMrPzg=i@bt3`guNl0nSzK-6gLM4GaoA!EMB+7 zF_#Xi^0I@!;GBdUEww&fi{@>NgAidzvKM^aQ`uHa8$!Or%#a?M>3GHeySm2&n#<_P z6#sk47SZhepJ1Iacr?NGMQ;ER`c6}xKR5-T|EH&>-`rA;ZGxBuG8QXQVA!rG8GP}W z28aM2F=VlO-zLd!wS`cSSp?J#y*LeWlYIxC9rW60>3|{q|0nQF&v4;BN-J5gC+D_+3<5U4(oRSj{Y3 zzTH@P>%1{sKcA$0*;}}8#4+K7IXu}oi8M`mp&(GKh|u@}HzezrbRd$4_{!Ol7usp6 zRfP;`Y5Z3{O@l@6d#;>0^*N!Ugps_MqT(JR6W?WeLP)s?ecAtwX+=huydgaB6g&4M zf<*oBifD+Qvott+QDyD*;5w&d(ZW<=_`D)5Hf23F@qYo~)q^=54peM~F5Q3UH-cY4 zp|3yhg$3!N$^k7dZ{(QC`)r~w6r8d~3i^6HqrUt9n`jELk^eQ!(yR?PcdgbKVRX6h zc2YnjC})&Sq-p)C(p%aPR7caG0&MYvE@NzgP3K<7mnJ<5EMF)5cS$eeLofYHC1sHF zrh<@Fpjx9?zs-v(40)U$9E^B=&{|l=;^G4~J2OWk)Fu*bad-TY5B64+8}8##S=wBa z{AruG3pW=}#LOMlzV2@*cvbrzod+m=aDZ|-szk`G9e7y|CtXk=0k6Fh=@esdL=_xt z?>`Zj13D(6qnxwQ(LRGwm4!U}Xwt1-l0N4jRuWf0P;CNMN-uDf4-u{nAX_B;6K)!d zYpcr`>Qa_D`VooX=N|$B#iMs%H{cqnI6RDm(~B)Snhk_UGI{P)Au(2ovmz1Ij^OMl zSK9=riYZt6ga}uW(9tu7GaTg0J#p@LbP6nI;R6AmFhfCBNy`r)z2N%`!F4(eO(=uA z`Y}7H&`O1)YJHzR`{Q7k6o^r?kZPS&YXT2(j%Si`xJNWby0=1To#zRcd!W;dzq5fO z8%}OgBEf5D)6vX+6{Qz9=hv9)o7Zu@pH|fU;bq1>LSJ9}(iWNO*+)_2rth zTc+GhSeV7pC_MYxt*!yvCi$YpXMbV{Tztg;osg2U+and|n)gQnqNe_jjJx-3%`y+j|4Zy|IP&e zowUR!^u*m0+qJNS0}fJAI@zC(vr7DxS%^9r&PW9txKE)G0dM#ULDu%rK{k>+&@oW) zWOoM$bBofZZ1rf7=+YrH##s1Ioy&{`d|uh`xV=>$@cr%x5&rmg=*Uim@WI;(L#|N$ z+R;8@J19Nm4)ug+keU1lf9io<3iyB!0?~P(pGHt)?ArO?DZZ9=7}p|}dzaY0paTwl#zJ##v!}#*Alz)RV!|FsM}CUi1>?ZgGDIq?FdoEThw0j$HB-X z7u31-=sAc)r|Y(Z2PF3L(Ymx=mUy3))qzHNsF0?Zs}#H*iYc|TdR%sw{wD|V*fesAtO^}Tp-5MrhyhAxP>k_bqqAK)L#Ekm{mv)nLZ+)=huqmxN;j!+`t zih~n*AghgkT6a`+y+Oe?i|50^L(V0Zs+K#P`ZQHh;j&0kv zoi9emwr$&X(y`sK{_K6ue{n{Qnxii3s%ovd-uXPXXe~z8P&c@0o7BV4_*PU2Ya5DN zF5zzK!C%FT3i);@*3Ay1bFEuqnP%1H#1FpW8RcB!(~W>dD9oKOB}3weZ5JSV&l`zf zwa9DjIM>47Kf9T~Hy#AVsBF3XpNTs8eyCs*hZBR6i^ zJG+_e8LolJN?S@&_IHNjSX@oAB~QhF$U7uwX7jO$kxg{qzXqrDeb!z&^{D6$JCaq4 zpSiJ7WVJAH#AtQU>u&#tmKhC7Os|!r7d>5=a`2(mb2xpj3qh#aU&QgjDU+(-rSa|P ziAEU64SQyFK2kEcL#U#y*o?(QOT zX>I5H#cb@X^$QyYS%_*3>vvZ1lV_$~1$AXnuG%S-o{{jSll7$5q=cC(iLogdUiPEe zBE2%^<}z0vP{kLPY4bi-f}DJwb?DvAKnB#oXf}kzEHcB>6uW00K1LsvCC1`Soj7%@ z{QOMN^wJqU{%f9I76e%eV!ilOCPc?O(&KZf2cBR!v-$lQ^5%RoX9O7`rI>H;>;R69 z{2`1bgHHD3e7-ujodOj;T1ukKglM+4lu$c_i1(A;62Rf2zegBm;Q`w+;w=nWvZx9O zW{?pHxR5olz8_eE8!?j}ibi{dz=-DuH8c2141%IFRx7+l8meAI6q1Cy?o22O?{5!V z3$0+^FXo{$3E*JfB(Bd$g%nh09UAYP8jEw`cHvFt8LXFm6aR;MWAQ)-p z&9JtggdU+ua&gI=x+aY`6ww}RaF2%nuT1%-B;I z@bZOE+C(3hP9CBg=x7D1`Vq$v)plP)w zMHFoRFuO)$my)rsjhuHERR3+!>OD;&Jyw5y860(SuqaO0tBv` zXaYwF(x`7zVVxCp+9T%jJz~4Gu4Y9+t@s=W^xv%xha1t{1fc4=y0U3}s>9Vfz-rF+ zMiZj;1)%3)m<{T9?m>p+t=^f2yv67->&+A>rYD*L3bu}&^p@}spshLtDGLv{Vo#AB zBtPtJf+GJ)+`av-bKGvCzZtQNkA1sq+k^Cb&?qwZ8c0KcY8PWRh}Kk1F!N4QG`m34 zl(spA{T&)c?j*{Q$!v7V(-X;_ux@$0ETlz=ZqRP|MJ&eZT1l__kh>IU3n^p4BIke z6K*@lp+twfga*i=mXk|+iyj2`25pdopG$3^HNS<;r>wqAk|Xt( zNT?dhNLXBlF`&eI;xPQ9N#g?oS~rW+67WmU$2_@5LuBa{#6PPrQ`SF5jWosj7tzPz z2G-gI8+|lz+UYd-L-uRCYLKp7zx1(x5V5_SNfZMO4nNGpE~I(pSoO_V^`?S{srpFK*V)d$0GW+QLn&n>Rk3ScNg$41oo@=axg(VXIsGvp95 zniu#gzKb-UL`bHV%uv{iN+HaNAtgTrCtd*e{?p-cwj2<9>9*+li>k~fIVxG}*2oD! 
zufn_Y!<6rjU%-|Fs4`Gc%yrSpcia9VU!Q8xwn|3?(tLK3Z+H5xav7Lx}qr zh7dR&{)yXgNqf@)e0HRRs2OnW5MEZkQnm3uCk&W0?T**9`l9vKuuo_ZAD3tMQoLrG zb)i)_5Z9xwVA&!gXMV=Ao-47i)RMco=Zr1r7 z7DuX;;KA$nn=R$!eI-jyZ#;3Ep)xQ;kD1O?2=~~V;^QgalkBGUYOEv2Gz%vUeg$XI z9^J7=slOOV+kr`G>KGv=!XuWTOKX_7I|MZchW{D(U))l-N<{F4#&NW)U@U|GgVQ~7 za>-2~a>b{tC0+sO>ovuXvni9!V6bE~gkr*aG7%Q?SVs5q8=k9G3Q7dg0<5 zb7jFMHYOElbu@|n82ht$u8dpn6T6Oa9$h=oHoMd->(M$@6`9{Ot!Or2H%AwIJA7y> z(lkb#JsjIubX{yXu8~GO*9|vQN8^hqUvjzvoCS$ohzOJsA!yhXC(^2ey6wG4o&a$g4Xr`0J3TtG(eowvP!L=1Bi#5!UlZQP-3>0ktp7ZLXO zr#CP%pVx0Jq8r~@mCW@u7`8dypvNsd!Z9z=7{Z7EO+~}KnSJ$a|NOjjiSL52d!sCi zR+D;wDdP|qEdc7&hu_-S<0q7vQ3E;wzt5!8al^YFU#z8v+YY`1Gf)F06aoUEoG!eWdIt<=Pj-g_YZm-Fb|LHWb_S zp`d1I+j5*$(2WTH9BpQ9A?#2?VUl9~P;@&*bAt2Z7LFC$3ubOVjA@ob;5==YR!mi7 z77W!FCD(yWoS1yBqO9l`l)5`6T#fAIh~r`6P+#L?;0G;J!_@(XLuP3trkO6qk|+U*Q%%OE&5#u63zL84%PGhiS<1p|jbuue9P_P=U9`n3H?i%O)(07!%rlN^w2?m-qAEkMFfKQVag>@*!k=3R}Xi~%L((o*l=sB+;8J0 ztbnd`n%lcVyu~&RpusrJ@s}E7xPardGO)?biy13>lN616H{_>ZqwzXx%;mXXlhgq; z{a{8(#q(ZD#LQUHr3_ z)lSi6eRN-4-g7(N-!lR`StD}_+whoF^oEE=(5WK*zPycBSq;n3TS9UURbWQj@5!JKB*sD zAAYn6hx&hYTrHEautY^1u1hTG=7#?zP3 zXsr(N)^j&0x>nXU|Dw}2u0vZ+E!u-%B|#m@sdhJ!9|-9+7U-&7Q=q%5k5zqf;j7-J zifw3Eb$DB8lI=qOJb2E6=WUjE(N#$%u#n^0N9#M>8 z(^O2TuCg2c;j>FGwZT7Z+1HQRi`kr$2Y+7g9=%cjPig{=FBqvAkvi@ViN+TWNHmp3QUvEu*m_2zajv7>-1&)Nt~6)A${@C#G)D3adbmqoR? z?$_spLhuV&wtn>p*wJHtR-T#nOvMa^J^WE2=_obXn4zq9qu_dN4%Oz1^?K2@PN+iN z`r#R=+G?rQmNvaRf=QxHS-@%AX+aI>)egP_;pmj6WNN2l%JW%h_%4=rs=8|f0%qb0 zn=1HAvHTTCUH|XNx9tBp`L5cw=jnJC+LeFUVK3L1V6NxIC;y?nI68F?&dC*Eeg}dW zrZKMnZ4W7x_5^h99H>Xi!|TgJ0{*JH_EhyvEEGK(X!~FEt!8hluP|^f9@`Z8wt<(b zLdusR;0i%@@?<|Vd_#nEZwC_K5iVvwt!S(LshY_JIL&?IFDlBXeEY#O49+TiGl_g@ zYis&{k>-9IgNqJmeZ9jFsDBkg=ZXLwhAKvTH@B((Z+T7y(aUaoPCs_k^73fF*^|J$ zX7pHAE9zirBN_xn-Mustb^C_sPJXwb;)hX2dZ>HG>%b`+<5A$q{fWxsv4L#loACaH zHTRNI9LmI1A^le;*3L0}BJ|U>=SoE@E)~^0d092L|_Cl0%ll zqOQiAB%S|-SGRyVhktzULd=X+!tGRvX&nEBYH2#+c6-7`txlUWv!iF9w$gQNGo(vEQw~M?W>!_zAx=5bfO&PU* z@I@3{cw#pa0+O2aad@m5P>29-){r?0>PRyhT$O)QCC)&W37z{MxW&4G?q4QZJ7(B9 z9|kcB$I+uE?6o-OyXyg`ziV!~i=QNx&=WS5xR`6^q_;dC9MK#hhcrP;uY6zPy1_Ns+l5$OkhHQ^Vi28HbAr+dN58s{a!us4EDmak>NRm z_QZg6K?SR0ta99ZNJ4g%5FQ2)(gXiOF}1AU8K?&Do2!csx<%r?4&_HwuCtk;7Bj7G z=x1~oHpcc{u8C;2jE(EP^B8Qg3wCxz7h_p5ZbT|=i;fS!-ckV%DTNh%iO7w%46i+^ zHvlZ?J(~h~>B1(-byibj+Cu^?)~SbGef))m{6=3emZiH8ETfvC%8#iJhxEOGn2; zosYRnSCkdz7RRRgm=S>|CLM?BS{;FdiRM9PK{~n2u=~*cplrm2D+;GKupEfS$utFQ zH@4N*igw~k5G5tlL6;aq^H?5fJThNv+-&^KM>ZyvBHyWBOg2>gs8&pYIvRq1ra=;q zrCv%uEME{KlEkxFf9ro1`g#P!u&QBbO0TBvp4*eC{FT!_VoxeKk=V~olb@iuojA=3 z!~ODR8TGwFmQmAEzKXVFGxVS>A+(eiKM)tLNY=3!wuQ5edrJ98C0C-~`@KWtskV@w zpz^a62_#f%C{ppmI$;Yih3Yar2XO|P+_FZ$%91BUSv$+iX;`#0QT%QSy)vK%HD2*KdK z1E^e#vj1ANJaG=h+XM0o{)i@%J`u#<(}zsui(8=ncaq?4(m_%T=oT)WSOldUvO*-~ z#8`)p~8`ptj>1qd7ug+dY0jMnuMMRgvXEW%IEU)FWc?!)Omvmq8G9;MLk2YZ0IxXP?zX0 z5zVHk7f%

r_XyzXpEM5?plgp*>h{VQC&qDWU@z+S~d36wF}V@pW6cS;F}{GfRIV z)V4x7N#wg^d2>ox5J;0}Pe7WIjH#gH-BkYm%!D-8JRZ^*%9;r8k;!AXk?IidHK6N! z_1OGAUCJ^FDLA5By$)$k=EN5b+!t`kVjujiqPlpRsnlxBBbTxa$tqudt->_4#+N=|YFQZxM1DcHP0H?|(ZU7J6fjaS@DN3AGz~0nX z!LZvaKuSi>)uwHR>ok?Y9u+zP+rT86U!)x$PpqR~vzr-((8iehB#7{>%<)fTQ=4I(k^VTzENWOdtUt&I>WN9eB#@q1|BI&e$ zp67sWsmGFf01PLF(rIsH6Shx}(cK73>1JKamS{gz`S~C7Ye<(h8Y}h(L5Vd))u)6_>;c50VRty4{^9{mjFRcpW ze{lZNRAVHgZ;Ug!slMSMSW{z+5uE_{D($IA^ijq!3943r%n*u68?(aSH`sCFO82d% zi-bLr#KZ{^*>>=u(Nvi)z`^IhU?v`ocUUklS(d8`%aQ+HF$nC=ilLk)3~L%sgs&iQ zag?3(PavR!b(zD9%}Ogw7^f5+BL<_=^SoVZ-NkhNE*R?QvYxEsttXy#QA{>yCzQ;8 zCgu7#@%Lgpdm)%4^=CJ&GxzuKuRlcWLHja>SiN5REAWYLd;6V|TmI2o_9dVWmEQjo z!D;A8ZT~^6JWva%?0?HUD%e!Lx3y)!R;M}R zD?8dO>uL?11MNl>8RtA)z5Ba;3xXkcz+mI_!Cg}6=s`&v@9O!A{Rd0-cV~>mdfL;s zj^~pNAHny@>-pmH&(ITgF79_Y3V%3EI>t|=ba_huf6F$-1BLl@nlhNR^e#{J0@97n z$oqQbcsyInh9uz1bq!nS6QpH^y?UC)$7|e>?4zg|5B>MO)UEO~puC49^;{ z$nyyQs{fCPFJ-*PabVF6L~!5zi6rrr6W6LOhP;)0#!5!`*fFri?YhgKcJIM zC}>Hwg?HXwiyFy^(de6138pw~SIQsBD^bxYO8#Q1x?Q49FG{DX_`EyXc0u5&{&}I9fz~p6dV{AZ5`$&n73{OrFc zI|`kO3Gc~d_Ok@>j?V%V9=*wO0psfrGo-m7&a+Zca1hsG2VUbbutz$BbX>UZ$?piJ zJ&L%mQbiiVPQM}1r2@O=o9JK}A}ohRyHv6!ZT5 z_&hF8GYN&mKK9=8j)qVVV2Pgn##*)Gm%VH#^Sb8D4B85iK99Vhv=@KrUf>RA>!pK) zuZ`u3Jc*$wT=e$qF1ozeR{Gscj(+?0++S)kecgD`ef!7$grY8B?8i=`zyH7b5vkrg z-HS*kCe=`nD#~Q+FTCf_cj16=*+e}dYfM*AKJIQ1fpinaA8cNuPd-6}tN67)2 z8oija#2IaPN>ZL`@I#TArO6P)WZh8=F&(cspa>HGgW`ich5|nDL~NuHiD_xczwz10 z{HD*0Gi~?fb(MYdtmkKXQU1v9u6wXaxOMB8DqP!9=Vc%!=6-CxH!O1oe!_chZ-zP- z>4of<4*b%SLWvN_$;s|Ik-_wh50x=|YW83RX+@3p`4RnG=AdpmyQ(yA6MBWxGOCMa zaHZX<*^+ZDr4LY*5n5PApObO}L1Gi_hkE;{n|r`d_x1}^2)P2+Cadqp6yfhhG~kT! z%psSj&9iEbeg?M8qG6cJp1n@)DC(2}5yc}E&1C)gub^RGt>D61XNyq=Q&7rp1_8Wq z4GY+F9y;9DNodrtQR+UBR`vCWMNuhG9lqT#xB{|TDg?NT6>RzqRfeo{F97k|6w&SD*$M$z7Zr@+tBr;b#~y!_IT&%bVQ2DtEb#ByoFS+sZ=uKFoKlBW}*`+hNXl9 zS+#s7iW;GLXD|j7nF{T2AJiH*TCUajI^ec$Zhidj@k(th4Ek^Vp{=3{60fF7c;#Kh zHmzqmiM8l&mL!Haf^<&Pw0_{x&q8@1@)K1kxy+e%LzUQN-H2)aI% z8l|M~Hs}xLV4VXdNIDZ%vGv}Vtha)EH{tKb`eLDm>{zi*{>FRbhGMw92QU3_3a%%{Nh!PP=&S*o_if@zmIPGbE~VWF|Pe{ zm!5X@kCHmSL!omiK%h_`u*pF$e<*VbN7r_KlkUCEf=aNY|1G<@g2WPcX^k)p3K%q5 zHbAI7Q)*els$I67?Ew($rmcgtgpI*#&s_9c0qi%W4lslO3@cXwSg?$^sJ-rH1svDA znEVmo6<7LZN%r$mV~-6Wr^UIzC9=`MHh-mve!wOgXsAQHs{rTo5tGX&Ytc*lxh@3) z=@Lo!(N!*Eu}#K;6L7>qID$-!IRT^~N8y{17z|i?f5Dyre)0HY?C!s>rZc))Eysep z`t87kb0Ak?-GJZqX5goKy@=t;8esGcHn%UGP2g0UtR=Mrh8!`?Zy+C-U&rs8NAp>jXa8CCoaOtUv}4=-k5^(~ zU=%=Lt=Nv9^54=cqNtgz^_(u!s>4|l-3^0imE8I?LKy=LjG&!j1hTC(jd zrxrnct|D+7k=tvpUG652>>`W8GN2zXd~I40Np6P{?K{(}20}Dg2PPnWlBeV$$vg^? 
z)_{C182p}&WVW{H`~1L9e(^wM#cyiivUS*IRop}rac7z8BR-`&gj6MAYTF>|To}{nZICB1xJch6vgwRbwAZjRGz=Qods-}gv4P{ymHLLTDUYyF-D@-M-A6^I$ay%{ z-zE^_u}6CH&iAUm_1=iCCF)d_^|s(Tz5%`6NBVAEkTB@JovVh3zqiX z+rz z@dfyA)OPz?Q?;ZT{kPocM=hJ*@I}?r=2$&b7yaA5#wpm_pmddjE{U27C$uQPP}=~O z{dXtq7J;DO9SUw+57WeJY>8ZJ&xBL-jhQsYhB2hkT>qRCqY6B1`&JOT?+%TR-bOSo ziP}ORVa7+XHXYl73Qj#IQvipEX|x0u79`)`AY1zj>I6j%o;XsHeju~&n&}v{xKEV{ zi+Er8T0`G39O8cM8Q(MOcH7?v8Id47>Z*0Y0On$ht=d8#z2Y_7&l#k)bY5%nD5K6-n}XWyUf9J!=uzd9boOwlvy!viF}hfEn*;_ZL4-i)|5hU>H6H7Js&*1 zx2ErF{3nu+KKuucyNhSBCMmaz6jtu5N5<_>l>UnNA9O@cd2U+-hyk`WD`m$Gbksi6 zIRaWckK@L}t89?a0pA%6)c+W$vSBd-d3(<=Zy%!j4er*Ag@p{oQMpDj0;O>z>|`gz z873Mc*x7M)jA4c&i@Hl$x}{_!5rK@bVc^KuqGqo>9n`Y!6DDrl_m%Hmu`BZiE>&Wa z5l-nvAdy|s>wU!4HP%)SlQZj$9rssP=Wn}#f`l088FGVVQHv*E>`+czu{?Flh!uwD z(K=i<5NDYwc$un!jVAJm0rrd8uW~$kOir-8P)V9Y`**4+sqrCfS@OnW?x;1L#D9L2 zeTIWni!M=lHsSyP``c)nNmc_rXHF2E8&@*dKuVE#f?=OdmYb7u;ZOEknh-Nh%$><$ zrOL;X_7eftz9Q#|BMy|8Un15lzX|77blH{O(0TK2Cq}?9M^f0q+10_?bf|a9hS0W`)W7k(U=9N{wwwV6m(g5a5S9VZ9ak|X z21y^!t#-cI_)-R!ax)2T#zh&|=l{7X&O3hZxWD|e#73rzl{IL%QK@Q}n0m$kG2Fvn zZ;k7}5nu2A%b@%Q9o+o7{K~DQnO%u`%2hbN|Hk+cPzcDsz`+1%VK~G0>3}k>0!^_2 z{XFtn0UZu~(ozTHZJucV4fs=wFgc}>;B8}As&z#_M?h0%Jm>Emt>H6cGP8v}!PB(y zjqV>)7|hYB$dB#(R}u|4#8VB7AvtDIxvp)_aCA1^gPAnHB+J`Vj3K5l2chY;bvl>B z5E$=#i`3#c)bvO?vZwKvP5sPMjXVj-9fPVP2^;sG*KIIzYQQM%MCD+ zl`Bb_n+!n>9f`1NEL(-C_XR$>)N4=3bW~UF!^srgQ5K?3>kX2YMd8qAJr&g=9;ADX zq#|iMw*rHApi4ayu&&LoO;*~T+VLtZK=7351&D=xL2z^urfTvxHg)t*vrAgk5~3^z zjIC^Is_^T$ZF2mmB?(~xw)}3#o5eIO3_d6+tZe7Oolz2@*DjF3lXvl@^N+==u9xjt zo{kQ8+0enTCxa`=qWT)5TtSoWj$otuEa9KbbSW=tI1-lwSQ<)$uBiUMIXwoNG0m$R z)9G~Bx16!Ytu>u5FLl*^ncwa#`EkBMd-VpbvKXb=uvBr?Z-E{*KLUps;;6R9T;cF< zf9O}opx{tfC!7X`q0?zbmPxPP6U-w|v%Mai!l{^oO_7#!)9zNgom2Z;)^&H*krfjy zu{*D^5n~0rymF++w4T)#>TOX7gC>r1Mxw9k@d8_>?0XxVxS|Qr!Iab!XH|w6PZUTG zlqu}&C25}*dlCbwjy)(Vt1DqA3 zWOPMUw5x}`ly#dgZEvcBfe=j`cn`~tm`3_Am3Xzz{rM&YCBe}<)@}A-P(8J}Ml=v`8UccQHi4=um@M>vC1>2k zTY$WRde&hMUW0SHwFh;`OnbMT8@vxZzOC#g-CA1mo+H#G9gW`RJm)J971+LjZ6g7j zzBO#cO7K3d>Nf(Rv2gyii3LrF0kFGcG}ym}T&z4|>Cb;1Rbuhm4(j{~#1W^S9l~lg z;a_+-2tkW&<+YFc9k|T}Iz*mD-GHpJO)bMQjKZ^5*8NNguL%=tK}gf1n|y^vRPjvR ziv#{Ky$vu}_$~uXfYfS=yPyj#^x)|YQ@acz3nH~pTy zMx_jK(RO*LjzDhA-SPsuH|1=$XrR=dI6aLh3?b%MEUmoo+qGxcRIC99=U*4oB!sU^ zic{rsuNjrj_1F0Pi}&AfJm$IU-`;2>0can+xzi8dJ_!m$--xm=a09tjG3`YhY5$HB zcXSpqWxN*2y4P6{?=rj@@_Qyh)7TQ`FmmG(FXB(vK=P}#BMxN8?f(3WYj3*(ocW#D z2>7#IKlpAl^nQTM9u`cu>HV$lb;FF;?>n>oJ%B~>B1T=sg3px>kiQg(--M{4E4^|C zyYO|tI7I@cXDmP99WOA@qv_cjX{jP z2KvqRX8@WUCA(-c>t$v$m#U)vT(ZBXI6W3dK!kN85QS_ekO*iU&YZ#E@s}OQd@c8d z4*o;gRZ8jVwyoM9aXrJwYx~~BVwy5kS*z8Tg*e=}2A;=IH9p*QbES2o7LRr3=&t(% zBE4|aF@!qPnY>k!LF)Ug1?ji1ywEg20!6BgiQInxK#^$KxDvd8rY_1W=vZ(e> zAT}4+-u;3S{B%`#VPY>&$wf)E6+yUdU-7cg(#7uS_i%McUvEH*!`jsHhBeL zi|rj;mrT{^xWEE~%RQt$yD}uv4N7$VoZFoxuFZ{ z)9cgYQ8Wk_q-RVN;g&2@8>vZ%QrMmZ2KKb6Y&)hW0x@v|q4*do@mrpOxOro`f||6a z5kqI~dPwUb;Zep$p~~#)s8Y)<)g|B2zCV1!eO!Bx=ogOIA7kX!1EDwyK1D;$QxNXi zMLP-0T^k&P)5IzuN&_Cea?}8~)(d%|Bj+ONKe5)8n(aMkA_){N=V|Fh2MugmW?`g+ zMw`r!gefaP7{Ifuet9jFG7S8Y+xw66RjnyGst#;L-G$T^=$c4$AQobFM4}*jkMhPwui=o1z z;BmuW+YWf}V$Do=+`brDd+tOZYrXOa?kBpQHjkIx$f z+9m><_KPvs4GNoOvs}RtTF}aqpfs=V;-jb>u7pP1jo3GD7zU5On=lU_W_xc~629#P z{60+g1>WvG1F zkE0v|PB@^8TJY@9I zUhVtV)!>b@3_6~s176D`Oq*XK2QO|{$j*)x76r}r~wv*@Uy-dv^G`-yP;q0K|B0C$Cb zvttQ!N7(%$0lS65DI11J1=U1;NC5z3qE06JCa++K+8UnT0B!$ku@20FQ-b87#* zk4~GqO4A=>)K!6yxjGF5u_$)4RupG_@t2xlUla;F>&?julIyPZZtkMB8Lq)zlb3G! 
z?M{dLA>~vJ?{wk1cci;m*A814A6d;uRvhe#RdF_}1M74#EBe}-=07Fi{xQn-@-{X5 zeQRWz^Zh>gGlln~>xBUC%WTSq{|%>_)wc&_G6XDcPB*E8gC>a@@1+pUA&wzb}=~j);e7JFg1ACy62+Myexpk{W#1gG^GC{Y;6J zC%=uJOL8>yOlMSuMMbobQGnxy*p7_12V!~NTgsXJt6Y!6DrU<2wg(ve^Pjq(8Lag| zw^RfPxi=yG?h3}9)pWL+vL}a}uCy*wSXdL54~U!ssG>NU^cc73^+kwppcY47(#%hx z-fRat_5B6%jCG)*4_y2~I1$k3=eIzrcOqbJ@e{ah4y6L9I6d)0X)M68cz$i7q~Ouj z;OEc%JQ-fQOaBe0G_8%PLmv*tb*bLUgwVCUAfSYD_i zeJQ6nqv-QtLcRUNA-*@6@Sk0vRdE#f7Z-kv3UC6Y19pLL4ko9$PzUk1Y1BpdU2}TS zpiXWnt~gQT7@bJ*Yafm7!Vb`%0?_(J0Eq^&F89`ZT?}w2r%A?_F0dv`9bLqR!$yMl z7FkuV1(Ivag8d$OO=#Y)x=`>b$Hd12yGjgwa?Z}`2gj(ezUAGXFT;z>>TH4@y|=gU z(oC7k%1s&>ef!!P`sym18XMn682j9sPojNInet>mpVm&@$xh!HAfW`%=Ag)ilCq$P zq?xwW&d9o`C#8T28{FA05tgWcI~mR5!=X}-#!uRb#RosQQ%0Y_3VA~mMZ~ch#24$n zmDv|UKtd_32s-?*>S$3*O4|IV<}}aQW|)vYrLWaSABS?o+<8A`4DI1uKGpJFJm_O5lhlMq3b3 zmUdip3?f18pKBVPiMB^tTS0q+b1e2-6JS8C zMo{$A@dj1?A2uSn5H!dio(qJ1#q9PCTJfYJ=W8{7c?pLt*y!^LS76kJ#os_-_73xx z!-ed$IbILAq|JtU&Xn}cD!~=5<~d z=3{4*Udjnb+~T$YsSMywY$_RK^KH~HNY|k*?k@K?DE>oK-!{Es#rgROEWH4o0lbLgiwwTYbPDbKchXUcN8Cqc zma>X`8T!S%eah>Rj9u^aRT^*)_gCJS z%V{Nd3Dp=i{OcAi%sO{&tm~ricAzGL?drh=A+~vVcRmfgp4Q=L7xH2FB%Pmg5z2GHP^woIe{c-lF*fcwdq=RhPZ=RO-a`q*KMiXD_N@ z=N&8m&mnSi82SFyIkG(1K#pDnAPE2}5GNA2iP)(lczroctCrxjHZtzE04_T!w7y3C zp--7^@L88|jv=&WlT2{}i6bqzS~;lf;ofZc*v00#^vWGjG*p2#;$D2xNlFK4PlN#Z zIiYZwixznwd;t$jgTb^iQay%LzKB3L$Cx=Xj$5cxdPRv!riOk>QAD>d?S?oWRX{(Z zm1JO`=Qk8Ah9^nWyGb!pY^jm(1v~XTReR(Jxfad{l!&czJ!psSX`K5R+S=jM|UxS_8R1BW@tQ-W~dO))$jB42payGN?M>Wh{eaA2PwR zH_Co!R{t>>B1~H3nh^mJlsI?r?W$l1!dsnu2j52dx?6&W5pxB$D{{THCNQz|%12JF znH~bwLw7VxU~9wMd+p8gW?H`4uGYVjbzf|vd*?GhRFlL6Tt*h;qG8=`5S8<(;v(B@ zLfh`1F&UkPUC>J=60KyIu2bZWplw=DYidm955+pisG^#0x;lx8)$&1_qG0U(f@+09 zaI+WqY1Nt5UqZF6gboQ?D*D#tjvGfLwtR{q2G)Y}i+`xq5>9_9)509znISYo@aIfd z2miPf=cos=VpPs**cj2E5UO9~2Dlj6IVTx`rrr*P5>23e$6lzPck3W2oeuv(ira)a zh4;1%q4x5!zOhW*OL)UXh8}FLKxlw)%LNH(BqrYY9-UY4w%3OA_Lh*R1&E=Hh*)mo zFSR+~!TP`SnZWi-u+{OdIJb@rFlr*gzhp#OMZwwR;&2DUb6EqCKu1+JvxD`OK)_Mr@>Wacb z@EaYKsbnMkPjzeC}J(`i`cJD-dFiXuj`@%Z?ZPut6#-vU&~OPgSxH}AYXjz6PgIspdrcT79*xG)drHyGz{}2 zl(G|KKmv)Ihl0445^qNd3;0x2Nx=zxJf#i8_DM?5D>09c=0=3n@{O5b3g*!ChhS+K z*V+t!7ib7VC?YnmRi#deKvtINp^5Q#X{%lG45kIR3>ENNgIkIx-NrEcWcArplMv+Y}BCP^(O(*lrYG8V|3!qZ2pEpApJ0NluuOyq}~k z7j7b&3O5oamwGLET;jehAvaUv3#b5=lgwukn;!=^cq||kEO6 z1UKYgD=+nQZk-L!g&O(Bj1fWt|1-!PUH} zQ0x`&f}$bT=h!E0TX|?R6f)_#UA1QH9_zfQLuN?9LbsfNVt+8{tQBb2pCpxcwqm5# zXUBzcgGw61z-6;VVpR6$GY4d>V_yB%7>y|U_vsek?9{ z%jvF`l*gCy(u4y|sIyvWU{*XM#>g;PU1<6;6mQ=HTrE>7zV!-7{Cq9b4>%oV#a-d4 z%24Dv`q254%Yl9muqjOk(22^#?ooUD_AAseFD%;9rI`0Bu(Ix9mjv$}T^Miobq%w* z;mGO7R3*M#Uyjfbla0p2t5@)e)&zCeCwLX+dgT^)4A?8oeel!jUUhQAhl4{~CBc|R zp#~=190>OaFZBko!{*lUG__Up9fpoTFRVl3)~y)r61bXfbJ+~<9aii@_jR_FF$!Cp z?N^0e4v=$soY7jI_Zh0}Jt}XBi44VIVJM}Xgj8v!kg?Ik9hVCO z_wTV|*v;-ZC^0Pn4dAzk`lFCDt{R@c%G__UY9#$o>HGkbyn7evNaGq8W<&8yD`|Y8 zWib8<@L*OgVHG@vdUWzf!2*|a^(PG}@j@9L37sJYRo;C@dz0(4qzVBtCh=~#9%Fx4 zdPNUULSoalb9_sP*X2loP1>lAeZHE4o_TGL)p@_|gD+?XQ4FU{7j2Mrk)yxNQelTV z`&FP`I;tv}wL&(5V9=Y=7O7}ms^*4VH+kejDl$U2dbIV8_yJvjRFW1jX)UeGz1{`G zX<>z6rUB_7;uL(T)7e{?G-2TL+EpHq+w$J+(xtGO;=#~jI&M5yJhr5<1^Kel`Tb<| zWYTW8!u(QXE z#7_lcYLi(`w-D!9p2rcjR^lf(U8AsWvjBc0MnVlsFic(==do|IgoawPS2&cdc1&MbY)Xi9S2%R4oN5<6OUXoJ zSk2T%UR6w_0kJoUD`P~dzb|A0!pX}yQwjF!u?~0Bd|548TWo@cMW}XQgBG1Il>BHD z&Q^7|(9_*DzwJ6Ch5(jU$M<85Y9dM-ZY1>lfg0=aOaui*mag9)@QFVVy<0unApj_p zMpLzf8&xt}$c*aDq{~DKKYL6y9WR&#ETWk2E!rn&-d67S-F8;d*yr#QQx!0vDR=i6e@*Di>3Vp0o^c*9v_5{zld`Pita$i0VPC&80 zb@SlIqkru0L={@{QSJKc=k;hu-Hsm7BB(?w*DI?l&leW9Uj4YCx(mJKIuC0&KQK4- zeBrA%JBNpxKkjI=7@UrKxq>R3s(b0exnRqY>Rya2K+c%qk-)R^~Dz)FvHC 
zK`9S4bhQ`5j75MfWj?O@+Qqcdl*l@{jxka8`YpPR0L^L%p}!J>wm-=86t8nJZCqP| zDY#>cGp8eT%Okf%YK75CuQ``T=GN#(%gWxlk|)1SG`@i~sPB#_syCDA|$&7I)W2P?AS_+7d zf*4?;Es#}>^U6bOLV*(0wD-ZpjamQDucGlGj;le9*L`vvZ09^5Gab=eJhwXL?n?p(pD{r-Js4%Gr-lk>|vlz8ONBOsnHbFDZvT+ zYWEfF=La<)H1!Hr@|Go`s<7UCunK`I&YLikR_MZq{#QG%4|nd=5x2em$JRxhw_N0r z_J$G;aGVTB(Gub)0$@r;p_M~6_!Ezxcf}z#??m5W96TR}qpK8ew9%K3%^mUl_+j&K z2jfGG&?1sOgmeK|Fa@O-nWw2P>E+0X|z`D&_#1S`qtNRl=gmw_OV~AT<9(USPcRw z$I{+-G#rmw+pv`@v?yzdSP698?m^+OncgS+SPT%CPdpZ6PP z8WMT#BX2%g%Ztn)E9OZ{UYh`hA{gnlF>x7TLBko=C+em>9VBcAvax5EesVXAI`Jsx z03@9bS}%?U+7K~XS^}wKd*^44;gX*BF%4fjYC2Gd1YtvfHap|qFn1WchDRDz1rB5Y z8?KGM)qMR%?*e*C3s+qIaa1jwGUYuHZRTg~i7N!y^X>@(n1$$kK!^H9^qYHivmRC3 z!{P7k-`m&MzjuGX`5gfMKIn9RPvO6d;jh1+#-mbuVfUs7KwUtU;qMm!>Z131+V7Uv zbnk&~Rn@yCg1Xu7S^M|2J@~yn&VE1b4S(-+&*9M;{5SgjH0}T1?vH-YMrXgDr)R(a zIy{H}68fjS5&N z&;l6%V4k=Kx)tt4)jLZayW<(qBPwxqFQz@wU4zZ3D6%%konR&dwS|@~at2fmA6e(|)dhAH9~LcPP}NAHRK+=aEA{BRXepo#aJ@Mvw||Q+ z;#q?S20jk-J5p*a5^~prZt5dS* z#(nbML3gOWa7-B2jk-d1M#w$^CUcA+p=vuCEs4g#oP;xzk)%fb-!lIcy@Ugvz&0Wm z;j;`*P%N49ZoXnanIU75a;gSaPU#I=0YUl<3DVxHQrg>qHb!yU4GepdO<(n$ z9xb-U+=T6Sf#(ek8;>+}z)>d?H#gA$&InR+$q_h^9i@BOf!*#7*yQHt7lo_FZ@<6p zb(PcWMrF0RQi(_wfX?DYk@{ZGE8l;&@GVDl;hRHgcHx8Sz!9)f8x;xqvi10J1z!EE zAZxEw7(L!HeHeK+mj(`o(!m#Ur$_Ofu)xDw-fp)rk9l6tp*Sqst3lS;sNj7bp4C{t z@*Uqj(O9>fD=HcChtY@8x6T`SWICkm67t_4n!8-;ApVavcH86&Kge#df~eMX5nC@e zv7plnM&F8h_byNN&fdJ*vL0<7y?&GS(;gaJU)louQox*2NZDm zZz)VUe^FtNnK@uvNC84rg-6ZaNCENJS zE64aGIAXb(SC)AM-!3}{QZxJYUwRb-76${W1|*{G)V%pP*Lvt4<&Ji(8Joz`PW-Bub2Ob+2K270LL2;#*ZnZMY}lA4m{l2YDrw^195in>d`qW{R!5^oDHqFn5v;zKOSGV z#522Cck|sZEex9K)mw~oiPw8uo3E)1nxtK#>>$_V?aM2WoN}>kPES)13vZ&~I3wqJ zimpoW<1MJC81@eii8k5YVH~0Xno#A@Pz-!bjK*DL^GXh1xZ)u5Fk$reB5Y(Gf1F0$mll3!>p*%#m+pl*Ppy7_P)bYm*I% zY_~h^HU@W#9p_JLt#n??u@yqE#+JPf8eyDFR}9$4i~GP_j5zWH^f%-|W+~h~xIqKO z;!XPKTNTkf|E_ScKdUjd54K!EPaLXhaiS{Uk$F(2dOjfju+igPoaRE1kowdcE~}XY*|ca_9t2>F#itWM@DuUPhdGZTIcY zAsv+}RRB6I;H5r>-c#A%@Vz8=Yj}`zvXi;tke|5W(RTsOuCdpiyTWOL4|Vu=iVB7& z`=FC}P~$uwqk}Jj?hK&cp0SNlsu*NdUsgG?oST?JyDKsSX@(6*J3gt`J>x1%2ZJVh z2;a|iPyi^5Hb86Wd1{bI#{ZVK5a|3dUI|>+iMn1}IU96`PP`7^dHc_7>xZ8*2Ei3q zXXX=@Cmuwz3h*Jw2gzBIfjm0v$?BH(yB#oID<9Xl%K>e2$C^}mE1HqYNIjwqLvEkFxUBgI|XKu>dj)sFFJZQlA6ha$kpV}YFMqc z*3{5Or6No^x^V{#U|EE)=St#NIx{r$@Rqq*Z?}F5mL-x(8J%R04%rTWl2Mvj5Xr43_CRt@XeoFqfwrpC1SM)oP*3mG*7RljW&8U2W%uRH zO91$C(CNNR;lGRFuP<+BYYJ6{FE0SpMepS;ZB1vc8n88enYCZ0?ZM0TID2{88@}vx z&*9M;{5N`en)Y9|`=giH=r1DCc8eSyU-Et;dC@mb z)d!)O<~4vu6R`*C_CI1s)-IEULGda zOwlgY7YF!w$&wzXf<%)XCL0mc4Oc9&2#Rm5Wg$~VA1=Ws8Ek>n(u;>RpX{jHxjaDR zG41gs`)iB37=(y+tbDkoTbw?KxMT{wn+M=nfrUACP%UYmH?jLSgE2eKRiTLw@oBq5 z!MFl2xBy5URX&kPwiz*>9MkF@)+x{$ zr@jdv5R1{{RR>>c^en>XQGAAW&lFuuCF=?TGHdHNSFJx@T|Y5##-zkau)4Q~+Cxve z_t#L^z0`wJK&M*qD<=UAS}y|utK7OyWjpqCIN)}X&2+eu65escWm8HC)ORzJr!WU| zZx+}yo%z+fob=Vg`5v;@o+eOESEgdIAG?E7kWZ(UTeb?$ZN7`*Ty<{tJvaNFn|;sc ze{Q_}<&C%h<3Ijm;pf$rPc?m2_ITn3$6mMjMuldm65*9&uB~04A}|1tMvWJv(Qtiv zIfu30i}Pkr1(ZTgcDbX1N-g(tQa>-Rt*kuJma5ckX}R2DHCRCO?-Q(6wLz(CwP=+u z7&J-+b;bLg4bR;CDr?A4znqZ?+h7KRLjh!rZrIE5b$pt1H!4z0I;@8K<2dbn=UWNu zM(M3<7fh8M6XNfb1@X7b?sINPtcbs}jflTpR!SQlVxv-h zP;*dL|NTKtp|)P#*r=AxC~P{xm{VWjtze$~oeJmW%`?5@I&8Ll*8Yx6k-uHmzo(3T zml9l+j@^2F&m&h|I=o0=d#a&xd&;JEqw=5z1IcjCx6`ctK?iwjZViV`tIa0Jf*D9C zgJE8=J1P-dt9O7?t?Ry$PHS^GL>^HUtwhU=TMI=FW-a4uhX{}XxNpdPPsshwHXQ$U zIoQ;9RyT?k<@eVS+b&(YQf$udjBm1je_b(#@6r*!DZ3kN5r2<(P+8Vgm= zHUH)CW1nIfKa0`(4EFAKHE5pO$urcsoqW(v{?B9Y4y4sJjsET-99Z6?&u0P8&5IGd zYFhogtvx&g(>B`*oOv+SK3-#k&}X57f6LY0{|bZoSr-vTcYbDe%X`pMH{G81j{8z`ExFbiHfS3jZ<%lu`s5}0Xy3kLvZ&| 
zI<%R<$%1)oh7Q;B2=_(C+pyvw>3_vin#VX*<4|TPEeq8jXGVv}CJzG%_mc!AmUbZC zV&p*~&k+&j-@#0!b!uq6T(^vtH5^GK%G~#on2KO`fGJI_ktUzEgEFux*h+JnXi>t; zA#gh!34$Qi3^7Y`N-CSgn9jF}|2b6RO8(PG?=YHdP1}T@Y&~jVGbbm&KwHs5t3w;6 z+FQF^L$xGi&O=oY-oo?O^ZuD|zTkef&$6z%voqP~5TOvqJx+zG5XWME zZwb9u!sJ|~#}FO1aIP|iiEO3hBc|^Q!Cl}R-ZB4c26!Ce9ay`gTiyMhoo)u#8|fO) z28Y{b^plIu^+(gpV+`cwqH{0irotLoxI|V!qA;GthxI%;%pGTZM zPHPt}GKbImi-}e}8^>plt-@ z#xHf`C(BQ9`o?MuyZnx~u?{!Q`kN#Z28S{Y5Xms-EN&|O&}}3yU-_xFA|liuNkCUY z@khdM+=UK2wMqe}S5Nd+7LI_k)r&5=vGKf@vDNxL`iAuiaAi@j?iQ=_|;blaTTdy9OmzAc}nL zq`K9mU|i!+fW3zCfpqkA7)UzvU^xWI*UfGs&$Rd3Ia{#2vd!R!2{y@ zCUfFJp_>ZS5HfvEqL+Nb!w}QU_$;4*a09Bqg%yvg02Ujf@5{)qfI|o4YvXm`yAP~@ zz~>t{yEtX_jWbScR*XcceK`9VTjN>D^**qIhn684O%75Vv~Zo~7_~E!e=A1;;_=-i zVa)_3u1Aaj5r=S&nMZUqilWuJ)&LCP7bO%cof`r=%IHE-f#9f``r2-;ZH`a#?@rmO z)ZshR-`m+g82!ynzK#{QnIpm*iUbV#rRRy@>H0MdPbFbHb0q!Dv`>=-8*C(fR`rIj zL0vvDikB2FtQb`lWKnq5r&hwY5y|{ckaF@aF(^<%3?_St$P{`9i8~zogfM#Wv(fjZ z{H`Mmr-2x>K!sbfzfghcvwF4%+zv)?kb&FHNy|=X< zSZDn{)rz>zd!*|Ezv4zC8Ns$Ex2~fv@sE!_lLrlSr z>?Yr_6`}4eDiOGD*`k@8QHa%aVq?>E`fji5QI0Q0cdX}NmiMw97JTd+Co>cb#XB5f zEAOllj75W|&nn(BShh=K5pb@h7pn&)LuLL&Bzr>75_p$mS7gNt2l1E14ibW`7I&Le zn-$@QuUuBzYfPNDVnxs{SlhtC(;m;I^XNxg9swTja7)IW90dMLa`=fE+Y&RRKgGWU zcYiXY63m6Mds)xfiRlWfJufW00=@9$ToeH+q>Joh1rY7=bQ66I()vB>@))%94)9eI z^lGP@Z>+;azaY_2I!l_vnH*el;dEb-o~{xA?Hqc;s^XX>ChP6+9 zqMqfNEK3LACDe+0$XveE3-5L4HTDM&S_Ko209+_jB5E$V;TdU7-t}@+66SsENCQrZ53-y6#$JU6?In@ML(G19~kS zb$P-EmQ0K%VS;ofK{1+P6_6Rz*LVD zG<$#MxeP&3rk5%242U3TLej=oz+q1mX)VWryL#D?8Uf_f5S&$Nx@tupO z)PhEO>Q_uS`~#+uXh}i?J81phg4$@HU|N4DW($-uQDd&A3!-~@g+-H5B6MMZTtAoD`yZnM;BY(~JT*pX3c{ZCp5(z7(d+bbHAdD) zmdrxf-93~_X|_S0S6rwbd?yjNOsXf@X{F65JXoB((oNEES{iI#`Dw+`p8oHpj)52F zpMtDEWhbYDm3Fa~(+k3Sr4_}}I!|v*wA}OIx0{d}6)X@hlO`pJ(Aeu|gjYxKV6lE0 zY`3MX%DUGdv~P)DAILr`kNW4bK6I3a;JF+PS>$wo;S|h2?$Y&uf_A~(2J)k!9Lz{_ zZvGAOp-I+VKC^$O#UMt>aI>oKhYJD%^7Kt;sGnZC`pb%ezDDWk4QOA-W<-oDqN z@{H#@*|!d?55l!orPfcV;_}8(Ct8HWtQVqEJeDlJnS?fVyYsZLTzZ!1rI_5FVlW`? 
z!|@D4(P5~&t9*t?C%)#(Q3#+b8B-|G2gzG&(%=$fhZ?&a3dVkZ{7M~0=p{pMg?SWt zKHAhEpjitN`U)yej54bbQ0&~E9dK!Z&bNH2?M*?Eb>_@uG!M6QHGFoYn_QG ztS_l@!(harKGb<7zDhbg}^5S@wC7WH}v9?j&q0NX4D!7x_ zEtwQZHaO4IJ*BVhDW*GeTAB%z@ivRfdt&{nW6vkFw<{$g@E|7di0-xCSM!vr8Mbr<3(Lu%0B7zDkv6x44?Mp znT6+^{ib^0cq?m& z(`A!F27b}((a35B+phPkf=&1BvwvKWDB2$1HQu-Q*WI4qtFD4uNFABPAlxK5)K4oc%1CBjM{M?0~4Vb}+IH);mcy`Ht_-)EnEoj&kG^3C5O@wM$J6Z9RDv5+IjE;#YXBAq~aBaw|}H~K7= z{NE}ux~b{tzL%ODO|x*Q$mlRE%#GFMcaLHlxlXF=$FU|C3oL+&;Bslq-s&NT?5d4= z>GH=pP(kE>x8zv3XX&RUjKOl#;TXB^k~E6Y&%b@|9rrUm-u>{V!au}a!AI51!zPxs6QD}~Iv_)R{JPiv`t+T1j-L;{ zi+Iy5<19Tb7mrA z!Ak7}c%-SAB{{!6y0KX_YWit$ibB=AONj2B5Dp>v#X$fF zsS`-CG=kLFkET0EO-u~DPqhI-KM+7;oPg801B(Z#`?(GFcMqy?Z%WlL-aR!VVMc`T zTU*rfBq@e;y=H}@!-A24g4N)s-ZL!57QdYL&NYnCOqap*LuwSAaYE;*j1@vt?K)f} zQ(x-Lc`NZu->B#Czw3iJ=d6ttVSvrw;W`!lc+K$$l2Mj*Au0c`GA z7K2a$%~n@uCWy(VzxqtEACf#vJLpRVq_=(@;fR~AIxLa(Rpyq}&DDoO= zXAZGnzi4M{gfo_DO+YJ|3?(&xsBG+qFW|# z!;kmJGu_B)Y_Svgz@&9N1X+$%BEVUk8kbc~{E9?aL08sJ2s3HB#}M;QL{q?cnp#9NZC)N6~PF-0}ByB@cL_6&s3tpAv6HSqrCb8=>o3NjhEN&^vsJxGd>#=e@SEu!g2f6eUUokl_0bN zzu8LTa2tRH`b;TDUUSiL4P5GYFqTnms9|o@{4Npd4+fSC=l5Kj)+FhCrkXS3FX%ks z!DpuW1a!mE(_*jqQE*BpQxO~jyZRc1nZjG0VQfRIb~;{QISU2 z|InRLW7<);wK)~62-t3c)sCH7SlZdL>OeeZ75v_iQ4B|q;V_Ms-1*nTH0zucBvJ)T zC76vNRLc2w1%gIMPSpG2p@l`HHY7Bp=_-*v-yNhW=A@ao?TV9&+cCAo1j3~7ZGR-H z&6PrlWIux^v?|0;Z z9hZWRA>P$Q(a^YH6uE_IY1p;gsv;9E^@21h2l|*5k4%tEJKD->wyE?wJc5d7I+kjm zqY+r)>Ed-9k3)jC!+?GBu$)H5?}+q@Sgj*p6zP=#Aj^jMwTQu`r*$b~*x$`V2>3u# z|0WpQ!^nm-3eoX|;}hN_fR^ zO5})^HSP@%74{bA^uwlgpyN~5z9Nz`Q~nt%^IYA8)*aS>oOqnqh9pWKM%kmPb;@>G zE`}?IqXT(_ITSYw{bnVVcg!r*bwhK#o?yE4_$NVAU(#sm8;quGw<2K;XZNvfUJ@noZVR#2xz+ z_JOc-VWWO>HHyBZTo%GVZLZDcOm*Z-)6Fm=^aqzt`6!TO2*50|Bl|{_z(6@@O9+k7 zuCQgoz>2O54#=DPL8*vda~y-ME2WO3VoMoIz>EwG1t%ihZ~W`u3XAQ+Yy+rTC|8MF zRvZxZ3SydS*NXDbc0XMX!o@muskjpw74_*hlX5{e$T&)Lc)ZHx1fQ{>9SFL6s7k(R z^YtE*!!GD5t|I$M7FZL4QFCiP6RAJ_94z;G{%I%~7m2_#zL&x@NY4q()i&dNMJ9OM z+?KEjYCdZHPn62cgf_Hxs$FcDrO%A#oRnYT!jY0!r)FtTNFi)!q1_UQjKG>+-^lfh z&^#!XR^(D0!}pWT zyyf1n65g=n3SnJ$iLB)#*V?=2pf3InyO@rYS$-6$!x=nn0-t6Q1@;2+7dRMI&A`%< zzG4om>8GcPd5K(-8Em{>gf+7|l2X_Y_-8O_FMAFXYs~=63av0fZy-?1O*11-a5ZeD2HUSk@AkYE@329GrOTl zUnSr=Q@vt|FT-$D$uusY^16Za(KNJ+&Aphps4pY?%Sj*E?WF(qGY~};nl&p?jQ-kRb>UYK{`jy&?R`UAp3EE)`UL6!w7 zMbyvuo;%O8nsHJr7|1&4Z&7mB9+HfjIswm0)ZB%o*vB6k*TW?#+V|_Vu4~RHs6moJ zjsF3iKw`flV4Dn+#ch&IyIwV?1oX5R?q0UWDuI2HB5&`c$n&Cqg1y)9lBuv(%U(!+ z^^kWfF)lEJyOCU4o{HO#A`t*UkuVA9KQLT$@n%_UPGgHhG5 zsFN*gglJOMAVvM>o$@H&vwo{9%`sK&=-n@-JkGg|bb2AzgU zRE9a957nlxS?pjeTB7n&UYPX)-dg$k`)ES|m9WS|!E5~>*x_C>TuRa>|3M$vH<=HJ z{-B+|At%B_o68r9LRa_~>l}QxKHnHe6qNdh5RdriyX(8#uGIEh7%R2@b=rPg`+-{j zTFo>u+ws^0zP~0W>tkUMo}CY7{exQV;ACW%7L1#_HeHyvOmSSrI9O~-W<#2dC8CS3 zMQ75(0s;+)K3M-h@sdw`5-*$E_cWWoIKSWsv-`4n}>hu*OC|$e{Dc66v!3a;l z!^M_d4q2i3CJu~$<&`Ybr=sMsijQ3ZD=;LsWz(rGT3rAEjt1E6#jG`zCNW*TTm}^Z zFCJ_kBLmj$6%N{}^$G{fWEv?8F@H6R`Fj|})XQ%F%dKMkGgB)~tCU^dZ#MIFuvgpF z_^@GMSe}iE15)%I2!J5(lbmXHZdeVe&P+$JAF?o6P@PIpzZ&iQ-)gjTf5(5d*zw{2 zJuPiLcT9=0N1qSt?b&YngZgTjSDee`beZu)Ni?(*Ho6|^o?gzu&jR{S3FrO4ga z733}LZKVf*)!iC@4gIn5DtH)}Dzk=_XSeqFm2CnFaDr3mX0;Fz3m-^`=X3b=TLOfG zNE^wcFH_Wz1?NSpmQ(}Pa##dMQ`K%MU^Duo)C%VQ`$~N z0W-s@MX$(MJQ2~qYINA5=^9EL_1agEc+h_i6&x z^`aMXEX-Cdqr5$w!EMcdqZgtKw<1nGormo}pxW}2kJr#1~1!S)Q zqr|dGd>(ktiVd{htylBzu&cD? zK=A$B@BecJq4%Kk%l%wQB0(w|V3+Ta;k>Ri?7cjVEjC9i_nBN%@*=j0O6uXnxhqC{ zN^UA_e6^e7Mu0jd$xStDDR5YKZF6^EFNJA(*zl!i$nx$|2Z-QtBmvK6&7ZR+AZo38RW*{ZWTSs%mE>;QX zI<|Ma0b90mY{#GqdXyr|V`T6AK^a&}?&x+<)($cRqXrC1ME>AaD%N@LORy|FWXOer? 
zjJ|7&e5Qs`F;)kabL`m}Y=k}Xb&L<{5?8Q&>U@(v@vL%*Uyb~KHS+(}$p2R(|9=)E z{|Oeo6vjd0-6lZ~bbG|E3O_drYS?uPB9@55P=%_g3qD;sy=G;X>3lqPRe%pCi> z6S}?HySpzBcYZqKAiw5&@2aH>&8DbwG#EE?kV=}b7-Ajj%WVvK@^DVOrAI&EivWrc z%tJB#*WV|C8mk*knk=eA+$o#gw7&bRey6{!zv*WD$aE+MPxw&mb6PEzB%|Q6+eP_p z`hV9ovu(e5BuLC@aC|>qAH}i;$5spREoy%BsJ((lJ+98|*oum7j|zsevCy*`bsbQghCiVrrC_g+TFEL8GUa21L!A*twZs6=tWRB|B42#T672l zo#3DIy^^AVeF((ArqlbA9_tDlgozUa`=@?}{rZ+HeHa0X=3f1#P zHrsv{<=5%uf+1}`+iLvgGe3N}^w@x?x7pw5uRmcYk0)p86;Y3$3$6OIPM;s1J;T2n zKR#dg|HbFovmc-RW@F=rr<>1zc(%SlqV;dn{OZpVnx!cE&2XF~SMjvA z+EiDu|9#4TpYh-4{Pzd``y>DTSN{83{M}gRzZ_&GW-(Iz*kjaiF}ad;P9&11;py3yr4VM!oFx>wMIW$d#!OXfLfm7zv< zY^<@<*X+)+6nRGbAM_$y%jbDEN|^-=G6AsUotQY#qia#~OzSi|+|W3QCx)^%uObtF z7~UQ8RMQv}KRQ{Cr5~oiB)v(s73<=WYqHWV_mzpv^b-FjJh<6%NZ9@w5xK`{uCfdc z7=+Yx(_9joJ`vf{#8B`mCpA5kuuqGOfXpzp3!N-(`}SG`_}cMsI_CK_Rcm!nGkL~s zis@2TiUy51)oCH}H@nU8pk&Js*F`;6b^vBnZjtp>ehr>)nCRRkW5#z;?2Suf=f)g{ zD*_8`tn~d6^raK8_M@}om*;PHPWPjOv*_gX_~(PY{k^EUb4I_L-RSMX`K#kM=Mj}S z-8nk{B|3f??Hv6=b^ra~Xs;XX|MBE>|LiO}K8+4upBx_S)3bx4-NQF~2S-0eFR13x z@p*K3@cQ7Kz@8tYNvf>i4))Ig@b&&F(nFl@yf`>KIRB*^y*xNS0@RlTb|*U7IXyqv zeRH^T8lAj3Jvlzxr!F9i!qM^3!O_c8YH9!V{?U1#TBc{w{?GIyI(xNqc*rB%c|&73 z}&gsD!Cg$boF*SgR zr5eW!hUy*d8#tKuaCN8V-X54uyI6)PiIF3CT0-sKs0YL3|cu=!=^dY`VL@=h2 zXS&{CUhh5cTdU1>#|uhx2Q3Y|+BC$wvLMfuIgVLx!wJcy#s_F+Zh6A3MTjaz`wg}) z2qqMH?ZPK-WgdywzR5vQx9nBrZvnU7EXB+FFcEvF0ApOV0|NE8y`<4IumH+$i!vA?pTO-XR zjL#w+rjvAWX9#rg8^X%9i8Q8b>xYifqG|xnT)8(WRar00-lVjsZ6ZE71?FS=^|%9z zu8&X~8I5$5W(~va#As6j7I0(`7&r@u=WkHzWKcnLm4w(B1$I@-#YT6R@krS;a4or} zz*I|7vHOIxm`2phm8ZFvC$6}vAHpE7!AD1oM2K@~n#q++Y>Pz0@CZ|rN1TC3vO-;^ zpI|&{SS$L6Iyn+?`oS@Ek)HG?Sv*$equsJETQu=_3$<3o+fJw9fpIYV=1T(#R`>i@ za{sG8_tAeEE#|9q0juf1jURvfp+x^}Y(D*!{`*IKex?6@rT>1V|9++a{yWltfPdh#G(z}gRpDLV6!P^MCvou z8Vlv7QghnT(qDE+VzW%98grykUC!7tF-Ded(^Wv$-rEc-NjyhbDYkNwVsK(dK5LFk zrRJsmM&p>RFIfx00o_60YD9KwbDnU_u2EvT%%}~AQzr%S--+~ex{1EbfF!Hr*r*al z)6APWx{S49DsLwvV<5)X%>js1%c7QsN?o#fl7{NCq4OFv6C5Q0!-2_`(L#uKqWDcP zU?oRQKjw5J&k?W;li3Kj0!`8x*78ot>~64pSD8WgLC;MPS(tW#)@`6$N?6;1GL$~H zS{V9I8{n?-sxZ#ucoMYVmhq&Loyi;4;4w+DtXt7W$K+MNlZyk|neYw}AHQH;1vWcwmq%`wBNR==M#tr7LuC3`I1bxu<dbmtK0@(2fg zi#VuqQ%<8~u3c{%Hq5XvDa@%wSId|Q1)CM=a*3VJd|Mi;VK)GINm&8AQFrLX*pTBh zP8kp#mpL*Ti{bT{trY{#u3^S&)n@*=XS2g0CS!gFi>$kBNxS{h_-J8kBX5J4O)qn{ zGH;&)VjibsnALqy8|OTcEKF0z2BMC`-i&j>AYD2yv_QBi?XXf4x-KSKCQl{v(p3g~ zJrjRNbbwPkgN_2=5rfYap@4!^C2$kHYD$0FhET-$(%EVe;Td0IYw}&;XX`eK z#J>u}zcnSutBHEdPz>6!Xp`{`TaB{_fzCI2#%QBF?$4DhI41BRf(7k9;D-pMNa*dXs@2Swnm!Q&|X0++!#kGd38VtaN3aCCQONUazJO8Z3_=h>H@6%IhVbLV7T?XmERZ-gV~b3 zHAY!}lMsn7yPWv8>6HZc)|>cg|5G?8fvr64ioT+h3u@Kmvqc8<4K-08jW!3TIPapbt7lJKNz$+X7d&Z0fJx$q$RH7{CbiO8+Dx^9+(A| z&&?FUjcJ;hdiHphCd=)5gKf16I%~dm?wtVR-UC-!IOcO1Hqx)bkX`JDQc!|5Vq@cJ zE?1k$dF04h9n{VExfxIt?TH0S=D{HDUL5Xu?2Bpini9DP!vN=2D_$0MTJvOK*2zW&3|Hw{VL;Oup2;jNL@-=$;r)f7 z*Sv-J-;TdK%*H_^Obr2M9`ylqo1gd3sAJ`=<;9e_cY75(?$r&pSh@3FNsPh&*=aa4 z=U{IZqus!t$KQ!{zM9FWWYlgkGB(xBVC`?}|G@n40;*Ou35jvgJY!+Vi-V^tyAIc& zFX%d|`CaGVrtA2zqU(p^urUal54vujuI&1mi}dn&oj=nsW3K!9#0)@wt{8%r-}Z}_ zhoR5QJ3Gzp-OxW2HguZZvyamZ+n4@!{08YZ9w#3;*5!&hsyjulwu_g;6$ZkaZu6C9 z?hWamyt1x}vn)?PalA2k;TFXMz8$rm(Dx_!(Q+;et?A+cHR<1$87osB?wvP!e@3{} zL`>bja9Vs2mS?gSU;3hrQe}fLQ{6AbK4bbk%_r&fgFLV5GCVZ3^H2x9)hI3V#9Rc3 z$I^S+;=H~f-d;gIF_G9)OYlXUa}98}7I`i#XM-)gk>bR$8k!_i&F znkR)Q_}lm{H?oVyn(!rbcXG@ZbDiE^YcLwry^JOA$uuM4k(QW-rjC*9l+`-hJw7@c zygEKTJ$P}rKRDh033AJWqKo^%{LgoRPx#glWS(NThUOqR?0L z!7<3z3z(@lIzls^h*lIY7a0v*{V^Ta!$UA60Q{Lg5@`YMv#$hhKw>F0+$=J~d_Mi= zWV*hny<%GmiW}4K(wc_jwN&bQS9Zytm&uZvHi;5rkqM2X*8k!cr52=o*Sud1{5GA^ 
znp8m_=&K@lecW{=Iu+Q{G0dM$7PzYSU{hC+2^;sTenDNw%72G+FKR@5eGpXSD9GLMC5Hrhlp)mq!U3BlMFG#^N6O!1WGbv_wNEi zfjViJKd>R2{U5F2Jt6)Hd+{&WYCT)#@zi*s-x}*|n>YiZ-2CVc^X0U3$ygyJ_d-Ew zUxpCd&nEH6Tv|4Eb4NB}j~2=bp+pA9%_4-F?u3T624=woCbq5xD5gW{yXzw{ z;(0SL`uF(g@E6{qCT`U*aS;dHI8A4gTtWh{GqZ%YJKFzw|FlqzE=Zaujpe`_O|p#W zk>7=7`MF`WIR1&5lx+AX@4V4gqiv6Eju*_oqa>L3t&)U5D|jB*T&HbY>vAtTHB|#4 zLLGjxSjco3k;D85(m3(E<(4;k&m6JIs@tIGy?`DsJ7P=S>G@^S2uUrewGBv`PCC5v7V^?1|vB6&S{jA1<|RHVb7b*=9f)*!=E<2YoKLCmdrOmInVMoKs_#&QO$-L9lg=6g3DAY6W}0NvjRH0OWq-%L>x4 zj~vsC6MdtL z5~fuI!=#D7oWxi8_BvhUjmC~m>>1CoNjONnB8OpO!{K)$4g{ZTunS0?X<@GB@r@UA zpS5=l^BfL%JdKG^mb8y1N<&_zb4c!JD;hPlG;5*H8>U(I`jli~$VBSs5bnuDv^M4n9HulGOT6lN|WXOf!P0spd^|aVET|b~G3@8?i{o09V{+J($}6Zlkc)K?_mXLf2+X zHrY5i<|+cKR!x#C!(9jxKhV(M64gd{daJmcYylAMGN2PdEHz7Nwz0-~Lrg+pPBNR6 zwszc@P3?38@vRazo#CLfI-PFg!ZK%bbTek3Rmr+Xa8KZY3zt}B3L0R{plx}5X2!-W z84{+b#f3(f4W?Edl!Yg0+{SlXtX~h>gt>kKe_`bg0Dj%A4 z#hzWz_>kWrn`-QFL{MF+cw4$SC*Eb2Nd#)Md=2%iwwxEEXD0#Jiq=&JwwzLfFt-|a zqYZtA@ig3HY$mb$fc%$rND{+JA>=rfQXFJ%s^+HTT--CLA3h?VWaH;}l8#LvJDBsF z9GvW9&Rq;fu11?aUj^m{6KL7T5=FmHqR!b zVt03q1Yf}72slvgz-i`3h`phEMr9nr9|vVFR=$l{JeLS+I~F5B zn4?Rg7uVVO-tn7rvm*-QVB;?-j@WY+ba)WGQAvEZu_WTQN7L&tju_kY@D`EK1C9b= zkyEk|bY$6NGwBk(SuB&yu!V4H=uK@eJf_On#7Sbpd2bP6RCk~>Z|NNleoxm>m$6x4 zhvY_LxQ;W;av_ID0byKj%5)@b68VkzUa-xDCMgC%njzZr;$3bHx!k1xw_{&CCg4m^ zM{CI9W{hnz80AyV#{Kh?NHA9ipKcIC!SZ?b;x@W`?k;1;ZNtb37+je^3@5@nXL$uC zp*`II;xz=x0vSQrkYDIAfrx+_xRqgeE?F;zcY<>zy^mb~Z>H%djwzm9N=$sd5T|0* z-+DK1NsnGb0JlK$0QVkbAe8_pE@Lh}R|4nG8gS)UM#<<5=7Z3cwR~&#s9&OoZM@; z0O;Dubh}2}dUyiz z0gusn8$(2&&25QAW}O)&;EHc(Cgzg(%0`2Bscmpi$F<01>KmV$@hcf?f`ziRQjOLFjoV+6Id7h+&O%q{ zvfGr-tV`75v_MgA|%zJZNn01#mwb8R5@2QO6=6wn- zwXT2N>Go$K~rEVNoi5XMxvB(zmg_aFV>oBxjVl1)iN1EI!x?)uj_8Y<%+rFCN<%Za40u0{pJn!V9#O;7U+8TI# z7pucD_XCQPxm!#8a0V7x1BY+8x{q^i34tn9hf4#fxMdB{N|*sW|C(pMZri2e;N;!$ zNNE#=zZIP!jRbQV9L$T>fSZZO9{VBGhVmUM&!mKMg!h> zaH2)?3ek-aGbWE`S?Z`B6D-Y4ri+l#vq+L!RH7MqLSz^YmR6LWn97R&Ln_IgAtH}p zu&zXxj=!Uo42hUXOUQsAbfEC)99B5FA$EXGe#V)QXSoC+V+gX#%t@C1D!V^{6D~%y zoU(gsVyHH^N4=zR?0pMj$I_--Dj<*@aH-P6V4A5F;nn$YJIv0Q+ao%4#+>6Gq)d<$ zRILIn?XkJinR`*|C=png&Ez`79LG24kqR%ui)2NobOlYvXjuy#6Po(|j(Ob@37yFK zwOb-T`I^qn(y^sb%y~N4vsn)VNR37T<(%6h{v`l07NFaV21Pl9y-fgv&>L(PJofG` zS%ji6fzvyx5@`rkgf83$coGZ>M<<*_M^PIhY9Zl)$xp2zhDMAG!XJLVLa1I-WMuw% zqW@NBq`&8gg2;|If|c=w=+<+|eogKeW2P*B8Xq zh_ddY$t6HzbD`#YtA@j2?eqhz4woOlf6^}&8?w0g{{D<_lKd+A&*+NQKDr*q(y7^o z*C8%a>_=m^=@mh3beRBX!F0Mk+vt0DjH_u!JlkA78mrPJqM~{fVyf%P>EuP?Dz4tgefVplG=P8^~GLGL9Sr6tiMHCs2?uKxnaNt`Cq0UDSmQTIgY))87&qH*} zbOHkww{kjSTb_Akfv3K2a@5Gl)C#gk%;w;3e*b$|?-`P+xw{fa67-(JQj2sy;{6JM zGCK)QiK(x$RmB$FNC8y>ksLqyhDAN+Tgase9ULCNr#*%j7aLm))6b?=20WQ1pEIFsO%OC$Js7b%_-pfI=loT3 zi-0ckISMxa)+z1=H6y9o4UBEKZJ{=`A?bg%Taf>%w#zIsmpyR>-SKiJ8dljDAbzDZ zSG9Nh_3aVfw#^Zt-DtE$7NUwd`-8gD@p`f8x>4-H!`t@^JX}wLS@3O31SPHR9wBah zGt>F&6o=FZD)~Rb6?sE^`i~l-8rnQyy!ZCKJbwL#_bV57K&)}aBs}{EPl8b{K782= z9k1R2?rdAbu!@r{q_1*MAloeK1gCPY=o7`h#ffL9(D3wOmOxNKQpC!_P{wSz5L9LJ z1p@N{bJVA&&YE-+*rlAg;ZBw*65rdwA8{wn}50n{U84dUCsc4D#t2}u|>_)DhOGM;-U`rLxgqjO=4IKpLhpE(J z36HaK@@AFQBmrHNQ*DvOvP!-}dZ6s$*f-@03iZ(eJ8MSTW+pDW5%s#$T%)|6E1uKb z>d}2QI?<2T3jGjcQ-Qw-wG&9-RS+ohPbW}kotCl`B4hEMSn+>OD_|8bo=U19yS4EuI5xio`q0#} zu6!3b)$_ODjIbWK?V_yN76LBbMs2SVLR97I9=D3ElYiB^3L#8`gv8*Jq3I!BoTyrF_kJd2ECbm2XAoRq^un z>^i#@?R?Otba!?~n}QKRb#D^|dvGi=t^4=Ifg}fqC{;D@oJH5hr4V@n6Eb&|CKpKkvAckje zP7k^C;t$3B?`9)hcdj1KQd91yWI_Nc1im#5}zCAeFJAT`Zc2D;)4@diN2XN^;u3Wg} zBU2wTCMe}zX7e*nH+!sBel(@We zJj~+xm=(A43iYb76+%+h-Q+~@1Nk}rwRmBb*b&Xqk*oV#MKlrZf&J}Y5D*1)& 
z3fwWhSz~wWoT!vV!2#3+(kl3L0;RP#I-Nr0K{g*C#Zxs9u7o%28m%AF;Y}!&yrll9GWKV^N@^*3|P|`{OorVrl zxKwX^D;6t&L?ax;Z0IeicNySdG)dy=a@JO0P<{aphenao7hCTG*I>NvvmM1?Oyn9z z)7$o3*E`Ygw|)6|X_CxG;S-68{9kRrEu^QFX>!ImWfSAx0MfT7V9rIb;9+9>;ht|Okfgw2&fI%>TNaph1PkpPZ zZ!KI)&e}SXxAkbJ&l{Xx#VjF*L+lw@O z4+8DDN*1Px>gev>JbK<>N7vSpB4PW}X|BrznS56%l6e183GjG2Pp9Qr=!Wkb=_Z=J zG}6E$i)$J+Xy{>;Gp3f(KKE`08{e~syFw>ExEF{wU!FsWIIOBZ5uGUe2 z+TxQ=@{NI|hFLxw1p(Kfgvx0s5B)4VaXpX>XDb=LIrXz^>xqI6t4K^V^5nbaz=k{4gA5F+hjG*+&#Ux>qp!p@bMdZMdp>u>Uy-;?_yxf5` zHnt1zN2q3DWNg!wT0OG0ALEn6sS23hp;sZdBky|5$@4 zt_&;8;n=ph-OwaMW_-j_l=TKGW2L?)UnJvf+Lo3(Up(qGbjo4ON_g9?YNyLA*4J0; zQqHE-wXZZxX|rC@YZE?F)TVYkT5|KA4?9(KmVhE4*ls@AGB=&x_I#|0bv53_&2e$X z*qX3aPyJS__va#?wxw7n3=a`#DkBQ}yM>S7=k75i`1yoD68wC8GzsnX`j8T)ERkZ- zr^N&+!$0A1lBHW|zEK#+wV=B326Wb7L$Roe^XA6Af*;HVTCGNV+j?>91)JuIyUga> zA;MK|F}yFKOKliAt=_hiP21b`n(8M=qbzLjf@C|oIfG~0T)i2V+3TN0jZL}~IOu-% z9@VMYPV(mU$?<bZRV4|`&oQk&#wWk6X)qe*H>mYl`xkB+G4`+A%a|`k z{XWNh!liWVlNjCA-v_sl_0JIxl3qcvYl##Tdly%5_GoyGSOD^gXPy52LxK;`|9C}*k7n#7 zLeIZ*Q5qjNLLV50SC*C`EK4uAEd5(4>(zQamF?8^oC9nUpLtgEvxo1&)T^(k*Ht%a zR594-2SQLCg*X#hk1Gcz))X-Nynq9eLD@~9u$uzk)Qj^B_z7sq6dCFx3@~uVRkg7m z5NXjCXzL<%?J|0Yoi10`E|2$3m#1r&qr=hTaadhui-FElJBZO?{fDZ@o0==hF{d%8 z9OuyW4jAv8kDSWhL+3jm?XgpRbxn_acB?Mo%u}qczG^~a{ai7}k*7ZG!5MAZ3<+^S z4vVsPF=A%Ub}(sOpZnIJ9zgA0?Z~fBYMu6nwAQt2huoTfA8VG!8RhynfY>w;ow!dL zh@1?rS-Th?J&+X;gMn;0hI?TYZMMV8d&7IqWA@mYuFwDU)cA_z+WAj>p-2DitSeb7 zrvd`X`5hXJvT@y-MtRZ*`&wmxek3ZHk0dHATT7Np4nL^*wajHi#nH=$Asq`2P>x~d zodN2@b&kjv(2+Bv(G?Xg-_V#y>b;OOt-7@F$6DnCR8z}`xVb`VWaD<^@O3$~=F91n zb)xUHY=&^Y=@r|6bVH7x0@`@I~ToI{!$IId^mv0Lsc;k7?1~SXXd(FBjtOius z*mKr`Xz&IOyl2H3Gv+<(^~8D6-qC~B!KixD*1Tx(+|iTP>gfa#UelN+`Cn5B@g=!( zY>GHJ{ef1n>*z}0e(~T+{fsmzol#m?j27A*Nrou*dvG&Ca84R=HrVL{8-W|YCr<)g}R6J@XUON6`jw-7g7rZMfU8YMsrBuu^q)soH#9 z>;{+QQ*T}K$}22h?KZBuI6_HQ{nuNt=%93~bn6?YHrVE(wMhtzuAGAU;Ixkf!?(q2 zS55I^8$vkHKFcqSfP=w1n>C%xz51@+NuG1a49Nh^b{)8Bycoq=Fo4aUJ*nHSgY^oF z>1q!g+Mp;e1kXM8c`q_pHOdj2{Z+H5z_EcGY&G={p~m&#hwxxFAp>{WbJL3vaMw0_ zfHE!GolXoft%x^C-)9320?5(uwLm9^P2TpY55Pm;8a@SSbu|4v6G0G@3 z`sS}@d~!wO=wv&)8k^vhv~m`k*0!jgh@Xy7o3+}2QeNc-oH8Y%NVSpxdxWlrRKBfm z9H;nTZo;V;G5x$aWfWh1^H(!YxuS8LvYn0LlpCqoTCJSMnu5-Paj028-zz|xkc2sx z5b&p>=4P!npr%*30W~Aq7d+FN-m<5~c86D-u&3vCykiW4?L0clkcmro+qUNMh8kxdF8II-QNinQRB` z*Y0JYWT+&yGZiBHb0;~?x|dD@h1;5uzA5Aa2?JkByD|w2H2NnJ<=NoU+R) zGQ?T|ItpZUL69J}jfLujwYY@z7x_&M(54etfwSS9t(&)xnmasl!}V5d)AWw(nS`s+ z;rp$6+iQc{E$ue0`ZXrG+#Ul3i}`zzkZzGWcXMD8Y6P*#^cteK9ThE2udDkf)Ybdf z{-36?|7eB%r|IkOrmnx6w!TidYn0|l12CGPFl{fay(Ss#1S%pt_P*bmuge0|?bqul z%R&14p*5!%p;lCqN`cMvnglX+RxG$|j`L%Fp?=_~S0i35Wb^3!b2O0XV^djarRRM8 zVsHN*^+s9=(03B7UTij%w|;|B?kQHo_!3%mMQyr)(4TGS)}Lx6z|fDe>TG77VKIS2 zwNP(A=ic_S*Dp`r96e)qp~KojH_89qf4cX(r(ZSHXqyaD*?BLW6E**k zaSAE={qWJ=-v0jn-qXE*d$PCp=%JQW8;y$PsJ?yp{`?`KOg-@B`ECg2mec0N z8m3G3UZA}&;lKw7c93u=Si*!}lAK6*0tvm4Hb9}J9T%4e$@ZUcbldDfeCtK1GRY<; zqCP_6O;;dJx=3zrZnh<3Omao@aIqZQWWgpAS`*UHCO^-|h%8R*KeG`M8jZ&B46%lZ zo{EboCDetGceDlb$d}fp~+9b^84>GU(U*5bOLfz3(RPqsH@b~ zj6@H_%|I}=1>5d~QQCcWh0o88`AF)}RKrIgcB=^|ym56;3w)P0*+Nf+GMkYD(PdIT zrt9a?3Rqfw^D3K(VDf}a*yi=X{oSDzVi-3s?Hy=Qk2&Xn-phihaQ?I7xw%v;U?r4u z)39=e(~p2j5ne-|JTc-0~{JQD z11B?sT;N5q(2oI-xcO_44%HRYq%VEi`}MD4zGs5Tn&)cKd!|=`mCoBM`Ek0O%I)+s z+hH-;Y9Ry1>{de2;?vA`uGa}R`xJhWe2eRDopp^R!aM}TB%u?5`GpFU1=7AFLiuv8 z!Afb#JrckS1gorYJ6~MXi2jZ-C6wFZxn!A8Pa_ke)hrSM_KnLd0)%Tz*z>|HpoEmK zrJ>=dlcMdcfq}UI)E>r_&m{jfk61F9c+$ zf!NG;BqA(()+8093p}_I{j@0S=BY>6@dnjkuM)j^j|5I=`x_4GW$?^F1cenh^?|`` zJyb*hJF1$fK&ebM->}^3C=EuaPUSYU zR*6p%efuAoBsM71`=jWvUB@<=B)_yyQqwH0vQb)nPHc@ 
zS&nG$Z23owv?P1$`QG5*MAi4(Bnz#p5RHjI^Rzk3EB?bcx`$BoPIx60wYuUzIEHvQ z8rN~Fdlv7~zbvy-Qz&L89Q|06jN;~`%56!gf|0OhhFKS*J{~HxPa{d8I?9U%VPS{k z2x%s63A=EPWNH$ikWKrL6qDVk=~&|+rPHDULdY_G>eOex!9fLF8?hTW>Dm}vH))w~ zt%x$ru3-bZ2Uc@Y1LlZ%La?ljfITC+2Ed_N99R1oo4W8yI`F80zsdg!`?!A!^R%?0 z9Qs((GpLDS(eWv^U8PMx&})}jWcy;o2rebYOUc?>9XT|zS*k5UpZeZ zjfxuQ>(2HoUB*2W{46 zG;?RKGjm?sy&U$8qmF>-Yd`KvXuUXlkOQO7n-~Pdyve7_`ZYy#`gul5<%si$QQXQH zg4O3^V+zJwjkh7O&B6)3H>3Dj%_xK^TV+Av;sScH(ELX)8_)3XyG<|RO{HxO+Rs73 zhTA5ENezr!>=o9mu1r5MVZJI%KCXwE|0PymLi`%B0kzFlrD*m=`NbDU&|+65AQoum zNHuzu&TYJP&Ax5Jh~wttAlZ{7VHFTIvCh8c5c3ji@?A+Nh|nt>hXcFF{A=}LDI9-| zj0crtY`@LDL*(my6z1By1|v5ysA$lGor*KPc^b*tj$SR;YX#P`XVX#qg09ig*MBUD zG)3cA<4r!kQBYd45mMC2HVR2g#v%EoEM`yycLQN2^!|HWc%-P!Bjh3@VjvI;C zaVsoCxYh%6!q)z(dD{@0DcMaPHBN})5>SZdrI#cx?bVTu@$T;45Nxjj(8WlSg>f#i zVmcI;LztY6a2f)5v}Plo#@oa)$c>HbIyaN=Xm0mbIz4=kKj%zSas588ZQ8cu#AC(c0*869#up=rB)4dDhW@yBNHnTKd#{lTVtY(e#F^FQJWp(2oN%QtUBrM!XTA*(Oa93Ua zi!;l1lf^kiX|{YH4#1-~SjbVYOfGMiXr8UZ7VLeLTfg;cw)@7un3zQtUQbL72Npux zc6sH7Tl2WlehQSuc?0Gc=sjWZ-0bvi$U8gQMz%z*Yl7|B)gb?LK2K(4+qTn*V4H^C zhG~R^NiHNP1+pVa^ecqOorzP8J;U;t;f@#v5$D*`sj-sck$D7&3P6%!`8rQaOT=^k*`^Xg0~)W z7lQ>tgpzFZd*>np${@*wbx#DKG{OqC$Ty;)z_Y*ihDh=xlPMzwMKF938t!S8HUMD7Pzn#gU4>LH2Qx(pkkJXKIy%N>-{?^&fH>GH|(R z?X^f%7N?<5cYi$o_T>1FuMS@vJ#W{`;Hld~?4sW3(c70)lPlE~r2_frzm8Au02h@| z<>d76^!t+-QRSAT32uT)Yub=L7>5!9@%Fdk(c3bp@?L#5W;I&8+Q)$z?i2rB&xSwpQ`4^8_NUWZXwGAlAwfd&V#f2RkCyhD5+m9a)i3j-IpN+g8!UNtFf z%V;59-0T_l4nZMLYTEAbG}nHd*F~ym^7^C`~;Me`|Z)Cly7Yzoe-l z>7i4(o%M6M=Oc98%ONpNC+DM-Zv#rHzQ4EU8I{j$X0=#1A5%9Agw$L}%*_uRpxvb` zR!7Sne>z?i#e24;B4(_0HA-8(UQcIb-|OjUl9iX$d6r_2ykeL0YzA8c0;GlWX@9m~ zaV79;b1$gYyghqV0A)a$zgxG4aXz7OrVHBoF8h0ClM{QR^T&BQzhz%!;>U7a8qyB6 zBMpRpDQ^b!5EpU};i&%*I?jhg!X9!!mhE>HG-CiNj9beCkE%i;*b^toeqX#Ldq=Nc zAHAv=*O~qr7E^-~bEC-Y1RuX8h@)a2r)>2?y52aWr=fky6-fQd&M^KO< zelS2J3Z&g7UTQFaZ{}d2JXJhKYh?)|I1!uyTU$HHi-Ouuen{sz#3q8ZgRFPE*ZF91 zb?{^hK_o`mbg@e>%iRf*2Tt=by*`?bQ1(MUIpBrNZf11mQYlm~7sagqgqAD6STLmi zlY_0_+~GkU;BptClE%A_di%Y-hkUR05Gf|p#lr!A2Txe`X18|%>^+DH^3{_k_;>&7 zr+fat^gMa`m7e{7n(Tu}ur}OiCft+-}xe|GYhvVf)NYR4b=J7AFiCd?h=?3pt z$qC8D#4Uc5Gj~In52>GBwfT;YGiGc_O4^Y|6zK2Hr_;rO^b(aNTD~OCEPqR! 
zJF9Gv9ML4lNm@5dOIzKo79{VoL$nW|TOhaQ8v&YT&L!O&2wI`BP!nvjU>p0r-f=P2 z>n0NtrXU8{W?}aL7cfqbdQD_mW@-!ef$5MYg$~Gk=vrcM7CJ|@U!F@E6fB0w0~3O} zrHQ6WnufES*p`Y-u?(_?j>?%j?1F!H7G?&lYjcRIITH?KR1>8K;C~7yyeU z(uQds%52V4*S6u|uCH!o^@CyaL?&V~~kPx&9dy_4` zOvZ_6;gDe(XIVB{&h!iio-=%!03x$;Z1k|MGxTw|kiD4m`R>xyUgS44cO(&PL6RT=Mi8B7 zeo>5VahmKgy}LkeN6tj8bKZJqvUX~@FmES&l``pv zUQY=Jj^5}AAk^P_Pz(m;xLB0hj{%mk_0D7(e&({J`w4=)6ro{?nZ_d}xJ)*waEHQ( zP5FxH1^{bIPS7Q>w9@!z9#}!9hZHENC#l zs!AMdION{IQ=>~`(xc|qG`o&v-cuthOJh!>$qPO%iuaPSkg>OY_V27%^!oXScb!(x zZ>QQ<6HJ2Sc{{5*U064Jz|t-20`@y`!;SED9?iocv*TM)EO7Nt4p;BdlXoDuj z2W3YccFf}orZbB1x#|~%)Fx&pn~!gem=#~Po#e;4h!qL?&$i?p=wCl@!%QD#S8vnb zRBW3!dUay_*+xguqmIJ>1RTkBuhU!Q9&((f)hw>Gg}*DVq0QS zOU^DPw1T5u*Z})=6dW33L5wC9{8(4nGE5@PNNn9@o{Ean5|(zRycEnkd^Dld zZ~LwwCfr!Ma5J#51tlugl9C9`wlIX-CbZ`cc=1dPp=We9W+Ri>p5vJ*i)aK%5T^-- zC^42<;!!xp@-t0l1@Vh~y=A97`LkqI7BstMPuDb`mPR}fx!ul_$DwM;7K&nOAP%&* z^C|Mue<QtqVyv6K@FR*>svYVRIt4-qT<|bj=>HNAP0B8J|~m!<$_ zv*VK%HdirQ8^PKgwP6+pc%)#yz~H{{Rad&Lv5pr|`(O6DrW0mYgi~O)hmw>9E4mB` znPxXS#h737mUke}9FqwTX#@Jg)Gbb4*ooGLAm^`L!(-L#=DcIu7!7a@`MKf_#9s}+ zW0>)uqhr6lW5?J()bhVGrixC<;wN65-L5#86Oz+eS7jnf#`rX*PyGsK8}JV(p$Fu0 zHZsPHhNY`?U5XpN9t|^V#63`#VwSb&r>IxMte@FTs^}`f(y3u=C>sv!_fT9Ab-MMz zSGfLDiAkC5ibkgND27V1(3+K+UV=qm>VgLVj)AvO+`4R_t|xPZ?FHV8kgqz7dB&3U zFi56d$d+Y}5{r@~hpe8FXK6lX^PC)-tr7I*Q;3!Y>oJ^(W3gNe zq1JGGUonSX&&670PK$6&ur@D6MJxu9h$HMP^z#dIBJ-tovdvv&83<$CHfMZSjsu8f zogSBIKkn$!xFL&Rm?6giwbSu=mMd<5aea0Bh4E+FsU18duI;Kq0LOiroWs+BXE0=X zi+LmM4@F=ZDU?RngK}PAOK;HhUW}u1CFj=XcP=#~l ze;l>t4?h0$&x#N+7OJZlL6D4Is+0V?SR7Y}iif1t1;-W7FBJuKN>(s%VIA!us@hU| zL)K@lM_crHl~`X=R2 z2lmo9`yWAfJGy}zT@CG{h5l*gZapVw}f~Al!DX z!0{MB?+&AF>$;6&2GEHxi3Fj}6gGf1a2#aY*v6n64j<@@m**R9(sk%{au@Uxp+hx7 z8iB)VNPg)Jx?)@2aWnpp&(WuC#Q(8fwNBRBV5@z~Uf4-qvIR?dmCyL9JJ3R_Uej(9 zU92h+u#8GV3?KNO)Xd6OwUCKi8?~RM>~5HImu4jJWiE_`(ex(&ZJ`Oywi?#Iwt_m^ zO7-|MT4ALcC=#*PiaA`qF0%zYBPhHsEIMfw7S4!ZU~gAR{4$x~eQ?l)T8Pbg=2^dw zSP;ANCnm&6Q_gLlUEyH692>T6sNJay-N~?Khn342_{c>C0rIlsGOPA~W$6nePB@*o z*~@_%NV;>LPt*DBpiHOv;&w2EN~8w z+!dUq+T!daDjVFavli6gw7$J`Z)Jgk%lJhjf3WeSS8n0MiI>gCa-rF3)l-USnFQ@=(uO5qNMoeUu#P10Fc-E^|MYni)3QbNA$oI+$T!2zr_^PdD6>{oK;^n6MLz<7n`3f)5%wU>!gMZ^> z%2sck+|0So228ErHWb_z5;G=kiAS(lvQqnoO~fm`*hyX!!!ybMnvD*?^dM}G;I}o6 zk$J|IDE-M7I28z$ea#WLG?V~bl9LwL#}-udM&`#-R=iagMrUM45PX+#3>fnr0^ta@ zS{5~La9}m7ai(SK%zb1%s;^fwyRNHincj7bu6Z_Ncx|sgjqwep$zy-(Q&m?QBEa57 zb|8dj$j*?iVKHlgvUNJD#}mDvkS zTtWVEDmpV)b3A>?M+2R`4aTnS45&unsw8%)nEi`+E3PJ)M2{_)2s>|jr?G=7U-g*y z+dl%bZd@Q5gAhpN3ONsWO*yiw+={ijwY`cWM6Kr<1J&n)8g%KVQo#3S9Q!G&_orsv z(Om(i48e3c##&ZmqGMnr*E-U*3-U#A&UblPiE zX9WD&P^@`bcM|ihRVouEtfzYNNe{H`709r1^((q#fARZ?afq&u$PRG+%g`fotN(Gd50Pp57Q6MtEhv7)fH4Pa_-ZvQ&4tmegXIuvXjW#jxDxSJtJrqMP}ojoVq`dGb-J}pAy zDM%aa8MF%%9#L;VT}%p7O)L7jLlI;u}gk2JTEKc>GKQ39sz^NnCR~A8)*RCH?s) zllCchD*1nts6stDpMJMKa$4^=vO8ES%4qs)Z?@}m5VI%f_>!Y8<(m8G_<) z?=F{uuklEY^@}JUC)ZxTvRocq5#kQXyJApByid7as9b{H1@rEJZ=W=lX8iiKppsh9 ztXK1g2&6~7@t4Vm3rRjV0k185gV=!b>~`7l({n&vrVQ{fLzuarg)*4DPZM75hwAs|#q* zq7uc7(;u2a&x+X!8t8Hy!}r<`0c3^}b6xgmB7rB;JCW#v9v2br^SVaC`l=W=6=CJd zns%<*cA#h!W9P8h2Hb4y0ypd;P*9gV2&?D6GoO2Ykhn?gRtxgyJeZ>EL%>RAD|9#5;{fhtlHUIbD_`koyfA{zJzx%8c zWF$(yMJy)ff;r^N8yI7Vu^>2B63U1mT+-PL7g{=5GZy0!*X$BGx8N4Y791sK$%6V3 z3iciEln@rZpsmTK50L!JvKVr(D@2@OgB6X6Mgl{bwCPkP2-_!GXS>rCw`geD#kj(J za`EXi1lUkTtc`<5P)q?%79_0z08b@{(_5Ahhp4YziROcUGalS*c}|kd6{6+94^2!{ zIf&LcA7)e3qtQI%NNyyU5R9CSN`fA$S8;*(H*Dvklf`XcUupCkJ03)CAtIVX1j9A%+(aE1RB!Qy5y5AvT73tmYH&7HeyoD z0+$)roMY5UoU(BO13OVbhvSvNL8spxB`2@HJ^k_U?NM@klDv8Q`iJA^N6(Y(!xQ>V zOy`fsr{BH){xqQyZx3Ic{waC=ZF2bPPgM6mk6%6SCP)AE=IznRN%Hz_a{Thmi{m4D 
zcl_$vi|?NwzxreH4b^=0`ZRfQ{1R!2lGE2{k}4~>Pz5>*53G89==J4(5@w4w=9KKE7eE;^%>ysnu!gGT1>h-JRSKq#+mX2N?y*lkt z%k(Ze`hk8VC*K{uc)=q){GP_}mizeZ^_xGvJ^tf&r^$D(Upzmem*1c}FOCntd2uAo zz4}w~?8V{n%Wm@g@a5qjkGS4zg7cP(X%Bz=?ucJei-+{TXQ#)nUlAZ0%(K_8PT$gt zE{*o>sjd9u@ySs)IedG3f{FR|?Q3cP6H7H-GZ?D(>d3&sw1=xhh4B0PlOxxk=SPPx z2-*p%`-QQ%TY~{@TE2z@o9Uf8!lo>8xo|V#CfNXT3|lDVM5OcUNvE2IRO3@xlPbOr zqt$KCvssps1oH*``l4gP_HjF71U2#+-!c~}%rPMt@j^H&u!-Uq?W3&U?V#qCak^&e z*wu_KtQx++%@3Q(NO+!XipvrSo)N$+=3B*bPV{3wD@xqM@Fga~>3qmBV41vfK75!> zFY{@(!Ev5Eef;!mCfyJBenDmVWyDA9?@h}TcSa> zB*YeVD!+(W*ll6_`m(f^y;4f`vsnR{_mduyXXJz`M+Ny|EH%8N0XxUAC z=@8c0UM%+OoHm%=7(h>o4vO|Xzt~!YapWeWr#6JUM8k6y=^w*jI(}gj=2AH}>ckc^ zb7jEDU1c|;{4z%#Z_%Gpx#IlKE~qMVgYXqQZ&udQ zr1~n~Nl(jk8t#>eWoWFb1yRJ+7OYy^tSd%!;U3*r^pi|3SfVN_DFDS%UTcGIzd|u& zGyb2=0P~Mh{{Chi|4*EME&s><)2H|R|1a{~^Z)n!|2_YI&;S2A_T98=&kB`@(52y&g5Oi`G@t)a9cu3&VX^zDM$85qgSl=kLL@`A9Q!xR#> zR;83@flQK*@?s+K{LP^$TGB94QaT#xd*iBWOM6lc8;Gcj@HG)6+88_-8Vw23$#vFIE84mHl z*}u@{_V_-cT&8?-2rVP(a zmgZ#{6MH6P(mIlhwdWGL{3L0g0%DowBc#`OuYTnOlqM)6{u2>ZgYf!1m@W|usAOf%jE+Td1Ty0u)(ck?s{v^p0@8b z-4l<2CuVj_n}Nyy2V!XvibBvj#Kv6k?c48PF|{}W$$x%$dZ=j$HDEK67y&zxIZL2O z>~b+(lq@xXq!D3F!V~_j~Bj!aBFI9ZCTSUq~KD^ ztyeTy&vc8(peQoTxYW9nca_D#o--k(_i@Ww`}bQvJbr#rvvFq6374D+d5D);+%|0^ zUs@HWwOqu&Z(Sr2{I*k#-u8LIbmH`vH<{DhyG|yEQfxBm{aq)M_*5?faWIz%HvY*| zxo#Q*i`?4Ox@n|-_Iavw%Ji3;Mn{U*%;wiNy1F=fY4~SD#;xO+j9V2qu0W6x$IN_1 zQ$uBH0S2y39t+7-t(h#|XPIi@yi^vPkcjn25mR4+4c{Dr(m{7Rm9AW73s^Eb|IO*> zrc+xOKSl!;Aes;UJ(Bm@S1V9!XDHo+=(#x1Ka1HWz8Wz&@4{7f(+q8eTTr*kassbe z;U|^-Vvg;~mN98tAsfuXim=t!94jXLh1I~CJc3f$m3*Jw_B&K!?!7~vl6jf+vCy~I zNcU%A(eNRwFpQsOm>erSux(z(8XyI7ek;iZy`ajlpVfnSg5$&+l_MHDIWA7ea57_0 z-!7WczsxwQNo@#^s8F-Bu2sC9VS3s(B(vUh5}S3>H0l|2N228`2vhj%;GN$-bvJH| zZLr~S;^j;6SJ0aeHummC+N~zqY3iSpqrP-LVr~}u!l3ST>95>q8G}*k0z{kEZ;jeY zys=wMqb&8!PTJamXU=WeQOg zka^e_MPZLP`bAdGQl=$4rbiOEM3SMEXL}5N=zYh7)JWyoJ}vJ~Wgw+%asF{F?(VN> z@_@dsYI9YS-DKRCS)g6gm)`l^FmbWka|Y1C|5<*c$NFrS;+z1Ay&#^rB>-Dpo!`!y z;&W4czEN>gS_}W`6uTQfo9TZ?=aY|5|Gd`z_sRZ#HUIOIr;qRTzrVzDum8Q*|K96= z@AbdG4*hSaS*25_+3q)@(=;kAmPm1AeYDvJS`JMu z)Rd2EtwCr#lnq-0wPhnda`n-9Pya;#V4H+Y>50-Lqgs2^`!8Q}xuuhE&BHz)i>?h> z9l`CQU2tBcLUwrZY z+T5M9U4>Ns=jEyXnVu&8@Bb7ZYwcTqR`Y+4zkdAH*CGG6|MaWk_w*Gp64^W%6X6>BMsY>L3bO7NHmoo^*D0cOO_M(pAe+_wo`t<3-He?jCQvqVCMdyokf9pnypFX#g8sS>#r zXXA8%h#XK5;)<*&k}-%T??RiIe5uZ5oPreFS=npTPwFCH`-6OF#}Iwm8dT7+6XarC z#3zIADyGXD>C3+*FTdfAZ*7@Kj{`0|;KCZ|Rl$T`XyCLEIKk%2*Bd z5#Vr@LWN%`N1z>Ow*FpDy3C|Z(sEG^|7HJ{jJHXPO1A{_UFHfTE}wcJ!k{lwg@(J2 z69L)CHLWK?#xM^>;KVflocE6Aj%HQ!LX#pOnYrPzr1;`WAtRMF+cXLO1CXt+0E5U- zGKc}j&`MI11Ic!PbIFMPk=U?cf;0;d>iG6NBq9KHd6+OqVJjj|zJ@;L5*jySt0~wd z0jA*<-NZ5yNa9{PyYmGif^j%muqe`*eHh}HYhKF@3ZLl!fg8~L1VhbMk!c{m;vyLI z#e(mN74N)UBLS}1ErP*Rf;q#MVL3_uknC4t^7vanLMp14#a{Wwz()bhI*qB74_-ur zmwTf!>(O@Tt6RdgP!BUM-UB+J1cm+#FRG*0$o|7}zdsSCOU?1ArNbh3c-`&POL{%q z_X2e-O=`|rQ7)i>>5tRN`6xX|>H(PFE`{w?sls+!YqmRf!ovtUy)ObJ@^G_ zQ8!+#V$?FWQmVn~4M8SJBE@Crd4c$FuDX^;+SkNKv+od`eWbV?{)vf?r)gqOj{kW4 z>eNvd`h9xz_9aLX{rTPZZ#r@)%ki!zRNiPg@prKxn(l?sPcM<)XkmNkdM4L{R4KH{ zn`2g8LKu|@vHj`+^=R)eXM}&316;q4Ge3f1JQfyBq1aFOu@3Vo%aV#G{rH>)XYt z(;aH-nR&DPUxZVgiFWsyS%hGDSRV>y05}2MrrTnYV};J*B?7cdq&dPbwq9|&+3V}B z?dEQoUSvYWH19YkOE=c9DMa$IBW>rjt*&_P=eG}a^Fa+w$1%qkvtjANp;|*eJS2+C zw&CdK9`$`yKzlSch5YilYiR!OAjeCKb

eFNe0Faglg!qY}Ag_o-HbW%x69KqE z645ZsV&q_n9UinOb3t7_$hENX3drXy3o|X|{0K=~u^37~+cqRO)QBCUy^zISwGx$; z>#VIM(_dY$RqL>7)+>wj5wma7e=yTn6-rM%DYT~HP{lQ;B7>7h*RV7!>11qPPrHp_ zvD{-==`IU9Ce$KHE%RbF2T5S*bxI`FigJ8_5S4j7)x;5yof_n16>ZKn(KVb=PzafK zlSMu;bTS+h=)9Z<3Dfbl(r}X?{7mFulat%Zts?uT!EqW&o9+u8JxKF4z#C`(spB%a zPpJk9aW#iEAi*%+FO}Hq8jZT(A71gS@o_&FOZ91?@+g*&8CwU+?~VcEC1X>rUQ^+* zHFk;{U8*LT+C1A~sOgtO92DYdV!&}U>J+!Do}8c%C03_=^-o`eb?vNo*)zYWI^Tbe z0?r`}avc&$H$0+f?nhWC)cI8OO@4ZJW~L)dCEgpxwkIg3!@Pt-a4Bq>Y#{5H73qzz z1m^ICIc?XEILq_hxT?n&ig%9Lr&%riE!@tLtGY6oWjL9tjnUDB`V*Xx@yTvV7{_?h zudYSU!~3nEJkSi>e^0^H_^b(NJBC*l(c0cl6}3J z5#a+_r76`}Lq0V;a^3H1tb430m6!^R)oia9&=@aZ3EPFyK)IOr1LbQxWm?my0?-y^ z26bkLfkYXi#_F|_JIRYoEz}L|0L?(C6KgI)`+(Cd8ppc`5D`j@n8IW+g`tXf5cT5N zW18K7&GRv2R2__QY}PnY0A`C_w@0Q{e)jm()s_p*=M!7)BJ@LZDDP)|tf^wX1@)=X@r# zrz}?aFRTD&+$p?BE1uPhn_21WkF3b`#J5xatD3$l(D$;^o6|B0Db(W;lP}CnzOY(Y z>8MiyjWT{wTH@R_l(J+sEGyv&5bonS$EUj{wp9X3o+jr-fdz#LPCk_D8D9;;J+z0U ze{}XjwQGbi(LwxQ9)rm-roTL*Q=vK4$?S!g4r_4&C2*!<~8+>Dt{ypDP#0$?NNnCZWp&vU|W^%hPCEo z9mq71Pr>@$HrB^Ny&+6;)WK#f7QFIuB+uhnRCnqOgUQ^c{N`Vl`Jzq-@kCBsp01{2 z>kdaBp8uH>ZpD#~XVn3?(cy=ucyBKqYw#P-My?bl8#YgE7xiY#MY}Des(5b#776uq zXjAtjtciLAbVl=^pR46)IZLmnCJz22ou=$17~ZV))={X+t;s5~sxyQo%yc3Hil9TwTCJ*cx% zIP^36C?nxb9H(7ga5~7|jC~ zj@BDaiA*(h>hLJm|bkvrgb$PXAcUg;rXTVUnjGcJV9cU|87#NUtr-G`}%Qwq~vL91U zBkHlVy|tu=4-SSKHhNjJi80MZ(%%@zQsITITEnEfjd#Vr%L|UO3X{cCz|uRLBMej) zYn+jlXQOx%SD+haDn8!e_sP`~A`@-`oe3Dp0>!usg&$j8K`9oBB542vzV>RdOV9+^ zw&szc@C~PENZ2c=;E5-RSu2<321R(0RAiDx!Sv3VN6m-t$G6ql;owtQ3%yz7xb?-h z*ex^I4e&h$?$!}P0$N@6kAnMnT2}WjOhU1t*b?k&>1Rd51LW3ixOU^nhu)uQg4_GO z7~dJ3$DUAOt)te96pgn>GkYzh_eU7nHE4+i2BScCqa?RUa%E-jlB(B8)hnb*?nm3L z+U1XSex)R{$`-6ikdd3Az(_H2Hxv@8kG~$O>)b!`il_!ijg#+mO;p3o+Lh5pS4E-b zrgm*)HE8zosOg>~Tj6;@Rll%obd8cUA`s(*jR9QPUDsp>`s@o1{0MrDd>nqjz+8KK7ojwj&HG-0`k1 zT$9+n-KO_-0i<%yTMB!_`tE256vVN2<2 z-mrBcUs~nf{d=(1&APZX*Nd@g8>w#bH0VCL=~dTuJ1V>w3dMG7YDwByY0Z@-p3ibI z7Zi>X+j_5}w^u>W8a0}D?sVO19`>4>r3*bfR+d%T;t95^N-OsBKVK&M%{rZxgThn2 z_*+{xoXI977h?^m34RETZ$L%4C8ZtsP+aZAr*ve9SPoA>>{B(0x<-i02oSzi@X2T!;Up784ko?xTVnD_;(#o*Kqgo~g6qA+rPRVE6J(a5Sd5HpGh zivYZ&Z*vJMRodwMCV0%+a1Hh#ymQ2c?-#iX%iwN7jGKbT6IbJ8QO^=-Hz|fgxik?0 zgc(L$vI@`TV7M^cUd9Y#+TeTd9ZwNM>*v6z3^%w*4l(X=pvg4Km zl)cRKowmkA*6U1zd|<{(t#YHwDc?J|E*ndeF37^KRH{bsY?^C9j_Kbcq9i@fQ0bi#l4SWpO*phpA}RVe`rX5+20?}Z!Vo8YD5nrd%c3lk`#VN`Ak zavLkrwkZ?EN?hDDrlDu9_dxSco%b$v;=2&n1`D|Drt=sWvt+@B8*-p-T18MrC5P^8 z*^s&0*uV$gYVbj2uhh*s zCBxPgN1b-*NR77;P-7Ky1JL}8E&vDpArws7jM#D94bVlZDcY8mS#H2x+XqzR)Qz=X zE73n#dmvSM9Hn)4=`UeM9e{Q~Yk7K}VKJG23zB}CUo9?T<-Qm@V@H`LTTqSR_V?5GQy6A2mTq_T0O~*Ldgm?us>3n_!@yI7(4W#@{h^a8 zzas31(wTmpGI**r$NNATa*LYW<~S1jkOVs&>Fuh%@Nxvn_0f|33PL_}8m+$sQRkQ=dCv zW+8b{A{lt&IH+RNHJzTNN?^nL9NBNC=ltwAkz=o$y#CLm=DFn^y?3su8^saiRWsSP z6s+q=UZ-h|*Nh+j1Om7dorDS((ac}maS%>-)T@$OZzOENOWRq=>ciJ3MlrRwW)Lf|37?WULg_x9%hWaRES&SxVpIIGtgB^vJ_Y?^WcM!Hru6^{EulU&8clzp zgN|7Lj9bg8WqWx!IN>BzWXU7B+ zzST^4NM=?pM$}^OdY&(`_I8wkIdF$d(*Hx!)-&iq>76>A?FR;z{8Ll8^%|?uk!~rU z;y&LS2>cvR)BV4l0{6SgUUVHfBau^4Y~p9xO|?^ahV|1Cwk+UYQQ>7 zljYObKvB1%8Ylqxy)f{?{jY-aEs(Sh0=U~;?Fd~h!)n}pL=(DY4DTdox}@(S6MHIg z$93J{yyXl*jgO1E^>EyJ2rG3cq2m+h6(?xy+RK4`G!%-_v7@hbAn@)-PkPd;)ZZwJ zpqr+Lk!u7(p9E=!yC_bw=k!BI;b|V;iiB7PA$Z$0O`1A_V|JsPy9X^D>B&^CP^bq6 z1pZ{xCUceeT5gxw_8}U00g-E&YJ3m-0wY%P204zXxcd?{TN?EUnA8D&@s1J*QFWx!pxhdL`e zF;L^Ha+9v03tC*Gl&|Yd>;K{IJany8H}{(Ejq~$)ipZNA_D{;+ZZ=h>cT}kbPxZf-CU%K~0q6Dsx7qTmx4Q2l8KsJ{#|%b1rIcLJ4SX=q z@XEQ(+iN<^ZuKO!3p0|%Gz_<5Q;GzQ@p-eptsg8Sw9q!dSOI`A zaF%Tin$%k=ot<7@Qn5BQ5RKHXML>HaW$+-bdF#1VWHWo@vHcH9WhMy%YfnQpUevQ| 
zJxH4hmwDVe+=%a%yD6SPforC<)7h#|y&UzQaRGu8-DUxUi#RO~P5L$qc<7}625RgU$rA8#jw`_1gxF$bPE{cGV$}I_X#YOtSiNxEMOcUl;`NqlNY3JYi&21B%0+Kz5^LMiP0$9Pp62&#^0irP=dm^XKW`AF@Fyi z#Y6hl;VVbMTPkw4_paj*I4Uc8vm!Pl=0l?u)`o50x#6uqwb_p`T=zb+_s^$3>+gTC zEq~VgpS`C~zpmZ?e087y{TF$Nu#1@4t6l^7>mIuXa?0D4_dqQi&1DN0H}bbV{wC?I zXPMX{expT%bV}5(#g>VDs6!J1ZF9L*O1w-7Wy?1=z2wlQVN{qV0!dnCC^~)$YC@!GcgtGW#HqICA*3JHY zayd@^Li_`5xz+~iWPcsnt@4UknH8E&t4HO)Xl!uE@d;5Y*+9w4tI+Tfqee&lk+D$I z>%p!H2tcnmOsrQzPfa@R^&|1iXJ#Dlyfc;F_fGirYt|&R39KH#mD3j`{WACZ+OQ#zQ%an?$W z^9)w|*skoh>y9!w3a}GjLM%UL;`Ft~em8xs@sPGyHdM#u^OmZFc#dS$b1eJGH+h*cS9HrYdFuj`6Plw|`X7ZUg=9 z@I7R!RVv<(Q150j_3m9&yNB`aZ7kX{5Cef{DQO3CZb=`rPUtPi7s*LF6g4*_4`$i=W|8qBpW-{Hdfjzx>pf-}UWP)aNp1?|*}7EG0K!0q1L-1Kj6 zZ<9^zQGTP&kLag0tQ5EJHYTBO#*f4n33skpn+RZ3spTmN=@?@DANueP zyrUmdnWl{zpn|3m>4mg~i5aEykv6LE@b~~3MztLSACN^S$~*#ev4C=~KD<{S-m4Gq z)rY?p_2FI3L96`P-7P``2)=qRH2oY5t&--uo_&>0!KayBRVOg2 z4uExvuq~36N}Z6L0kKXSPG0KnJ9HsTnZSB9)5B1!Vk`B%BF;F?DV=OVlR{#p?c7TH zx1a-JT^HKZg~oO^ih*x3w+#- z>HRxw(f?jI?}A;|-INxpm3U9sgY34!{_cM0W?gSvhczA9``@mzxPLz5vtIvWuFyU{ z7sP7)&!eZru2l3tdrzO-+kgEcPow^)N#A1&Xxd5R<7@)3ry~itzI!px>0B7!ZVhRQ zi%I@hJ_Js5rQh-F5q8O zCa;^^1aE#!IUfN*<=}hYkoakR3bbJG4!P9dr?*o2Xu6zKO9$_Q(*7;5DIL6{($n)}jL>=#THEKca3 ziv33H&YQ09re57{#pqDcL z3-Bt6!v0__o`l^2@V>j$vo))u$6VEm)rVFu(?=7YCETbO5D!>&9PDpzTaj3!Glvd@ zU8i8EIuwT6xt^XKkA#mlzZCD?m6)ISa0lbP=4A6$hX8HDV#yIluMwa0z2?%%=g!(+ zqZ@m0v>61(!JcO;Eln^o5#CVPiaaf|vgfbXCwsoBdmoy{(^vntSQ%|!%j_SxLUrto zfn5#rjEDcOGW9)Gui&5RCuab9K!v|yqJLEj`0j)D?`k2Zj_Nzi*@{w(o~WqW@-vdj z!K|d0D~00*C8_lE1%UtC2EW-6t*ey*$dsL5VZ2_s;EQf?igw4t=uxmbG4` z8Rc~`r{gmW>=bt)Rdv}}!2@6&tL?7V^SW{P8OTXD`QnTB*A3&K23%A-&t(>!+pUE} zHm7~pS3=H(e4vbtyJg|p~!#H$p9mGg}vb+33un*xw6Y+$#{YqeEY zVEWH9smZL4ReLY9uFnX$9pyIVX1&q!k->kt95&7V6hLsrJ*FSEc+H^yCDv=nQ%SGpkwby|vFg5K z`Rz_s~=eBH*28hGF&M-f3jKC7AW`Es$!GEv^a)Pe+5rn@Hov zrpQe@73u|}3>oXzC_7(X;`)OAzgJQ1w_DGi;S-b@&zSWl{&3!^mHL6?)aQq%M<{z} zgK#nl$t2#Q3)?6+B?~&9s@6GGPxJ6|C%PJzLhGWf+8IcZVi*2<7M*m-`QDNRn+k*H zHzhSVieyaGvOzHo{(9-WrXQ4A`!%8TVvr4lfvHVdd1-;<+atc=v*pgR6EL z^A%;t`b489xZa$((;e5LVP(IUoX6gDQs}0Ou&A`rur@&)OmyFdu+P1GH1+ORZdXmM z2Wg^bA zxmYdUzyBZ~5Y1mUwcVy&HUGZV>5)_hvPm-s8C@Vp#K0-!+<0$R4HXYFXFBBARGBjM zQTYfG;6~{oMgTP>|nY z22@d1ctyibejG8bNsqvO4@q(jxE#SYLF+zypY!EFyY>J4?LqmE)&p1n4&N%P$Q%o2PDa6K`xKTiTL@7TTucxnOVe)0EiK2W{6$ zYNFHG@=@elp|hcEQ(eWor6=nH`;X+BLIi6QmlOIMZ4iUBr2(M<2gw`WS*8=4%1rbH2XJ-*HrOl zP?tF}H3JNXrnV`l>EdZIY*O4QM*hq*BhG0rA?CNJz2aVnY9p4F(% z#T9S*kjJ@hbwUkslvwQ~FQF(!_Rg}*FJ%n6L@tyV1T`D`z6t<*ef^A+)9ac}D*+7b zZ8(y5#A6sWrar4)Pm6sQ#-ee^=JC#g#-nayV0hHo*Bqd|s`GC%1&`*ZTG`gx&M;i+ zN1Hlf4w{Ax;bAZ6SU)e)`H1sS&X=>r`gZtW*Z}r=NIN!DV!2peTnG|DQH!L9G>%S4 z@|wXADF7*i?-9n%rW@G3oW2|ioV73vdU)C&T1eIYtT?!NxX~-67<9#vc4e>nkM4#a z*c#fMKhb`LV&;kkkFZgzI~??)#$dRwD+U6?;l;dNJ3^BO-pSnQB){!*IS=OQu3cPd zek(-q>odQ>7QX(u?eBt>3|BK(YaSc6*tXv!BT~nPGlf6CMkL^^{ti z%UL4~GpB{IyMy4yj|uw6Y|19J-(}<3DV?6aRtMn{R%vxi-S9rVs{XDib1GV(s;!wh zFV$W;E{E$`Y@6`A=Zm};LhLfk3;y9B8hjbiS9{)@gXAV;|C_GXtn6$w{{eC>&is%_-;vYoK<#k9KL|*k^ z4ORoQ^K)-HUwziwfBW`fKS%t}y{BJ){Z%dg=hG+m_TRt6)9nB6ulpxB|HC8jA-nYH z@f`KcU+ux}cctCRCg&NHthnM5Z|)08WNtn{Z@$PTeA8C(Wg@ey39BOc(Y11b~K2JLLz)jgDCilYpsfGhzEbwZyP zS1oXJa}F4toQuUHYtI=*5BAUQC-g%tRu35+h6yrF6aeD9r>f*gFXzs!RwdjMBJHhk zGFwbCT9p}UN!U731cOz}M|4bxhFWGD#6l(xgn5Ptm>&n|0LSc1FCntpYbHRHnO>iAdmmvQ1W z!Mufz;KOHPpaaIar!s?)#%NR2;z4P*3_~59Skdc=;1KG@4wk(;vfbKe=BfGo9TF#< zT9dUc@9bl?_cjvt2{%Hs*}Ry|LCr&U+_23tmV7k}tzuW;c%pJ@izY_}V4EZ{o%@D- zRSypQZh$2sfswp@X#n6%z-+UMzDKeG2v?;go+ZYoM5Wg*5&3+0#l18j)6$=0x!bO0 zV#F-9Xqj8PBRSTP#9o8f^0oFwXb8Em%k7j3Y>P5Xr^OWAHX~_gmp$h^n3Z-u95S^6 
z=8e;Zg@=foL!ld%d+1@t9QJo*wp-E(O6S%G^|z?O*xvvKEc{nP zdDIMrZTUhV7H?yZXIpI0LXamNU#GWl^GsOxu?wjFDtaP6{BKG`Fd2ydrz_2rZ}zUgH) z>hzL;%9!YAkY^v1elE+_-}Ndz+$Cxlv!fVR@!QRV_4|Xpb_r=o--bu?N&P{^P0l=# zqroVtjH^-DLxxgILh-rUUeTtV6z8X0pA;aqpUlR2H~W$7X;mfhg+Ev*zXJUnhe@n# zyH9`}nYayLcy(7q$7}K&yuYUZWu7Me&&zC5 z$=6@q>wkWU=UxYKuY4J)j1Lolh~X}R3U4tr27{! zDVEbka>bEa2&sY0ZtKB0e~IABMk<9xGQ2K}WvQV9N_7!}g(@{UzhzZ-X2}&%f+a{1 zwZ5Df5Z7~I*9{21TpnqV3Gv>-Gw#^RnTSKHdJlYh-=HSh z6iOVLXFy#_uqGa;;>v(VDk4t17A3@=ioU7-aV_%%3xut$4Y6DI69jk#KnL$d=KBYn zr>a9LrZC!|9XU5j7V9K=;6*e$Pv-*?UDOua6zQy6r}*+{rwo)&S)RLWo#({Bv|BWi zR@V-Q_E5tPq5`$ix`jczs4pS+HXlZOLO%b_1@P{K?Snn;-VD6%m9YSX+P1BKNMpg*&GMh0x<*&Vz;Kd#W}R`xMu?S&i=9_ z_~Tpl#pfYx8T~RHS-9`AgGp5ltOp>UeMka6=350m-Jp5Kp~F6O)2%cqhj~VQmI>GV z;pIZ_OzZ)fen|5%U*vG&V_gTkTs=9IF%nME_QTQJKP7)WdUf>n@I`XDBt#LBl3@|x>TuR8c&~s^bSG4jHw~6MiH2i#8zNh8xCfmkpR1aAK25?qpGlE;+f({#k@T3~5X(uSi?*P5)4o$`Hc zv4iaO^2e)es@>G+WmtHJeTjFa0e%DYu;7mu&haed+q#&RMB?I1GZCP$2|g^@I?!lgK)(PR-$j0l~!1P6y#Y%6tP z5nr8RD>7l%pBN~%sm3MTYV+ZiMXXZq4{>e)>Q!1^Y2Jl!ZXF)AN;e}tvGW=hgp5Oo zgr;{w)u;I|14RPOxI#RQ>mnJ=(`$r{HXQTY<&*>8da?Je=Ed^z3S>&Ui?!SpWk2hQ zCK+}~UQR%kOTKzwP;HJ(mC?a^G0tvs8b(LrJp6fCf=Z8cUonkkTkla0`_VCk;gg(6 zvfh&!xtb3V#tXYUyt^^(pM<7%WPNuCmuXsRu?8zuH>QYk&Cw)^`3U!Zg_$|I@{^O0 zH;dH^ifOr?2D%ljcEIl0Ibj_4qc|^z%Q@A$B{T9yspuAz#->mQohKx@K>o(KojQVVcb51r)8ae(YO*39BOI!vW5(=cOkX+0QA=;Hw1iXpaU+C1tF*m&}dNHt5 zt6t1o&qQkCSm_+ATjR#-`!LS5xfuNh_i0j2cG^e6%tZ@m>VqAlxnY=70Wxf&bkVgl&f`18TnBUump zp}IW0F54Z~^VuBSEowbT+NOV9*D3vz=fRt+;bdl~iVr-m3$++4dW2A&wgsluJivH1 zi0Gog_5zd@?U>G41#J&?&F3EOmbvWW6xn%(Rf1@9vpLur{K;b@Bx>#=*AOXCCR*?k z_~*jnEq`g1At}~IOAd_fpR$~`4IRAYu<9o0O7$>Cqo~Te+hZ3#cF$qE=|zy(>N*NF zP`1nMJnOd*Xr8i`p|)@xGTq>KR6|3e>rf4F4mfV9iqbeS7g&^)HIm>qW5lAobCLk}OHP*sp(~@gu1wRNSr*W+8b{vD}e->>-BtER{c>Cv< z$$qGNsMk<(;QsdOgt5jdv)T5?66_IHgKfS=s!RfhVp2?Z$R;Xt27-v`4NO*rQ4RMO z`en8k8x5`9R~AL@xUZ^xSBtVycB8ox!3cxdW;hxZ&0Z0(Q0A*mb%W=NrUCwoY(Y2O zW-2g8*2>H5LZh6>ryeV2a>AQASLSx(ZQ67ub4U=#X7deBXGQwniX%QejSXx`0>OWN z4$@IO7=4YXB_L;2^5P|}SQTq&J>jSm8_{G3V-5+3UpVKi2_l)gZ!I|6Ghvz!Cg_} z`H~o+MZOqke%L6QU4G})VAhOZ@;jd;gvwDZ-YhQ;se9^|@@>{+2NxFtxD?t$vMrAxaiDvhn)htm200Y_YT{xp1LXH=WUT)PGd01krCj zC17p{v%+aQzNIQ`rF3sId2cg$Z!>vsGx;m9nPho8d29ZfXEyKc)zV8{i!8Y20w9~rIB|{n zor7n!GNi<1-BOVdq1Qc`kcj6z+kMn~VlF43E}Ir6m}|#s#zp4d(Ms5`v#{dWNgGBc z!YwEZ?!@e)XIj~nM0D5=m6&_)XqJiCWc`lGmgnDFmW9H&&XNf(xa{p=pvF&;kyUni zQD${O3gldF8CgEdQrwub@MUkV5E9>ws%Iq~ot&JwY{_vqoXj+V9!4a8nQ>I3)fnMe zW~QDy##*o>2L6_Xyx>7;R$MVLGxqI5DK z;`#QN=;Pq+(H}wVi9`)fk4{bp-yJ^t&sL}2_Z_I!BpOyagQ}hbYC&DVuHD2x+>nd} zMVjc`F2)5X+e;qxzLHyHqgC5UTDy;1Hl9df7KsY-G5M(XwcgGyI4#7aC_x;+JQT~) zxOdXt(R|k?tHV_~d@nwEa7hMWaI*9460yFxDe>GiYDDxxL1|xTBZ=&2j5y5jh!!CF zAic(UMaSsdN;P6AXrA;wSgd{^iksu(8MGM?$Sv8hlTw`j zIV1=~bXg>)4_$;e6UAYxQx6rU6}()GT&HbY>vAtT4ON3^Vl3K2l{df%GQbhi_SBPs z64LHGbJ+AI@9}3l)c!?o^t7JIafW&0BxmyFPB(fx4&(RDK~;lmPM?r8*_iUpX@6>c zM@AB!nlaGL8@hQHwlL3VD?<@{eb$m6tuWUGGLbcYtO1wY1ynFTpR3m)8ZkfCL@>rJ z4@C?i1df2T4G8H6$n{1Pd!*WIMU+-c$icT@w{bHTAT8!<5_6f@*JLk>49F^KwGOl33_2XJ5+I7 zX;*i<8L>%cHbl`$FMjP^fnVjL+gNg~hotwqM0aj~db zWMK7Zrsks4EAzja^g?6E!i|YVC0CLVVwj;2W5hRVSH(R4YeACl*g@sM^BMr%g|RY_ zCh1K+Sxz)zi>``utI=SB)*E3qFFxcWv6Q@qAa4PkXNzmx-acir{5!uZVn3-uGKu}O zt1f;qDvIX9L1D$gJol5o8QVS<3-|XP?(gyc_a0`8VLXRZ&JNu?fyu*%$y1*)10V9J zA%fO$QzPx>sv$M6udfh5Eaw0KmH6;zH%E*6d%^nvlK0~azo7Oar?;+%yN#fQ8t~ky z5p@%q>?Ch7hskn=gOn3QVV7KHb2nFJZuohhw)sNP9E{y$ zzk_+PZ#pze`@`&z7lbGE?NPbwV1l>?Ebd%eVBXLcKC@5D=Eo>|47|p^q!iyqTLD{8Wti9n_7!2A0(q_EQ5XI6!r`0AA}Ex29uV z7&BJNwpR)9PPAC8S3J@FHWty>p$0)l=rpw7W5e&9Cn6S3*I+ZlU7IFoO zDr6^M`KkSMVe^@>ohuA%jqJha5v@yVO-UUXbBdrG40Cn2(rKZgY!N#+FQrwBHO#=;9xz%b79{ 
z%0zSy8m_4=MILxHDk;UZ=}gFP0b)F2Vx3ZFf4IckxN8 zOmd~~AB{RO2p^eKgym0|mA=pE_&`Frbodu9j|h4dA^sqX?&h z_rsm00yRFt0&l)17?X9{7LWG+)qOc9yL!3;jb-@ba5%}kDt@3& z0)xF)H~Hx&$vv28=tjSlioM|-W=f?$1iNKb>T*ge)xPk&h0LXGI_EoFyn?Y+-@NH@ z-f2|mUx=9=Y~%W^dCOUKQqg>RQLwC)`YP>o4%T3v4i{}L(=jc$%&F3^zvi=8zv#8? z*IzHFTl%8+|IKIL;>JKb74*mWI{Z~!cyMXxGm)cw?%fE`FBEEr=G8TVN5yehL8Z$! z{ik|yP@xrm3RydWlC)cM*;3zNjxNpWEa1gpG0!q?;lPfEexa+~MEhF0{OoKjMSZ(Y z$M3ybrsf5^KH9<+@qX$}10$Vn=wezksWt`k@3fsI!{8BuI zMSnen)sXhlxS&`kfA`5-P23Oo>u@BSj_(q8ahock0a!^M+;`yN_PytgD8Wnq;7*`y z5-hc8Vt1EQDD>6D&x=6-Eh%m&-9|Y7^GXmB)oT?5p2QUp`1>jVm<#`TyX1QSE*RK8 z+C}F-)Y!V^ma=8v56W3OWQ9X3X?-!)z25Ov+aMc60xP~3ilXnYZBay99T3y7(4)oH zv~|+#GOxxkp@#Xjmqvqs4`%a%E#OQs)-2fCSkNMkvx|k-btLE-46jFFw}|)}#txlNNpqSIv zN?7jhw(Q~lYq?-s8*lZ5o`uz_RL+%W)CpUub(YHv;6+_}Wm@6>sBE|Z_5E+KeQ#A^ zkMr|^o3nWP;)UK=2CUw4Hdx>uWKp_4a!7Zt-12pZRMtk?qt~sV z5aA!sTrhEe@yf6Xc^%Lqori_&sJue;5__x0rDgUvDD!A#8J07uT@K4I=TsjSzxKLK z^L!3lw6GW(Vg!Y(IzHLRrznKT&ejF`BO926q1K6P z6mJb$-!5S=jC8pE+wW()i((gf?t*Xc9C%Awg9aY7g><2V?O^MAZhG{8r}&iXc!U1<fIwNt#2 zK5;#NAa07vmS4Iub%`ABY-t#S5NS``K$`2B4cn^J)A#Y3{}4H(G6QA`1wii$sUZ`! zv)-Fiq7SlX-(b*N&Ol~FVN~Tem7MJ)-x^~h-^a#y@@UW5i9*KMbDdyk2X_zTys0

P<0~m6^auat_pL z)A<6v#OJy$=IETn?&1JYvR+P7vH5$G_VMbKGf7_=1G7=D6P?`}ewhJ(nY3CPqLoaD z+O<$!-!~qQHQg}Z_rvcnjq|=fI#^{bUu`VjQX0zaUMp>1;cJ2M7MJ~Kme)hftr~o< zys;^Xl{aQkAjO=bGnnc&tv82ak65L>#=b2ek@2jrfC8nDGD=`38 z`6$?;L>5ujZvk(k)*yPDVSbAFPU}H=_Mm)+JZL{n-41|UTP{W)MzxWtMitCxJUI{qXdNsa2Nr$aAvAs%+?v-YH8PRQf(-~53TQ=bO0{IVjVV=SxzqXZD*pie?>I+taAO;m zF0Pt5Ka?yb$}iV7H&yTnx3@GLZA6*v1=8bHh??ZJNAO^KCu6(NN!k)BCRZnej$w7! z-aPjhFE}|MF0HWnPIis?O*-=pQi5vV(58(O4N^%UH9QB ziw?s%qYO_A<66LmpeaGwMw{CvW`UAVabt{jK?>IFuu|iCQMtiGX=8n*ZlUO15C3VW zosM^NjJQ42mk0n@Y2)9sT`u6=)9Z}$S8syLVV)0IA8zc5Q2={>yqwUK)Ce9P+)5AJ z8H+0RXJ4IYZVpIpjMfgrSJ@(0wEeUBKsgYuRy-U8}t9|X3$0kc(xRM8f zBm{hXP`13we!i*q3Oz8R4DaMwk)T(tl|@1;!Qk6`Gmxx`U)5lcZNQyXJiGD>T#fr< zO?LU`q6V4E{vp|m0sa!1e|g$aDjbBdA`RA!e^u;zWX9|aKM--U?ORc5gph&8`v{T9 zp-4PrnF$ht+zcD90h82zopf;jtKuE8R9&?c?{iHl-)`-;I%m6&4ys|1mQ!^#p}(re zBLV~;`R`6xRs4_o$tuhUmmy|{HBU?~f%1%735T;X7@MhI!65af9}Eyv$>m$O~G!14u8XahfV!yA)RguqON{>;i@I?LKR z$vAO#+h=>bzkBy(r}st6xA$8NuY?h}>G6+tCW29>20vgSXmZ%*=UuGc;DXB!E;taP zz9(!NTm)eXU>&JnqsZlX@L7Ce_@HT%-sQYl&f5DO>Ds=Koxk7r=_TY5HE6*b5 zrMQAr4hH?@xlr^TU$h!f^vzscYL*KZg5QfPrOqa*Z;db8CGIvouM}tC*{vIKR&4Xw zZ5G901-pSot#!9@mx^b3l$sxdC^yh^=7s*_KmG$IRP+z581l-}DrJ9H4VEFp z7ncZEicZ5XkrUj+^ivl^_Bjbgk0dyJ+*|X+;7WOe8E9}H;e%LV$EIVIHE{^C5fUEP zTIlYm(-OG5G&#C#d3e-go8(ZwX9kKb4vh!ohq@zE6VXl2OFBgs8Ouv7q5!1OvI@l= zdKU$*4deS3ZAs0X#GaK&6b=cdQV&KwYKT$E!m4iYz?9G)K{!+@7E$J8k_l*1^PS+} zh5$N&&=o(ZQ%MSO!9Kwny7)Z1%sHN&lj}9gKIfhB4)_&vm~kR24e$5Uv=u5+PylXn zeb|T84RA~4`pdI@PpGxFVssV=S#Kp@w7M2rM+xelYm2X(Fckx1X(d3~gmYzI{;tp# zbg##TIwFQg3vox$$ukHFrbvYoRu{19N@F%u)N1IPX92Co&wScJ18vbj@Ap6FW>ApP z4e`161KRpb8-mWMK{oxMlVH--FMU0Q3kP(j>BG20O)CXI05$>}aONUUw(9!V%B$2M z-)4lEi6lO0gDpn32c-!KW2*g@ke$#2F2yl9S#gB(oVa)DhDYQ9d?k&y zJtvOmAZefdH2SIc?u(9il6V8{+6GBDeRA-w4(tp;ol+7{J2lt48Y{SVjp7oh>o@+W z3e+3$ENf>~xsf%K+B~sAR2jT7o5jf91En3oj#31c;L}BDimR>BsQKD`Ia!iG z3*B$?En;ns)fr-j`vP|rXr`4v^1PDZvkqB-Oz4;12Uf29s`DAEuX44?3`KNTFVK*Z zD7W$JY@VT>G-F%QGh&m5o~2VDrAc74jgD{o*72Ky=G&Hi|K8h1qV4On_D=9$B?#d} zds0=BIVsEm=~0g-C1UwpBpLjW9x-2F*WcTeY}U=-{9R)3Q+$}Z^2fgU;}QR{|L*MZ zy9V6l8`_URT(*2+@jJxJi1;2xVypu66%Wx}Rq0l3C}KAjj3F>3TXkTW(KyY$#)Pq< zR?`XFPIJ6`Hbp}qHI4`5r2`a$DB>Tl(hs==!Q+q+k_=dLQi!oy;~5w9`3|RC&1~)t z=Uh;?#=%yOIey*l)QX)EvsPi*YHXz6ZYO!yc-(N<9c7$5uyg7FVRyiS-)^m*&ZImIC5wK7xslc|Ga93KmLdX zT(N%kE-q*vtGCyzsG}M;`qO~4OM396vU;NEULQD9m#?Cwjx3>aw8pX_9p|a9 z^7jl!2JSd;mwg{LF&!BdgD<=gvk{u-KqrWw_JJj|rme4`*gmYZ$r(;YuRN(D7$3*^ z)MjK9Xs?+oV6U$}FrA|}%HUfxDj=3N^f4*ReDhH5&&mLcazjX6sT8f6*sbSHB&3xv zCfgOpM2A!7P>lcToDTkT6VlUlSKq2TEcJZN(5H(Tg9)+D+l5g{m^x@hI-@NiqT5#nw081FbvZ>-J|M@Wu$ zGl;2<3@ZX4W~{Y-7R+E*28xzjxmw@UN{JnYeKRFIa$VOb-?|a=t=t`kRCC?E{!4C#OiI&}ANKj50dqTm*{L5i{{HX} z35)(49}?eD!cSh|cE z$++RY_2>si-8CKyl6Rsf|Fq3H-1Vlg>*lI*!&r3177m|kmmVB}q}|$m*6P9|nJ2Ys zdWc6@?<1$cKz0QiEOwyZHH_{ja5HXn*)2Va?jG1&>gPJ#W z+(t)VF%7yp!0i|u-SzAGBP239DrZN)9r3xCl#(NhU71sb0M6 zFHYADEW&6vDE@uXE)i|K31PFQQH^sHYbOx>+*)jiJz^C3pQGif_2MPJYt5t4O20U=)tJ{pzLVmZ0@MP zJOS>7XrU%)Rm5;s$V!q!i)KN}`f_sHo6V{YHeB&gVr1|YlR5o&^=?{4sf;p6=b%9m zBwr)9g-v=KmQg3!xE=v$;h@>{ls(NagCbz z9znPXy!_`ArSBjd%Z|7gnSVVZ^QQ>7v^73jzWsc%sgT*IRB7ZREBuLt!d)&IXpA`b zNg60Fr}YEaC_3~lT;r3L?#@^_ekYy&~O=E?lga=2fhk>u9xOu05PcYcH* zuXf(|a(rZy{48R;VYB{cq<9+Fzqk-D?Riei+8X<%dL*;sNo>Nkc7r|lb4!WZ=3T@? 
z*LxE%?TmlD`WJuyC?q$snc9%oV6u{i(p$H|TeJM3h~*Wdh~qi3i0BILaYc`rhK zP7%^<&wCN_UWB|CAp;STw#L7_2uWl7Tq0zhVdW>}TB*HHnH_byhTX#`F z1ddc*kA7zTgR&dHr2b*PoPM1C0U-mN{^5+305!$KE~o72zj^zb)f3^vA~lDcx9<3d zoCrvZ2Y_Ol3-hq+QE{PJJZj;UsucNi>n}c8@h}B|X|c-&akaq}#wDyPeyw5xVmw;d zCoY&NanlZ@an~D?o>ho85#_{ro)%Lo1fV3YL;v;nr}c^VKnWZIC36SRRXT+9X35g}X}k53(mtHOpgVtA=Gh{tKhHVD`?^`?ozOpdM%Z72(AX0$9)kHW z?SKFF1^VOC2H{Oo+aD+PdqUrRC?8NgmI{Z5BXM`lT6w{ce|F`Gp)DKRuDcjo9P}9~ z3@>vsw-{-Wy_OwUeXYA^L*i}# zk!{+D21!XD_C-ZDC(5^bq1>fd zU~ed`Zz3{NbDv#L^$R(1L^OUPjY}+gB{`fi#FjE2ZLqbZ5(&9og8?7!v;B9qmG-t# zq!)I71@LUJD@^Z8Zp-Pb^EV=rqCXn_8@dEo{bSSfS;a&_|cU4V4W&^K0p;V|y zsGqy*;Yo3T3RnPlYH;MP@PLWT5eeY=hkUe5$IMu|3f(%gbWKP6Xmqu+N_bO0f}o2S z5(cX0eA6~)Vz)}8nqq!};!nLW+m)TOHpZuP$3&!t!hPDQp06>zqSn(0`kX&S^3?;x zO{dGc``gZ!`yCHlL&OEth@SeV_mI#uYJzx5MZ z$)=YiqWO?R)T`x>nP-#YLpt^s-^1<2WF}i4xrzsiY%-IS+~Gbo;cbLZ_1D)xx532A ztK}je_vRDiK?h}=B)fA--r21;><1k8cN`(~bm1|)wZQN7?HPoD4~}~G!_9lxi%312 zYPTPG*$Bt!PEB?_Ma_zMHhB-w586TPvLd0h<%Z#)ax)Rsp&1^I>VT?wQndimat>$7 zE>Re0EKIdB&K8@~WBJ+rD$;ElC1lOs}Wq2$aJdw9P)1jS=G6wAk3VSG%g6pZL z6!$S8QaH@*9iWSTiB$@chR1NM&IPM{+7SQF7hvU=CVdZCC~$*@aOY)uUivX~JF9?3 z0`F=IUB6_6eU~|H?)S=VY8q*RYiDu|{7pCX+V;$HuWQeg3(%*^n7hqZ2?6HVQ?1{a zW5UKi_IUX!Aq>GS=05qT6!l^<2T__OLi66^5r>Mk)$6lllesrfkS${7M$&o@^nDY^ z{q-RC4aha19<2xUs0u2ar#qf=t-Bu&9`O!Y5W{*XpW%)|!++F>Jst_uB`MPKX+YX* zI?y_m(BcvGh=IA9LYVqBZL4hsyi{{5qpYm)oVIv1S8OoJ6;5JU%x;yJ0M|6f{OKGj zxGp!gk*HHHFE)^zqy3ZX!a*f>pT3=>GY(uR@aqa&U7`?1gN4UkCvM&3sdj}XZ?lR4!&+jXutMS5v_S;qq`T`(Pxg z0SLG~?YwLUVj~eEA7ah5;~H2my=L7y!&*_N-=` z^0B+YvN{c`XwLynlE5UX_2%u-$T`fzmmuJ`4^~R;b z;OTp;`6^+sR@idukxEs$pe8ZIZDMTL@jDyE@PFUT3>#6!Tl_z8(!r7&*}K$d$pK%lG&HQ z@f3O2aR21qE}Qwa@LR#CUxUfTy|+t)-3B(qufW~qmv+r)2EjPHnct0VxL<_+KC?F>4+M1D@ak$J-tZG^^$cz7Aj@h$Ar|^R{?~7Rs)`XUpCr)dfAW3Pp%`F{KowDCK zhX;*yO$HAt5y{VkW~snV@}d}~)#H~afvJ%04`t0(mHH+%b!9zS~gn`H0j?38(yxO7XB z-<*%K>@uA;mRtAzpY+-JZSrtg&L5uV(}&sgLn7MltsUA3Vwtc@wCkiGq=!Kv-Yn)z za2Z_WSuwkv=a*NDq&@5;k7zNw`0peB?_>V&6aMd0{_j`(->><<|Hl9Q9saw&$N$~u zj78Gwx8Q#y?=9MSlg&B%qa=d7%I4YmZE`tJr;BVvr@%a8&$HnbZYaCZ+99j+jOZ3} zNS+fj!6}<}XehY35fcmoRGM)?^OcrmG0f8iJlBdLb9Sn!V~(u7xXK8^wwa6V4mUK) z(y>WZYo`g3hLI$m4n;ahImRxm{f6V^Ncv$4jPpsZtpET|B_{@4mPr0YeeEVhq4Ep- zoAKag%kyzwUUdn|C@(c7P8aEw#`!RtmXZtfA)H0C@fa}j%%+qxJ(PT*i-Lg6FtrPv zEN=VyO8o%scxb9Fmh&k!3#pF=UC`8Vn`{Eda5zmV@g>DIRHQ^zMmd|a9Pk33a!Tdm zL&k#=l%cbNdMrJ`YRuds>!{ZsP#+vM=o zpQ!GC9>03tO^*HxTK|*e_1on5<(n7BNA&La)w37hKZg$V8>;#0^=b0r_~r2_fjxbV zCaJQ5J3cxAz?VmFpM6I^55GBnaeVrxZu0H%=_^3}mcSk+Zw}v{9zXm3#o^oJ&G&EL zygoUiE<7hFuU@}8e)TQ1tw%49UY+)+WqOw!{XjpGlkW~+yxsP05=|z`D`}WjU{_*(asGA(VJwCz2eEaq_HGqkw8m}1) z)q8bh;9%Or)uBT8{r$<2>(BF}!xseY1l9e*y{)YQ@u44bHv58{uwBlF4~3EowgGnsj~_dEJu@@W6y@&3Wy<3uU@HozF;F^2M}x8K|2*Bp0AUd{pM?B(m{$KTTO zzjL37O_37I^W}80aW|j!p{f7o=#Mm= zC#QsBhi{)N^*wp^_V^7U&Cwwtjqcx^Ete#n>tdHS?d~is7q}F;%!fn{=u--H6S(tT zWQ``Gi%2cJ3dt~?^4?u!a}1WIg?8{9^6L1OsUZ=wi;E1Vp4tap3Gw~N>DZxNuLlCP zskU-9t)f^S_mbrJBDpP=N;Gi{5NVlS3WWy6&y*TdAp%qq6&@4mA5;H_a^+JHxVd(_ zltD>~VAO^c%T<<+kktJ%F9i-^vRdy0dN*y#^C8CU8=<4mNW@Jc*=p3e#xpW%M+?rJ-ETPl< zO3nd70UT{|lE|0KY?{$2%7ebnl36}{pDbtY!1S|4<0bD#f+08oR9KEBV<}o~&DrcH z%Xyvc3=Sxp-fR#6#Oz9r$u4up!YNb!h1kx1?l&aOWgo7ASSr*S4A?-gg+VbGMXV%t!u`(X#$-BFRHhi|BhME zd2q`Im_*=+1a+4Bd=O7{_ZcKs^-H@|vp0g$y_SrSue-x#A^0<#@hrfjpa zz?vHfG*mFQ0`A*cr6#lyppIj+AW-ItEYGiZk!jIeEh$_n)CxzY?N^|Nx@KFr8y4)i zx@+rnwkl)fvLt#l%?rV9P!@I)VrEdAiU%bW4gNHM5uRss0L_tZd5(=!s9y|U`xCtZ zh-g1r@W8`J0E$lQ!&>Z3{r?Q=(6omq|Jr z$>DZ+*($xmfa?X`POzH`S{FT_kq>B$s?kvuj`l6A^4u%_s#vfH=9=Jci(1K7tn+-B zbr|)`$t;>T-T_2gpr&K(8_5QbAGOTD`LkTN(LQXC^Y_{Kwv#+N_(?YW7r)nnvY!js 
From 86bbfe9603e61d5e3c22ea8f153629ba5fd12e2e Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Tue, 16 Jul 2024 17:31:29 -0400
Subject: [PATCH 16/26] install scripts, for user addition

---
 CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 60bfd1c2..cdb7ab6e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -16,4 +16,5 @@ add_subdirectory(src)
 
 install(PROGRAMS bin/ccdb DESTINATION ${CMAKE_INSTALL_PREFIX}/bin)
 install(DIRECTORY python DESTINATION ${CMAKE_INSTALL_PREFIX})
+install(DIRECTORY scripts DESTINATION ${CMAKE_INSTALL_PREFIX})
 
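For context, a minimal sketch of what the corrected directive does in stock CMake (nothing below is part of the patch itself): install(DIRECTORY) installs the named directory by name, so the files land under ${CMAKE_INSTALL_PREFIX}/scripts/. The USE_SOURCE_PERMISSIONS option is an assumption about intent, since installed scripts presumably need their executable bits preserved.

    # Sketch only, assuming stock install(DIRECTORY) semantics; not part of the patch.
    # The directory is installed by name: files end up in ${CMAKE_INSTALL_PREFIX}/scripts/...
    # USE_SOURCE_PERMISSIONS (assumed intent) keeps the executable bit on the scripts.
    install(DIRECTORY scripts
            DESTINATION ${CMAKE_INSTALL_PREFIX}
            USE_SOURCE_PERMISSIONS)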
From 76e9f103c9b346482fd21d445416fb80f17759b6 Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Wed, 28 Aug 2024 16:50:42 -0400
Subject: [PATCH 17/26] use $CCDB_USER

---
 python/ccdb/provider.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/ccdb/provider.py b/python/ccdb/provider.py
index 8ec1726d..b50d3632 100644
--- a/python/ccdb/provider.py
+++ b/python/ccdb/provider.py
@@ -50,7 +50,7 @@ def __init__(self):
         self.path_name_regex = re.compile('^[\w\-_]+$', re.IGNORECASE)
         self._connection_string = ""
         self._auth = Authentication(self)
-        self._auth.current_user_name = "anonymous"
+        self._auth.current_user_name = os.getenv("CCDB_USER", "anonymous")
         self.logging_enabled = True
         self._no_structure_message = "No database structure found. Possibly you are trying to connect " + \
                                      "to wrong SQLite file or to MySQL database without schema. " + \

From bb829133e1ff2b632d3270b7570d02625b699441 Mon Sep 17 00:00:00 2001
From: Nathan Baltzell
Date: Wed, 28 Aug 2024 18:09:36 -0400
Subject: [PATCH 18/26] remove ide stuff

---
 projects/CLion/CMakeLists.txt                 |     67 -
 projects/Netbeans/Library/Makefile            |    128 -
 .../Library/nbproject/Makefile-Debug.mk       |    284 -
 .../Library/nbproject/Makefile-Release.mk     |    282 -
 .../Library/nbproject/Makefile-impl.mk        |    133 -
 .../Library/nbproject/Makefile-variables.mk   |     35 -
 .../Library/nbproject/Package-Debug.bash      |     76 -
 .../Library/nbproject/Package-Release.bash    |     76 -
 .../Library/nbproject/configurations.xml      |    848 -
 .../nbproject/private/Makefile-variables.mk   |      7 -
 .../nbproject/private/configurations.xml      |     72 -
 .../nbproject/private/launcher.properties     |     40 -
 .../Library/nbproject/private/private.xml     |     12 -
 .../Netbeans/Library/nbproject/project.xml    |     32 -
 projects/Netbeans/Tests/Makefile              |    128 -
 .../Tests/nbproject/Makefile-Debug.mk         |    226 -
 .../Tests/nbproject/Makefile-Release.mk       |    228 -
 .../Netbeans/Tests/nbproject/Makefile-impl.mk |    133 -
 .../Tests/nbproject/Makefile-variables.mk     |     35 -
 .../Tests/nbproject/Package-Debug.bash        |     76 -
 .../Tests/nbproject/Package-Release.bash      |     76 -
 .../Tests/nbproject/configurations.xml        |    408 -
 .../nbproject/private/Makefile-variables.mk   |      7 -
 .../nbproject/private/configurations.xml      |     73 -
 .../nbproject/private/launcher.properties     |     40 -
 .../Tests/nbproject/private/private.xml       |     11 -
 projects/Netbeans/Tests/nbproject/project.xml |     33 -
 projects/PyCharm/python                       |      1 -
 .../Benchmarks/Benchmarks.vcxproj             |    145 -
 .../Benchmarks/Benchmarks.vcxproj.filters     |     39 -
 projects/VisualStudio/CCDB.sln                |    121 -
 projects/VisualStudio/Jana/Jana.vcxproj       |    133 -
 projects/VisualStudio/Library/Library.vcxproj |    220 -
 .../Library/Library.vcxproj.filters           |    260 -
 projects/VisualStudio/SQLite/shell.c          |   3137 -
 projects/VisualStudio/SQLite/sqlite3.c        | 138243 ---------------
 projects/VisualStudio/SQLite/sqlite3.def      |    205 -
 projects/VisualStudio/SQLite/sqlite3.dll      |    Bin 599419 -> 0 bytes
 projects/VisualStudio/SQLite/sqlite3.exe      |    Bin 486400 -> 0 bytes
 projects/VisualStudio/SQLite/sqlite3.h        |   7055 -
 .../VisualStudio/SQLite/sqlite3_analyzer.exe  |    Bin 1331200 -> 0 bytes
 projects/VisualStudio/SQLite/sqlite3ext.h     |    447 -
 projects/VisualStudio/Tests/Tests.vcxproj     |    214 -
 .../VisualStudio/Tests/Tests.vcxproj.filters  |     84 -
 projects/VisualStudio/WorkDir/libmysql.dll    |    Bin 2492416 -> 0 bytes
 projects/VisualStudio/WorkDir/pthreadVC2.dll  |    Bin 48128 -> 0 bytes
 46 files changed, 153870 deletions(-)
 delete mode 100644 projects/CLion/CMakeLists.txt
 delete mode 100644 projects/Netbeans/Library/Makefile
 delete mode 100644 projects/Netbeans/Library/nbproject/Makefile-Debug.mk
 delete mode 100644 projects/Netbeans/Library/nbproject/Makefile-Release.mk
 delete mode 100644 projects/Netbeans/Library/nbproject/Makefile-impl.mk
 delete mode 100644
projects/Netbeans/Library/nbproject/Makefile-variables.mk delete mode 100644 projects/Netbeans/Library/nbproject/Package-Debug.bash delete mode 100644 projects/Netbeans/Library/nbproject/Package-Release.bash delete mode 100644 projects/Netbeans/Library/nbproject/configurations.xml delete mode 100644 projects/Netbeans/Library/nbproject/private/Makefile-variables.mk delete mode 100644 projects/Netbeans/Library/nbproject/private/configurations.xml delete mode 100644 projects/Netbeans/Library/nbproject/private/launcher.properties delete mode 100644 projects/Netbeans/Library/nbproject/private/private.xml delete mode 100644 projects/Netbeans/Library/nbproject/project.xml delete mode 100644 projects/Netbeans/Tests/Makefile delete mode 100644 projects/Netbeans/Tests/nbproject/Makefile-Debug.mk delete mode 100644 projects/Netbeans/Tests/nbproject/Makefile-Release.mk delete mode 100644 projects/Netbeans/Tests/nbproject/Makefile-impl.mk delete mode 100644 projects/Netbeans/Tests/nbproject/Makefile-variables.mk delete mode 100644 projects/Netbeans/Tests/nbproject/Package-Debug.bash delete mode 100644 projects/Netbeans/Tests/nbproject/Package-Release.bash delete mode 100644 projects/Netbeans/Tests/nbproject/configurations.xml delete mode 100644 projects/Netbeans/Tests/nbproject/private/Makefile-variables.mk delete mode 100644 projects/Netbeans/Tests/nbproject/private/configurations.xml delete mode 100644 projects/Netbeans/Tests/nbproject/private/launcher.properties delete mode 100644 projects/Netbeans/Tests/nbproject/private/private.xml delete mode 100644 projects/Netbeans/Tests/nbproject/project.xml delete mode 120000 projects/PyCharm/python delete mode 100644 projects/VisualStudio/Benchmarks/Benchmarks.vcxproj delete mode 100644 projects/VisualStudio/Benchmarks/Benchmarks.vcxproj.filters delete mode 100644 projects/VisualStudio/CCDB.sln delete mode 100644 projects/VisualStudio/Jana/Jana.vcxproj delete mode 100644 projects/VisualStudio/Library/Library.vcxproj delete mode 100644 projects/VisualStudio/Library/Library.vcxproj.filters delete mode 100644 projects/VisualStudio/SQLite/shell.c delete mode 100644 projects/VisualStudio/SQLite/sqlite3.c delete mode 100644 projects/VisualStudio/SQLite/sqlite3.def delete mode 100644 projects/VisualStudio/SQLite/sqlite3.dll delete mode 100644 projects/VisualStudio/SQLite/sqlite3.exe delete mode 100644 projects/VisualStudio/SQLite/sqlite3.h delete mode 100644 projects/VisualStudio/SQLite/sqlite3_analyzer.exe delete mode 100644 projects/VisualStudio/SQLite/sqlite3ext.h delete mode 100644 projects/VisualStudio/Tests/Tests.vcxproj delete mode 100644 projects/VisualStudio/Tests/Tests.vcxproj.filters delete mode 100644 projects/VisualStudio/WorkDir/libmysql.dll delete mode 100644 projects/VisualStudio/WorkDir/pthreadVC2.dll diff --git a/projects/CLion/CMakeLists.txt b/projects/CLion/CMakeLists.txt deleted file mode 100644 index d25630ee..00000000 --- a/projects/CLion/CMakeLists.txt +++ /dev/null @@ -1,67 +0,0 @@ -cmake_minimum_required(VERSION 3.3) -project(CCDB) - -include_directories( - ../../include - ../../include/SQLite - Source - Tests - ../Utilities/Source -) - -find_package (Threads) -SET(GCC_COVERAGE_LINK_FLAGS "-pthread") - -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") - -set(SOURCE_FILES - -#some global objects - ../../src/Library/Console.cc - ../../src/Library/Log.cc - ../../src/Library/CCDBError.cc - ../../src/Library/GlobalMutex.cc - ../../src/Library/IMutex.cc - ../../src/Library/ISyncObject.cc - ../../src/Library/PthreadMutex.cc - 
../../src/Library/PthreadSyncObject.cc - -#user api - ../../src/Library/Calibration.cc - ../../src/Library/CalibrationGenerator.cc - ../../src/Library/SQLiteCalibration.cc - -#helper classes - ../../src/Library/Helpers/StringUtils.cc - ../../src/Library/Helpers/PathUtils.cc - ../../src/Library/Helpers/WorkUtils.cc - ../../src/Library/Helpers/TimeProvider.cc - -#model and provider - ../../src/Library/Model/ObjectsOwner.cc - ../../src/Library/Model/StoredObject.cc - ../../src/Library/Model/Assignment.cc - ../../src/Library/Model/ConstantsTypeColumn.cc - ../../src/Library/Model/ConstantsTypeTable.cc - ../../src/Library/Model/Directory.cc - ../../src/Library/Model/EventRange.cc - ../../src/Library/Model/RunRange.cc - ../../src/Library/Model/Variation.cc - ../../src/Library/Providers/DataProvider.cc - ../../src/Library/Providers/FileDataProvider.cc - ../../src/Library/Providers/SQLiteDataProvider.cc - ../../src/Library/Providers/IAuthentication.cc - ../../src/Library/Providers/EnvironmentAuthentication.cc - -# Tests - ../../src/Tests/tests.cc - - ../../src/Tests/test_TimeProvider.cc - ../../src/Tests/test_PathUtils.cc - -# SQLite - ../../src/SQLite/sqlite3.c - - ) -add_executable(CCDB ${SOURCE_FILES}) -target_link_libraries (CCDB) \ No newline at end of file diff --git a/projects/Netbeans/Library/Makefile b/projects/Netbeans/Library/Makefile deleted file mode 100644 index 05de621e..00000000 --- a/projects/Netbeans/Library/Makefile +++ /dev/null @@ -1,128 +0,0 @@ -# -# There exist several targets which are by default empty and which can be -# used for execution of your targets. These targets are usually executed -# before and after some main targets. They are: -# -# .build-pre: called before 'build' target -# .build-post: called after 'build' target -# .clean-pre: called before 'clean' target -# .clean-post: called after 'clean' target -# .clobber-pre: called before 'clobber' target -# .clobber-post: called after 'clobber' target -# .all-pre: called before 'all' target -# .all-post: called after 'all' target -# .help-pre: called before 'help' target -# .help-post: called after 'help' target -# -# Targets beginning with '.' are not intended to be called on their own. -# -# Main targets can be executed directly, and they are: -# -# build build a specific configuration -# clean remove built files from a configuration -# clobber remove all built files -# all build all configurations -# help print help mesage -# -# Targets .build-impl, .clean-impl, .clobber-impl, .all-impl, and -# .help-impl are implemented in nbproject/makefile-impl.mk. -# -# Available make variables: -# -# CND_BASEDIR base directory for relative paths -# CND_DISTDIR default top distribution directory (build artifacts) -# CND_BUILDDIR default top build directory (object files, ...) -# CONF name of current configuration -# CND_PLATFORM_${CONF} platform name (current configuration) -# CND_ARTIFACT_DIR_${CONF} directory of build artifact (current configuration) -# CND_ARTIFACT_NAME_${CONF} name of build artifact (current configuration) -# CND_ARTIFACT_PATH_${CONF} path to build artifact (current configuration) -# CND_PACKAGE_DIR_${CONF} directory of package (current configuration) -# CND_PACKAGE_NAME_${CONF} name of package (current configuration) -# CND_PACKAGE_PATH_${CONF} path to package (current configuration) -# -# NOCDDL - - -# Environment -MKDIR=mkdir -CP=cp -CCADMIN=CCadmin - - -# build -build: .build-post - -.build-pre: -# Add your pre 'build' code here... - -.build-post: .build-impl -# Add your post 'build' code here... 
- - -# clean -clean: .clean-post - -.clean-pre: -# Add your pre 'clean' code here... - -.clean-post: .clean-impl -# Add your post 'clean' code here... - - -# clobber -clobber: .clobber-post - -.clobber-pre: -# Add your pre 'clobber' code here... - -.clobber-post: .clobber-impl -# Add your post 'clobber' code here... - - -# all -all: .all-post - -.all-pre: -# Add your pre 'all' code here... - -.all-post: .all-impl -# Add your post 'all' code here... - - -# build tests -build-tests: .build-tests-post - -.build-tests-pre: -# Add your pre 'build-tests' code here... - -.build-tests-post: .build-tests-impl -# Add your post 'build-tests' code here... - - -# run tests -test: .test-post - -.test-pre: build-tests -# Add your pre 'test' code here... - -.test-post: .test-impl -# Add your post 'test' code here... - - -# help -help: .help-post - -.help-pre: -# Add your pre 'help' code here... - -.help-post: .help-impl -# Add your post 'help' code here... - - - -# include project implementation makefile -include nbproject/Makefile-impl.mk - -# include project make variables -include nbproject/Makefile-variables.mk diff --git a/projects/Netbeans/Library/nbproject/Makefile-Debug.mk b/projects/Netbeans/Library/nbproject/Makefile-Debug.mk deleted file mode 100644 index 9297e507..00000000 --- a/projects/Netbeans/Library/nbproject/Makefile-Debug.mk +++ /dev/null @@ -1,284 +0,0 @@ -# -# Generated Makefile - do not edit! -# -# Edit the Makefile in the project folder instead (../Makefile). Each target -# has a -pre and a -post target defined where you can add customized code. -# -# This makefile implements configuration specific macros and targets. - - -# Environment -MKDIR=mkdir -CP=cp -GREP=grep -NM=nm -CCADMIN=CCadmin -RANLIB=ranlib -CC=clang -CCC=clang++ -CXX=clang++ -FC=gfortran -AS=as - -# Macros -CND_PLATFORM=CLang-Linux-x86 -CND_DLIB_EXT=so -CND_CONF=Debug -CND_DISTDIR=dist -CND_BUILDDIR=build - -# Include project Makefile -include Makefile - -# Object Directory -OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} - -# Object Files -OBJECTFILES= \ - ${OBJECTDIR}/_ext/1931398335/CCDBError.o \ - ${OBJECTDIR}/_ext/1931398335/Calibration.o \ - ${OBJECTDIR}/_ext/1931398335/CalibrationGenerator.o \ - ${OBJECTDIR}/_ext/1931398335/Console.o \ - ${OBJECTDIR}/_ext/1931398335/GlobalMutex.o \ - ${OBJECTDIR}/_ext/150347159/PathUtils.o \ - ${OBJECTDIR}/_ext/150347159/Stopwatch.o \ - ${OBJECTDIR}/_ext/150347159/StringUtils.o \ - ${OBJECTDIR}/_ext/150347159/TimeProvider.o \ - ${OBJECTDIR}/_ext/150347159/WorkUtils.o \ - ${OBJECTDIR}/_ext/1931398335/IMutex.o \ - ${OBJECTDIR}/_ext/1931398335/ISyncObject.o \ - ${OBJECTDIR}/_ext/1931398335/Log.o \ - ${OBJECTDIR}/_ext/1091096251/Assignment.o \ - ${OBJECTDIR}/_ext/1091096251/ConstantsTypeColumn.o \ - ${OBJECTDIR}/_ext/1091096251/ConstantsTypeTable.o \ - ${OBJECTDIR}/_ext/1091096251/Directory.o \ - ${OBJECTDIR}/_ext/1091096251/EventRange.o \ - ${OBJECTDIR}/_ext/1091096251/ObjectsOwner.o \ - ${OBJECTDIR}/_ext/1091096251/RunRange.o \ - ${OBJECTDIR}/_ext/1091096251/StoredObject.o \ - ${OBJECTDIR}/_ext/1091096251/Variation.o \ - ${OBJECTDIR}/_ext/1931398335/MySQLCalibration.o \ - ${OBJECTDIR}/_ext/899248948/DataProvider.o \ - ${OBJECTDIR}/_ext/899248948/EnvironmentAuthentication.o \ - ${OBJECTDIR}/_ext/899248948/FileDataProvider.o \ - ${OBJECTDIR}/_ext/899248948/IAuthentication.o \ - ${OBJECTDIR}/_ext/899248948/MySQLConnectionInfo.o \ - ${OBJECTDIR}/_ext/899248948/MySQLDataProvider.o \ - ${OBJECTDIR}/_ext/899248948/SQLiteDataProvider.o \ - 
${OBJECTDIR}/_ext/1931398335/PthreadMutex.o \ - ${OBJECTDIR}/_ext/1931398335/PthreadSyncObject.o \ - ${OBJECTDIR}/_ext/1931398335/SQLiteCalibration.o \ - ${OBJECTDIR}/_ext/936628307/sqlite3.o - - -# C Compiler Flags -CFLAGS= - -# CC Compiler Flags -CCFLAGS= -CXXFLAGS= - -# Fortran Compiler Flags -FFLAGS= - -# Assembler Flags -ASFLAGS= - -# Link Libraries and Options -LDLIBSOPTIONS=-lpthread /usr/lib/x86_64-linux-gnu/libmysqlclient.so - -# Build Targets -.build-conf: ${BUILD_SUBPROJECTS} - "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT} - -${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT}: /usr/lib/x86_64-linux-gnu/libmysqlclient.so - -${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT}: ${OBJECTFILES} - ${MKDIR} -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM} - ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT} ${OBJECTFILES} ${LDLIBSOPTIONS} -shared -fPIC - -${OBJECTDIR}/_ext/1931398335/CCDBError.o: ../../../src/Library/CCDBError.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/CCDBError.o ../../../src/Library/CCDBError.cc - -${OBJECTDIR}/_ext/1931398335/Calibration.o: ../../../src/Library/Calibration.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/Calibration.o ../../../src/Library/Calibration.cc - -${OBJECTDIR}/_ext/1931398335/CalibrationGenerator.o: ../../../src/Library/CalibrationGenerator.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/CalibrationGenerator.o ../../../src/Library/CalibrationGenerator.cc - -${OBJECTDIR}/_ext/1931398335/Console.o: ../../../src/Library/Console.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/Console.o ../../../src/Library/Console.cc - -${OBJECTDIR}/_ext/1931398335/GlobalMutex.o: ../../../src/Library/GlobalMutex.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/GlobalMutex.o ../../../src/Library/GlobalMutex.cc - -${OBJECTDIR}/_ext/150347159/PathUtils.o: ../../../src/Library/Helpers/PathUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/PathUtils.o ../../../src/Library/Helpers/PathUtils.cc - -${OBJECTDIR}/_ext/150347159/Stopwatch.o: ../../../src/Library/Helpers/Stopwatch.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/Stopwatch.o ../../../src/Library/Helpers/Stopwatch.cc - -${OBJECTDIR}/_ext/150347159/StringUtils.o: ../../../src/Library/Helpers/StringUtils.cc - 
${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/StringUtils.o ../../../src/Library/Helpers/StringUtils.cc - -${OBJECTDIR}/_ext/150347159/TimeProvider.o: ../../../src/Library/Helpers/TimeProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/TimeProvider.o ../../../src/Library/Helpers/TimeProvider.cc - -${OBJECTDIR}/_ext/150347159/WorkUtils.o: ../../../src/Library/Helpers/WorkUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/WorkUtils.o ../../../src/Library/Helpers/WorkUtils.cc - -${OBJECTDIR}/_ext/1931398335/IMutex.o: ../../../src/Library/IMutex.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/IMutex.o ../../../src/Library/IMutex.cc - -${OBJECTDIR}/_ext/1931398335/ISyncObject.o: ../../../src/Library/ISyncObject.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/ISyncObject.o ../../../src/Library/ISyncObject.cc - -${OBJECTDIR}/_ext/1931398335/Log.o: ../../../src/Library/Log.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/Log.o ../../../src/Library/Log.cc - -${OBJECTDIR}/_ext/1091096251/Assignment.o: ../../../src/Library/Model/Assignment.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/Assignment.o ../../../src/Library/Model/Assignment.cc - -${OBJECTDIR}/_ext/1091096251/ConstantsTypeColumn.o: ../../../src/Library/Model/ConstantsTypeColumn.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/ConstantsTypeColumn.o ../../../src/Library/Model/ConstantsTypeColumn.cc - -${OBJECTDIR}/_ext/1091096251/ConstantsTypeTable.o: ../../../src/Library/Model/ConstantsTypeTable.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/ConstantsTypeTable.o ../../../src/Library/Model/ConstantsTypeTable.cc - -${OBJECTDIR}/_ext/1091096251/Directory.o: ../../../src/Library/Model/Directory.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/Directory.o ../../../src/Library/Model/Directory.cc - -${OBJECTDIR}/_ext/1091096251/EventRange.o: ../../../src/Library/Model/EventRange.cc - ${MKDIR} 
-p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/EventRange.o ../../../src/Library/Model/EventRange.cc - -${OBJECTDIR}/_ext/1091096251/ObjectsOwner.o: ../../../src/Library/Model/ObjectsOwner.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/ObjectsOwner.o ../../../src/Library/Model/ObjectsOwner.cc - -${OBJECTDIR}/_ext/1091096251/RunRange.o: ../../../src/Library/Model/RunRange.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/RunRange.o ../../../src/Library/Model/RunRange.cc - -${OBJECTDIR}/_ext/1091096251/StoredObject.o: ../../../src/Library/Model/StoredObject.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/StoredObject.o ../../../src/Library/Model/StoredObject.cc - -${OBJECTDIR}/_ext/1091096251/Variation.o: ../../../src/Library/Model/Variation.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/Variation.o ../../../src/Library/Model/Variation.cc - -${OBJECTDIR}/_ext/1931398335/MySQLCalibration.o: ../../../src/Library/MySQLCalibration.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/MySQLCalibration.o ../../../src/Library/MySQLCalibration.cc - -${OBJECTDIR}/_ext/899248948/DataProvider.o: ../../../src/Library/Providers/DataProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/DataProvider.o ../../../src/Library/Providers/DataProvider.cc - -${OBJECTDIR}/_ext/899248948/EnvironmentAuthentication.o: ../../../src/Library/Providers/EnvironmentAuthentication.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/EnvironmentAuthentication.o ../../../src/Library/Providers/EnvironmentAuthentication.cc - -${OBJECTDIR}/_ext/899248948/FileDataProvider.o: ../../../src/Library/Providers/FileDataProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/FileDataProvider.o ../../../src/Library/Providers/FileDataProvider.cc - -${OBJECTDIR}/_ext/899248948/IAuthentication.o: ../../../src/Library/Providers/IAuthentication.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/IAuthentication.o 
../../../src/Library/Providers/IAuthentication.cc - -${OBJECTDIR}/_ext/899248948/MySQLConnectionInfo.o: ../../../src/Library/Providers/MySQLConnectionInfo.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/MySQLConnectionInfo.o ../../../src/Library/Providers/MySQLConnectionInfo.cc - -${OBJECTDIR}/_ext/899248948/MySQLDataProvider.o: ../../../src/Library/Providers/MySQLDataProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/MySQLDataProvider.o ../../../src/Library/Providers/MySQLDataProvider.cc - -${OBJECTDIR}/_ext/899248948/SQLiteDataProvider.o: ../../../src/Library/Providers/SQLiteDataProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/SQLiteDataProvider.o ../../../src/Library/Providers/SQLiteDataProvider.cc - -${OBJECTDIR}/_ext/1931398335/PthreadMutex.o: ../../../src/Library/PthreadMutex.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/PthreadMutex.o ../../../src/Library/PthreadMutex.cc - -${OBJECTDIR}/_ext/1931398335/PthreadSyncObject.o: ../../../src/Library/PthreadSyncObject.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/PthreadSyncObject.o ../../../src/Library/PthreadSyncObject.cc - -${OBJECTDIR}/_ext/1931398335/SQLiteCalibration.o: ../../../src/Library/SQLiteCalibration.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -g -DCCDB_MYSQL -I../../../include -I../../../include/SQLite -I/usr/include/mysql -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/SQLiteCalibration.o ../../../src/Library/SQLiteCalibration.cc - -${OBJECTDIR}/_ext/936628307/sqlite3.o: /home/romanov/halld/ccdb/trunk/src/SQLite/sqlite3.c - ${MKDIR} -p ${OBJECTDIR}/_ext/936628307 - ${RM} "$@.d" - $(COMPILE.c) -g -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/936628307/sqlite3.o /home/romanov/halld/ccdb/trunk/src/SQLite/sqlite3.c - -# Subprojects -.build-subprojects: - -# Clean Targets -.clean-conf: ${CLEAN_SUBPROJECTS} - ${RM} -r ${CND_BUILDDIR}/${CND_CONF} - ${RM} ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT} - -# Subprojects -.clean-subprojects: - -# Enable dependency checking -.dep.inc: .depcheck-impl - -include .dep.inc diff --git a/projects/Netbeans/Library/nbproject/Makefile-Release.mk b/projects/Netbeans/Library/nbproject/Makefile-Release.mk deleted file mode 100644 index 03d1ffc7..00000000 --- a/projects/Netbeans/Library/nbproject/Makefile-Release.mk +++ /dev/null @@ -1,282 +0,0 @@ -# -# Generated Makefile - do not edit! -# -# Edit the Makefile in the project folder instead (../Makefile). Each target -# has a -pre and a -post target defined where you can add customized code. -# -# This makefile implements configuration specific macros and targets. 
- - -# Environment -MKDIR=mkdir -CP=cp -GREP=grep -NM=nm -CCADMIN=CCadmin -RANLIB=ranlib -CC=clang -CCC=clang++ -CXX=clang++ -FC=gfortran -AS=as - -# Macros -CND_PLATFORM=CLang-Linux-x86 -CND_DLIB_EXT=so -CND_CONF=Release -CND_DISTDIR=dist -CND_BUILDDIR=build - -# Include project Makefile -include Makefile - -# Object Directory -OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} - -# Object Files -OBJECTFILES= \ - ${OBJECTDIR}/_ext/1931398335/CCDBError.o \ - ${OBJECTDIR}/_ext/1931398335/Calibration.o \ - ${OBJECTDIR}/_ext/1931398335/CalibrationGenerator.o \ - ${OBJECTDIR}/_ext/1931398335/Console.o \ - ${OBJECTDIR}/_ext/1931398335/GlobalMutex.o \ - ${OBJECTDIR}/_ext/150347159/PathUtils.o \ - ${OBJECTDIR}/_ext/150347159/Stopwatch.o \ - ${OBJECTDIR}/_ext/150347159/StringUtils.o \ - ${OBJECTDIR}/_ext/150347159/TimeProvider.o \ - ${OBJECTDIR}/_ext/150347159/WorkUtils.o \ - ${OBJECTDIR}/_ext/1931398335/IMutex.o \ - ${OBJECTDIR}/_ext/1931398335/ISyncObject.o \ - ${OBJECTDIR}/_ext/1931398335/Log.o \ - ${OBJECTDIR}/_ext/1091096251/Assignment.o \ - ${OBJECTDIR}/_ext/1091096251/ConstantsTypeColumn.o \ - ${OBJECTDIR}/_ext/1091096251/ConstantsTypeTable.o \ - ${OBJECTDIR}/_ext/1091096251/Directory.o \ - ${OBJECTDIR}/_ext/1091096251/EventRange.o \ - ${OBJECTDIR}/_ext/1091096251/ObjectsOwner.o \ - ${OBJECTDIR}/_ext/1091096251/RunRange.o \ - ${OBJECTDIR}/_ext/1091096251/StoredObject.o \ - ${OBJECTDIR}/_ext/1091096251/Variation.o \ - ${OBJECTDIR}/_ext/1931398335/MySQLCalibration.o \ - ${OBJECTDIR}/_ext/899248948/DataProvider.o \ - ${OBJECTDIR}/_ext/899248948/EnvironmentAuthentication.o \ - ${OBJECTDIR}/_ext/899248948/FileDataProvider.o \ - ${OBJECTDIR}/_ext/899248948/IAuthentication.o \ - ${OBJECTDIR}/_ext/899248948/MySQLConnectionInfo.o \ - ${OBJECTDIR}/_ext/899248948/MySQLDataProvider.o \ - ${OBJECTDIR}/_ext/899248948/SQLiteDataProvider.o \ - ${OBJECTDIR}/_ext/1931398335/PthreadMutex.o \ - ${OBJECTDIR}/_ext/1931398335/PthreadSyncObject.o \ - ${OBJECTDIR}/_ext/1931398335/SQLiteCalibration.o \ - ${OBJECTDIR}/_ext/936628307/sqlite3.o - - -# C Compiler Flags -CFLAGS= - -# CC Compiler Flags -CCFLAGS= -CXXFLAGS= - -# Fortran Compiler Flags -FFLAGS= - -# Assembler Flags -ASFLAGS= - -# Link Libraries and Options -LDLIBSOPTIONS= - -# Build Targets -.build-conf: ${BUILD_SUBPROJECTS} - "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT} - -${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT}: ${OBJECTFILES} - ${MKDIR} -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM} - ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT} ${OBJECTFILES} ${LDLIBSOPTIONS} -shared -fPIC - -${OBJECTDIR}/_ext/1931398335/CCDBError.o: ../../../src/Library/CCDBError.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/CCDBError.o ../../../src/Library/CCDBError.cc - -${OBJECTDIR}/_ext/1931398335/Calibration.o: ../../../src/Library/Calibration.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/Calibration.o ../../../src/Library/Calibration.cc - -${OBJECTDIR}/_ext/1931398335/CalibrationGenerator.o: ../../../src/Library/CalibrationGenerator.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/CalibrationGenerator.o ../../../src/Library/CalibrationGenerator.cc - 
-${OBJECTDIR}/_ext/1931398335/Console.o: ../../../src/Library/Console.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/Console.o ../../../src/Library/Console.cc - -${OBJECTDIR}/_ext/1931398335/GlobalMutex.o: ../../../src/Library/GlobalMutex.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/GlobalMutex.o ../../../src/Library/GlobalMutex.cc - -${OBJECTDIR}/_ext/150347159/PathUtils.o: ../../../src/Library/Helpers/PathUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/PathUtils.o ../../../src/Library/Helpers/PathUtils.cc - -${OBJECTDIR}/_ext/150347159/Stopwatch.o: ../../../src/Library/Helpers/Stopwatch.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/Stopwatch.o ../../../src/Library/Helpers/Stopwatch.cc - -${OBJECTDIR}/_ext/150347159/StringUtils.o: ../../../src/Library/Helpers/StringUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/StringUtils.o ../../../src/Library/Helpers/StringUtils.cc - -${OBJECTDIR}/_ext/150347159/TimeProvider.o: ../../../src/Library/Helpers/TimeProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/TimeProvider.o ../../../src/Library/Helpers/TimeProvider.cc - -${OBJECTDIR}/_ext/150347159/WorkUtils.o: ../../../src/Library/Helpers/WorkUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/150347159 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/150347159/WorkUtils.o ../../../src/Library/Helpers/WorkUtils.cc - -${OBJECTDIR}/_ext/1931398335/IMutex.o: ../../../src/Library/IMutex.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/IMutex.o ../../../src/Library/IMutex.cc - -${OBJECTDIR}/_ext/1931398335/ISyncObject.o: ../../../src/Library/ISyncObject.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/ISyncObject.o ../../../src/Library/ISyncObject.cc - -${OBJECTDIR}/_ext/1931398335/Log.o: ../../../src/Library/Log.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/Log.o ../../../src/Library/Log.cc - -${OBJECTDIR}/_ext/1091096251/Assignment.o: ../../../src/Library/Model/Assignment.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/Assignment.o ../../../src/Library/Model/Assignment.cc - -${OBJECTDIR}/_ext/1091096251/ConstantsTypeColumn.o: ../../../src/Library/Model/ConstantsTypeColumn.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/ConstantsTypeColumn.o ../../../src/Library/Model/ConstantsTypeColumn.cc - -${OBJECTDIR}/_ext/1091096251/ConstantsTypeTable.o: ../../../src/Library/Model/ConstantsTypeTable.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/ConstantsTypeTable.o 
../../../src/Library/Model/ConstantsTypeTable.cc - -${OBJECTDIR}/_ext/1091096251/Directory.o: ../../../src/Library/Model/Directory.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/Directory.o ../../../src/Library/Model/Directory.cc - -${OBJECTDIR}/_ext/1091096251/EventRange.o: ../../../src/Library/Model/EventRange.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/EventRange.o ../../../src/Library/Model/EventRange.cc - -${OBJECTDIR}/_ext/1091096251/ObjectsOwner.o: ../../../src/Library/Model/ObjectsOwner.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/ObjectsOwner.o ../../../src/Library/Model/ObjectsOwner.cc - -${OBJECTDIR}/_ext/1091096251/RunRange.o: ../../../src/Library/Model/RunRange.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/RunRange.o ../../../src/Library/Model/RunRange.cc - -${OBJECTDIR}/_ext/1091096251/StoredObject.o: ../../../src/Library/Model/StoredObject.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/StoredObject.o ../../../src/Library/Model/StoredObject.cc - -${OBJECTDIR}/_ext/1091096251/Variation.o: ../../../src/Library/Model/Variation.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1091096251 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1091096251/Variation.o ../../../src/Library/Model/Variation.cc - -${OBJECTDIR}/_ext/1931398335/MySQLCalibration.o: ../../../src/Library/MySQLCalibration.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/MySQLCalibration.o ../../../src/Library/MySQLCalibration.cc - -${OBJECTDIR}/_ext/899248948/DataProvider.o: ../../../src/Library/Providers/DataProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/DataProvider.o ../../../src/Library/Providers/DataProvider.cc - -${OBJECTDIR}/_ext/899248948/EnvironmentAuthentication.o: ../../../src/Library/Providers/EnvironmentAuthentication.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/EnvironmentAuthentication.o ../../../src/Library/Providers/EnvironmentAuthentication.cc - -${OBJECTDIR}/_ext/899248948/FileDataProvider.o: ../../../src/Library/Providers/FileDataProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/FileDataProvider.o ../../../src/Library/Providers/FileDataProvider.cc - -${OBJECTDIR}/_ext/899248948/IAuthentication.o: ../../../src/Library/Providers/IAuthentication.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/IAuthentication.o ../../../src/Library/Providers/IAuthentication.cc - -${OBJECTDIR}/_ext/899248948/MySQLConnectionInfo.o: ../../../src/Library/Providers/MySQLConnectionInfo.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/MySQLConnectionInfo.o 
../../../src/Library/Providers/MySQLConnectionInfo.cc - -${OBJECTDIR}/_ext/899248948/MySQLDataProvider.o: ../../../src/Library/Providers/MySQLDataProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/MySQLDataProvider.o ../../../src/Library/Providers/MySQLDataProvider.cc - -${OBJECTDIR}/_ext/899248948/SQLiteDataProvider.o: ../../../src/Library/Providers/SQLiteDataProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/899248948 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/899248948/SQLiteDataProvider.o ../../../src/Library/Providers/SQLiteDataProvider.cc - -${OBJECTDIR}/_ext/1931398335/PthreadMutex.o: ../../../src/Library/PthreadMutex.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/PthreadMutex.o ../../../src/Library/PthreadMutex.cc - -${OBJECTDIR}/_ext/1931398335/PthreadSyncObject.o: ../../../src/Library/PthreadSyncObject.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/PthreadSyncObject.o ../../../src/Library/PthreadSyncObject.cc - -${OBJECTDIR}/_ext/1931398335/SQLiteCalibration.o: ../../../src/Library/SQLiteCalibration.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1931398335 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1931398335/SQLiteCalibration.o ../../../src/Library/SQLiteCalibration.cc - -${OBJECTDIR}/_ext/936628307/sqlite3.o: /home/romanov/halld/ccdb/trunk/src/SQLite/sqlite3.c - ${MKDIR} -p ${OBJECTDIR}/_ext/936628307 - ${RM} "$@.d" - $(COMPILE.c) -O2 -fPIC -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/936628307/sqlite3.o /home/romanov/halld/ccdb/trunk/src/SQLite/sqlite3.c - -# Subprojects -.build-subprojects: - -# Clean Targets -.clean-conf: ${CLEAN_SUBPROJECTS} - ${RM} -r ${CND_BUILDDIR}/${CND_CONF} - ${RM} ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT} - -# Subprojects -.clean-subprojects: - -# Enable dependency checking -.dep.inc: .depcheck-impl - -include .dep.inc diff --git a/projects/Netbeans/Library/nbproject/Makefile-impl.mk b/projects/Netbeans/Library/nbproject/Makefile-impl.mk deleted file mode 100644 index 4164d589..00000000 --- a/projects/Netbeans/Library/nbproject/Makefile-impl.mk +++ /dev/null @@ -1,133 +0,0 @@ -# -# Generated Makefile - do not edit! -# -# Edit the Makefile in the project folder instead (../Makefile). Each target -# has a pre- and a post- target defined where you can add customization code. -# -# This makefile implements macros and targets common to all configurations. -# -# NOCDDL - - -# Building and Cleaning subprojects are done by default, but can be controlled with the SUB -# macro. If SUB=no, subprojects will not be built or cleaned. The following macro -# statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf -# and .clean-reqprojects-conf unless SUB has the value 'no' -SUB_no=NO -SUBPROJECTS=${SUB_${SUB}} -BUILD_SUBPROJECTS_=.build-subprojects -BUILD_SUBPROJECTS_NO= -BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} -CLEAN_SUBPROJECTS_=.clean-subprojects -CLEAN_SUBPROJECTS_NO= -CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} - - -# Project Name -PROJECTNAME=Library - -# Active Configuration -DEFAULTCONF=Debug -CONF=${DEFAULTCONF} - -# All Configurations -ALLCONFS=Debug Release - - -# build -.build-impl: .build-pre .validate-impl .depcheck-impl - @#echo "=> Running $@... 
Configuration=$(CONF)" - "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf - - -# clean -.clean-impl: .clean-pre .validate-impl .depcheck-impl - @#echo "=> Running $@... Configuration=$(CONF)" - "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf - - -# clobber -.clobber-impl: .clobber-pre .depcheck-impl - @#echo "=> Running $@..." - for CONF in ${ALLCONFS}; \ - do \ - "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ - done - -# all -.all-impl: .all-pre .depcheck-impl - @#echo "=> Running $@..." - for CONF in ${ALLCONFS}; \ - do \ - "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ - done - -# build tests -.build-tests-impl: .build-impl .build-tests-pre - @#echo "=> Running $@... Configuration=$(CONF)" - "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf - -# run tests -.test-impl: .build-tests-impl .test-pre - @#echo "=> Running $@... Configuration=$(CONF)" - "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf - -# dependency checking support -.depcheck-impl: - @echo "# This code depends on make tool being used" >.dep.inc - @if [ -n "${MAKE_VERSION}" ]; then \ - echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ - echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ - echo "include \$${DEPFILES}" >>.dep.inc; \ - echo "endif" >>.dep.inc; \ - else \ - echo ".KEEP_STATE:" >>.dep.inc; \ - echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ - fi - -# configuration validation -.validate-impl: - @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ - then \ - echo ""; \ - echo "Error: can not find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ - echo "See 'make help' for details."; \ - echo "Current directory: " `pwd`; \ - echo ""; \ - fi - @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ - then \ - exit 1; \ - fi - - -# help -.help-impl: .help-pre - @echo "This makefile supports the following configurations:" - @echo " ${ALLCONFS}" - @echo "" - @echo "and the following targets:" - @echo " build (default target)" - @echo " clean" - @echo " clobber" - @echo " all" - @echo " help" - @echo "" - @echo "Makefile Usage:" - @echo " make [CONF=] [SUB=no] build" - @echo " make [CONF=] [SUB=no] clean" - @echo " make [SUB=no] clobber" - @echo " make [SUB=no] all" - @echo " make help" - @echo "" - @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," - @echo " also build subprojects." - @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," - @echo " also clean subprojects." - @echo "Target 'clobber' will remove all built files from all configurations and," - @echo " unless 'SUB=no', also from subprojects." - @echo "Target 'all' will will build all configurations and, unless 'SUB=no'," - @echo " also build subprojects." - @echo "Target 'help' prints this message." - @echo "" - diff --git a/projects/Netbeans/Library/nbproject/Makefile-variables.mk b/projects/Netbeans/Library/nbproject/Makefile-variables.mk deleted file mode 100644 index 18546b56..00000000 --- a/projects/Netbeans/Library/nbproject/Makefile-variables.mk +++ /dev/null @@ -1,35 +0,0 @@ -# -# Generated - do not edit! 
-# -# NOCDDL -# -CND_BASEDIR=`pwd` -CND_BUILDDIR=build -CND_DISTDIR=dist -# Debug configuration -CND_PLATFORM_Debug=CLang-Linux-x86 -CND_ARTIFACT_DIR_Debug=dist/Debug/CLang-Linux-x86 -CND_ARTIFACT_NAME_Debug=libLibrary.so -CND_ARTIFACT_PATH_Debug=dist/Debug/CLang-Linux-x86/libLibrary.so -CND_PACKAGE_DIR_Debug=dist/Debug/CLang-Linux-x86/package -CND_PACKAGE_NAME_Debug=libLibrary.so.tar -CND_PACKAGE_PATH_Debug=dist/Debug/CLang-Linux-x86/package/libLibrary.so.tar -# Release configuration -CND_PLATFORM_Release=CLang-Linux-x86 -CND_ARTIFACT_DIR_Release=dist/Release/CLang-Linux-x86 -CND_ARTIFACT_NAME_Release=libLibrary.so -CND_ARTIFACT_PATH_Release=dist/Release/CLang-Linux-x86/libLibrary.so -CND_PACKAGE_DIR_Release=dist/Release/CLang-Linux-x86/package -CND_PACKAGE_NAME_Release=libLibrary.so.tar -CND_PACKAGE_PATH_Release=dist/Release/CLang-Linux-x86/package/libLibrary.so.tar -# -# include compiler specific variables -# -# dmake command -ROOT:sh = test -f nbproject/private/Makefile-variables.mk || \ - (mkdir -p nbproject/private && touch nbproject/private/Makefile-variables.mk) -# -# gmake command -.PHONY: $(shell test -f nbproject/private/Makefile-variables.mk || (mkdir -p nbproject/private && touch nbproject/private/Makefile-variables.mk)) -# -include nbproject/private/Makefile-variables.mk diff --git a/projects/Netbeans/Library/nbproject/Package-Debug.bash b/projects/Netbeans/Library/nbproject/Package-Debug.bash deleted file mode 100644 index aa05bcb4..00000000 --- a/projects/Netbeans/Library/nbproject/Package-Debug.bash +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -x - -# -# Generated - do not edit! -# - -# Macros -TOP=`pwd` -CND_PLATFORM=CLang-Linux-x86 -CND_CONF=Debug -CND_DISTDIR=dist -CND_BUILDDIR=build -CND_DLIB_EXT=so -NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging -TMPDIRNAME=tmp-packaging -OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT} -OUTPUT_BASENAME=libLibrary.${CND_DLIB_EXT} -PACKAGE_TOP_DIR=libLibrary.so/ - -# Functions -function checkReturnCode -{ - rc=$? - if [ $rc != 0 ] - then - exit $rc - fi -} -function makeDirectory -# $1 directory path -# $2 permission (optional) -{ - mkdir -p "$1" - checkReturnCode - if [ "$2" != "" ] - then - chmod $2 "$1" - checkReturnCode - fi -} -function copyFileToTmpDir -# $1 from-file path -# $2 to-file path -# $3 permission -{ - cp "$1" "$2" - checkReturnCode - if [ "$3" != "" ] - then - chmod $3 "$2" - checkReturnCode - fi -} - -# Setup -cd "${TOP}" -mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package -rm -rf ${NBTMPDIR} -mkdir -p ${NBTMPDIR} - -# Copy files and create directories and links -cd "${TOP}" -makeDirectory "${NBTMPDIR}/libLibrary.so/lib" -copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644 - - -# Generate tar file -cd "${TOP}" -rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/libLibrary.so.tar -cd ${NBTMPDIR} -tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/libLibrary.so.tar * -checkReturnCode - -# Cleanup -cd "${TOP}" -rm -rf ${NBTMPDIR} diff --git a/projects/Netbeans/Library/nbproject/Package-Release.bash b/projects/Netbeans/Library/nbproject/Package-Release.bash deleted file mode 100644 index 12a34e05..00000000 --- a/projects/Netbeans/Library/nbproject/Package-Release.bash +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -x - -# -# Generated - do not edit! 
-# - -# Macros -TOP=`pwd` -CND_PLATFORM=CLang-Linux-x86 -CND_CONF=Release -CND_DISTDIR=dist -CND_BUILDDIR=build -CND_DLIB_EXT=so -NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging -TMPDIRNAME=tmp-packaging -OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libLibrary.${CND_DLIB_EXT} -OUTPUT_BASENAME=libLibrary.${CND_DLIB_EXT} -PACKAGE_TOP_DIR=libLibrary.so/ - -# Functions -function checkReturnCode -{ - rc=$? - if [ $rc != 0 ] - then - exit $rc - fi -} -function makeDirectory -# $1 directory path -# $2 permission (optional) -{ - mkdir -p "$1" - checkReturnCode - if [ "$2" != "" ] - then - chmod $2 "$1" - checkReturnCode - fi -} -function copyFileToTmpDir -# $1 from-file path -# $2 to-file path -# $3 permission -{ - cp "$1" "$2" - checkReturnCode - if [ "$3" != "" ] - then - chmod $3 "$2" - checkReturnCode - fi -} - -# Setup -cd "${TOP}" -mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package -rm -rf ${NBTMPDIR} -mkdir -p ${NBTMPDIR} - -# Copy files and create directories and links -cd "${TOP}" -makeDirectory "${NBTMPDIR}/libLibrary.so/lib" -copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644 - - -# Generate tar file -cd "${TOP}" -rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/libLibrary.so.tar -cd ${NBTMPDIR} -tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/libLibrary.so.tar * -checkReturnCode - -# Cleanup -cd "${TOP}" -rm -rf ${NBTMPDIR} diff --git a/projects/Netbeans/Library/nbproject/configurations.xml b/projects/Netbeans/Library/nbproject/configurations.xml deleted file mode 100644 index 89497168..00000000 --- a/projects/Netbeans/Library/nbproject/configurations.xml +++ /dev/null @@ -1,848 +0,0 @@ - - - - - - - ../../../include/CCDB/Helpers/PathUtils.h - ../../../include/CCDB/Helpers/Stopwatch.h - ../../../include/CCDB/Helpers/StringUtils.h - ../../../include/CCDB/Helpers/TStopwatch.h - ../../../include/CCDB/Helpers/TimeProvider.h - ../../../include/CCDB/Helpers/Varargs.h - ../../../include/CCDB/Helpers/WorkUtils.h - - - ../../../include/CCDB/Model/Assignment.h - ../../../include/CCDB/Model/ConstantsTypeColumn.h - ../../../include/CCDB/Model/ConstantsTypeTable.h - ../../../include/CCDB/Model/Directory.h - ../../../include/CCDB/Model/EventRange.h - ../../../include/CCDB/Model/ObjectsOwner.h - ../../../include/CCDB/Model/RunRange.h - ../../../include/CCDB/Model/StoredObject.h - ../../../include/CCDB/Model/Variation.h - - - ../../../include/CCDB/Providers/DataProvider.h - ../../../include/CCDB/Providers/EnvironmentAuthentication.h - ../../../include/CCDB/Providers/FileDataProvider.h - ../../../include/CCDB/Providers/IAuthentication.h - ../../../include/CCDB/Providers/MySQLConnectionInfo.h - ../../../include/CCDB/Providers/MySQLDataProvider.h - ../../../include/CCDB/Providers/SQLiteDataProvider.h - - ../../../include/CCDB/CCDBError.h - ../../../include/CCDB/Calibration.h - ../../../include/CCDB/CalibrationGenerator.h - ../../../include/CCDB/Console.h - ../../../include/CCDB/ConstantsTable.h - ../../../include/CCDB/GlobalMutex.h - ../../../include/CCDB/Globals.h - ../../../include/CCDB/IMutex.h - ../../../include/CCDB/ISyncObject.h - ../../../include/CCDB/Log.h - ../../../include/CCDB/MySQLCalibration.h - ../../../include/CCDB/PthreadMutex.h - ../../../include/CCDB/PthreadSyncObject.h - ../../../include/CCDB/SQLiteCalibration.h - - - - - - - - ../../../src/Library/Helpers/PathUtils.cc - ../../../src/Library/Helpers/Stopwatch.cc - ../../../src/Library/Helpers/StringUtils.cc - 
../../../src/Library/Helpers/TimeProvider.cc - ../../../src/Library/Helpers/WorkUtils.cc - - - ../../../src/Library/Model/Assignment.cc - ../../../src/Library/Model/ConstantsTypeColumn.cc - ../../../src/Library/Model/ConstantsTypeTable.cc - ../../../src/Library/Model/Directory.cc - ../../../src/Library/Model/EventRange.cc - ../../../src/Library/Model/ObjectsOwner.cc - ../../../src/Library/Model/RunRange.cc - ../../../src/Library/Model/StoredObject.cc - ../../../src/Library/Model/Variation.cc - - - ../../../src/Library/Providers/DataProvider.cc - ../../../src/Library/Providers/EnvironmentAuthentication.cc - ../../../src/Library/Providers/FileDataProvider.cc - ../../../src/Library/Providers/IAuthentication.cc - ../../../src/Library/Providers/MySQLConnectionInfo.cc - ../../../src/Library/Providers/MySQLDataProvider.cc - ../../../src/Library/Providers/SQLiteDataProvider.cc - - ../../../src/Library/CCDBError.cc - ../../../src/Library/Calibration.cc - ../../../src/Library/CalibrationGenerator.cc - ../../../src/Library/Console.cc - ../../../src/Library/GlobalMutex.cc - ../../../src/Library/IMutex.cc - ../../../src/Library/ISyncObject.cc - ../../../src/Library/Log.cc - ../../../src/Library/MySQLCalibration.cc - ../../../src/Library/PthreadMutex.cc - ../../../src/Library/PthreadSyncObject.cc - ../../../src/Library/SConscript - ../../../src/Library/SQLiteCalibration.cc - ../../../src/Library/control.in - - - /home/romanov/halld/ccdb/trunk/src/SQLite/SConscript - /home/romanov/halld/ccdb/trunk/src/SQLite/sqlite3.c - - - - - - Makefile - - - - ../../../include/CCDB - ../../../src/Library - /home/romanov/halld/ccdb/trunk/src/SQLite - - Makefile - - - - CLang|CLang - true - false - - - - - ../../../include - ../../../include/SQLite - /usr/include/mysql - - - CCDB_MYSQL - - - - - PosixThreads - /usr/lib/x86_64-linux-gnu/libmysqlclient.so - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - CLang|CLang - true - false - - - - 5 - - - 5 - - - 5 - - - 5 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/projects/Netbeans/Library/nbproject/private/Makefile-variables.mk b/projects/Netbeans/Library/nbproject/private/Makefile-variables.mk deleted file mode 100644 index a64183e1..00000000 --- a/projects/Netbeans/Library/nbproject/private/Makefile-variables.mk +++ /dev/null @@ -1,7 +0,0 @@ -# -# Generated - do not edit! 
-# -# NOCDDL -# -# Debug configuration -# Release configuration diff --git a/projects/Netbeans/Library/nbproject/private/configurations.xml b/projects/Netbeans/Library/nbproject/private/configurations.xml deleted file mode 100644 index f6fc2887..00000000 --- a/projects/Netbeans/Library/nbproject/private/configurations.xml +++ /dev/null @@ -1,72 +0,0 @@ - - - Makefile - - - - localhost - 2 - - - - - - - - - - - - - - - gdb - - - - "${OUTPUT_PATH}" - - "${OUTPUT_PATH}" - - true - 0 - 0 - - - - - - - localhost - 2 - - - - - - - - - - - - - - - gdb - - - - "${OUTPUT_PATH}" - - "${OUTPUT_PATH}" - - true - 0 - 0 - - - - - - diff --git a/projects/Netbeans/Library/nbproject/private/launcher.properties b/projects/Netbeans/Library/nbproject/private/launcher.properties deleted file mode 100644 index 6cc2127d..00000000 --- a/projects/Netbeans/Library/nbproject/private/launcher.properties +++ /dev/null @@ -1,40 +0,0 @@ -# Launchers File syntax: -# -# [Must-have property line] -# launcher1.runCommand= -# [Optional extra properties] -# launcher1.displayName= -# launcher1.buildCommand= -# launcher1.runDir= -# launcher1.symbolFiles= -# launcher1.env.= -# (If this value is quoted with ` it is handled as a native command which execution result will become the value) -# [Common launcher properties] -# common.runDir= -# (This value is overwritten by a launcher specific runDir value if the latter exists) -# common.env.= -# (Environment variables from common launcher are merged with launcher specific variables) -# common.symbolFiles= -# (This value is overwritten by a launcher specific symbolFiles value if the latter exists) -# -# In runDir, symbolFiles and env fields you can use these macroses: -# ${PROJECT_DIR} - project directory absolute path -# ${OUTPUT_PATH} - linker output path (relative to project directory path) -# ${OUTPUT_BASENAME}- linker output filename -# ${TESTDIR} - test files directory (relative to project directory path) -# ${OBJECTDIR} - object files directory (relative to project directory path) -# ${CND_DISTDIR} - distribution directory (relative to project directory path) -# ${CND_BUILDDIR} - build directory (relative to project directory path) -# ${CND_PLATFORM} - platform name -# ${CND_CONF} - configuration name -# ${CND_DLIB_EXT} - dynamic library extension -# -# All the project launchers must be listed in the file! -# -# launcher1.runCommand=... -# launcher2.runCommand=... -# ... -# common.runDir=... 
-# common.env.KEY=VALUE - -# launcher1.runCommand= \ No newline at end of file diff --git a/projects/Netbeans/Library/nbproject/private/private.xml b/projects/Netbeans/Library/nbproject/private/private.xml deleted file mode 100644 index 1a3de34f..00000000 --- a/projects/Netbeans/Library/nbproject/private/private.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - 2 - 0 - - - - - - - diff --git a/projects/Netbeans/Library/nbproject/project.xml b/projects/Netbeans/Library/nbproject/project.xml deleted file mode 100644 index b5697baf..00000000 --- a/projects/Netbeans/Library/nbproject/project.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - org.netbeans.modules.cnd.makeproject - - - Library - c - cc - h - UTF-8 - - - ../../../include/CCDB - ../../../src/Library - /home/romanov/halld/ccdb/trunk/src/SQLite - - - - Debug - 2 - - - Release - 2 - - - - false - - - - diff --git a/projects/Netbeans/Tests/Makefile b/projects/Netbeans/Tests/Makefile deleted file mode 100644 index 05de621e..00000000 --- a/projects/Netbeans/Tests/Makefile +++ /dev/null @@ -1,128 +0,0 @@ -# -# There exist several targets which are by default empty and which can be -# used for execution of your targets. These targets are usually executed -# before and after some main targets. They are: -# -# .build-pre: called before 'build' target -# .build-post: called after 'build' target -# .clean-pre: called before 'clean' target -# .clean-post: called after 'clean' target -# .clobber-pre: called before 'clobber' target -# .clobber-post: called after 'clobber' target -# .all-pre: called before 'all' target -# .all-post: called after 'all' target -# .help-pre: called before 'help' target -# .help-post: called after 'help' target -# -# Targets beginning with '.' are not intended to be called on their own. -# -# Main targets can be executed directly, and they are: -# -# build build a specific configuration -# clean remove built files from a configuration -# clobber remove all built files -# all build all configurations -# help print help mesage -# -# Targets .build-impl, .clean-impl, .clobber-impl, .all-impl, and -# .help-impl are implemented in nbproject/makefile-impl.mk. -# -# Available make variables: -# -# CND_BASEDIR base directory for relative paths -# CND_DISTDIR default top distribution directory (build artifacts) -# CND_BUILDDIR default top build directory (object files, ...) -# CONF name of current configuration -# CND_PLATFORM_${CONF} platform name (current configuration) -# CND_ARTIFACT_DIR_${CONF} directory of build artifact (current configuration) -# CND_ARTIFACT_NAME_${CONF} name of build artifact (current configuration) -# CND_ARTIFACT_PATH_${CONF} path to build artifact (current configuration) -# CND_PACKAGE_DIR_${CONF} directory of package (current configuration) -# CND_PACKAGE_NAME_${CONF} name of package (current configuration) -# CND_PACKAGE_PATH_${CONF} path to package (current configuration) -# -# NOCDDL - - -# Environment -MKDIR=mkdir -CP=cp -CCADMIN=CCadmin - - -# build -build: .build-post - -.build-pre: -# Add your pre 'build' code here... - -.build-post: .build-impl -# Add your post 'build' code here... - - -# clean -clean: .clean-post - -.clean-pre: -# Add your pre 'clean' code here... - -.clean-post: .clean-impl -# Add your post 'clean' code here... - - -# clobber -clobber: .clobber-post - -.clobber-pre: -# Add your pre 'clobber' code here... - -.clobber-post: .clobber-impl -# Add your post 'clobber' code here... - - -# all -all: .all-post - -.all-pre: -# Add your pre 'all' code here... 
- -.all-post: .all-impl -# Add your post 'all' code here... - - -# build tests -build-tests: .build-tests-post - -.build-tests-pre: -# Add your pre 'build-tests' code here... - -.build-tests-post: .build-tests-impl -# Add your post 'build-tests' code here... - - -# run tests -test: .test-post - -.test-pre: build-tests -# Add your pre 'test' code here... - -.test-post: .test-impl -# Add your post 'test' code here... - - -# help -help: .help-post - -.help-pre: -# Add your pre 'help' code here... - -.help-post: .help-impl -# Add your post 'help' code here... - - - -# include project implementation makefile -include nbproject/Makefile-impl.mk - -# include project make variables -include nbproject/Makefile-variables.mk diff --git a/projects/Netbeans/Tests/nbproject/Makefile-Debug.mk b/projects/Netbeans/Tests/nbproject/Makefile-Debug.mk deleted file mode 100644 index bc33e6b0..00000000 --- a/projects/Netbeans/Tests/nbproject/Makefile-Debug.mk +++ /dev/null @@ -1,226 +0,0 @@ -# -# Generated Makefile - do not edit! -# -# Edit the Makefile in the project folder instead (../Makefile). Each target -# has a -pre and a -post target defined where you can add customized code. -# -# This makefile implements configuration specific macros and targets. - - -# Environment -MKDIR=mkdir -CP=cp -GREP=grep -NM=nm -CCADMIN=CCadmin -RANLIB=ranlib -CC=clang -CCC=clang++ -CXX=clang++ -FC=gfortran -AS=as - -# Macros -CND_PLATFORM=CLang-Linux-x86 -CND_DLIB_EXT=so -CND_CONF=Debug -CND_DISTDIR=dist -CND_BUILDDIR=build - -# Include project Makefile -include Makefile - -# Object Directory -OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} - -# Object Files -OBJECTFILES= \ - ${OBJECTDIR}/_ext/1001602298/test_Authentication.o \ - ${OBJECTDIR}/_ext/1001602298/test_ModelObjects.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Assignments.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Connection.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Directories.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Other.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_RunRanges.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_TypeTables.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Variations.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySqlUserAPI.o \ - ${OBJECTDIR}/_ext/1001602298/test_NoMySqlUserAPI.o \ - ${OBJECTDIR}/_ext/1001602298/test_PathUtils.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Assignments.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Connection.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Directories.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Other.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_RunRanges.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_TypeTables.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Variations.o \ - ${OBJECTDIR}/_ext/1001602298/test_StringUtils.o \ - ${OBJECTDIR}/_ext/1001602298/test_TimeProvider.o \ - ${OBJECTDIR}/_ext/1001602298/tests.o - - -# C Compiler Flags -CFLAGS= - -# CC Compiler Flags -CCFLAGS= -CXXFLAGS= - -# Fortran Compiler Flags -FFLAGS= - -# Assembler Flags -ASFLAGS= - -# Link Libraries and Options -LDLIBSOPTIONS=-Wl,-rpath,/home/romanov/halld/ccdb/trunk/projects/Netbeans/Library/dist/Debug/CLang-Linux-x86 -L/home/romanov/halld/ccdb/trunk/projects/Netbeans/Library/dist/Debug/CLang-Linux-x86 -lLibrary -lm -lpthread - -# Build Targets -.build-conf: ${BUILD_SUBPROJECTS} - 
"${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests - -${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests: /home/romanov/halld/ccdb/trunk/projects/Netbeans/Library/dist/Debug/CLang-Linux-x86/libLibrary.so - -${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests: ${OBJECTFILES} - ${MKDIR} -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM} - ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests ${OBJECTFILES} ${LDLIBSOPTIONS} - -${OBJECTDIR}/_ext/1001602298/test_Authentication.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_Authentication.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_Authentication.o /home/romanov/halld/ccdb/trunk/src/Tests/test_Authentication.cc - -${OBJECTDIR}/_ext/1001602298/test_ModelObjects.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_ModelObjects.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_ModelObjects.o /home/romanov/halld/ccdb/trunk/src/Tests/test_ModelObjects.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Assignments.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Assignments.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Assignments.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Assignments.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Connection.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Connection.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Connection.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Connection.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Directories.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Directories.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Directories.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Directories.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Other.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Other.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Other.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Other.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_RunRanges.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_RunRanges.cc - ${MKDIR} 
-p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_RunRanges.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_RunRanges.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_TypeTables.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_TypeTables.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_TypeTables.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_TypeTables.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Variations.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Variations.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Variations.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Variations.cc - -${OBJECTDIR}/_ext/1001602298/test_MySqlUserAPI.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySqlUserAPI.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySqlUserAPI.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySqlUserAPI.cc - -${OBJECTDIR}/_ext/1001602298/test_NoMySqlUserAPI.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_NoMySqlUserAPI.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_NoMySqlUserAPI.o /home/romanov/halld/ccdb/trunk/src/Tests/test_NoMySqlUserAPI.cc - -${OBJECTDIR}/_ext/1001602298/test_PathUtils.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_PathUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_PathUtils.o /home/romanov/halld/ccdb/trunk/src/Tests/test_PathUtils.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Assignments.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Assignments.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Assignments.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Assignments.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Connection.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Connection.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o 
${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Connection.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Connection.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Directories.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Directories.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Directories.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Directories.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Other.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Other.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Other.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Other.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_RunRanges.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_RunRanges.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_RunRanges.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_RunRanges.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_TypeTables.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_TypeTables.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_TypeTables.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_TypeTables.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Variations.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Variations.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Variations.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Variations.cc - -${OBJECTDIR}/_ext/1001602298/test_StringUtils.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_StringUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_StringUtils.o /home/romanov/halld/ccdb/trunk/src/Tests/test_StringUtils.cc - -${OBJECTDIR}/_ext/1001602298/test_TimeProvider.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_TimeProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_TimeProvider.o /home/romanov/halld/ccdb/trunk/src/Tests/test_TimeProvider.cc - -${OBJECTDIR}/_ext/1001602298/tests.o: /home/romanov/halld/ccdb/trunk/src/Tests/tests.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -g -I../../../include -I/usr/include/mysql -I../../../include/SQLite -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/tests.o /home/romanov/halld/ccdb/trunk/src/Tests/tests.cc - -# Subprojects -.build-subprojects: - cd 
/home/romanov/halld/ccdb/trunk/projects/Netbeans/Library && ${MAKE} -f Makefile CONF=Debug - -# Clean Targets -.clean-conf: ${CLEAN_SUBPROJECTS} - ${RM} -r ${CND_BUILDDIR}/${CND_CONF} - ${RM} ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests - -# Subprojects -.clean-subprojects: - cd /home/romanov/halld/ccdb/trunk/projects/Netbeans/Library && ${MAKE} -f Makefile CONF=Debug clean - -# Enable dependency checking -.dep.inc: .depcheck-impl - -include .dep.inc diff --git a/projects/Netbeans/Tests/nbproject/Makefile-Release.mk b/projects/Netbeans/Tests/nbproject/Makefile-Release.mk deleted file mode 100644 index 8579ceb6..00000000 --- a/projects/Netbeans/Tests/nbproject/Makefile-Release.mk +++ /dev/null @@ -1,228 +0,0 @@ -# -# Generated Makefile - do not edit! -# -# Edit the Makefile in the project folder instead (../Makefile). Each target -# has a -pre and a -post target defined where you can add customized code. -# -# This makefile implements configuration specific macros and targets. - - -# Environment -MKDIR=mkdir -CP=cp -GREP=grep -NM=nm -CCADMIN=CCadmin -RANLIB=ranlib -CC=clang -CCC=clang++ -CXX=clang++ -FC=gfortran -AS=as - -# Macros -CND_PLATFORM=CLang-Linux-x86 -CND_DLIB_EXT=so -CND_CONF=Release -CND_DISTDIR=dist -CND_BUILDDIR=build - -# Include project Makefile -include Makefile - -# Object Directory -OBJECTDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM} - -# Object Files -OBJECTFILES= \ - ${OBJECTDIR}/_ext/1001602298/test_Authentication.o \ - ${OBJECTDIR}/_ext/1001602298/test_Console.o \ - ${OBJECTDIR}/_ext/1001602298/test_ModelObjects.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Assignments.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Connection.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Directories.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Other.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_RunRanges.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_TypeTables.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Variations.o \ - ${OBJECTDIR}/_ext/1001602298/test_MySqlUserAPI.o \ - ${OBJECTDIR}/_ext/1001602298/test_NoMySqlUserAPI.o \ - ${OBJECTDIR}/_ext/1001602298/test_PathUtils.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Assignments.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Connection.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Directories.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Other.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_RunRanges.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_TypeTables.o \ - ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Variations.o \ - ${OBJECTDIR}/_ext/1001602298/test_StringUtils.o \ - ${OBJECTDIR}/_ext/1001602298/test_TimeProvider.o \ - ${OBJECTDIR}/_ext/1001602298/tests.o - - -# C Compiler Flags -CFLAGS= - -# CC Compiler Flags -CCFLAGS= -CXXFLAGS= - -# Fortran Compiler Flags -FFLAGS= - -# Assembler Flags -ASFLAGS= - -# Link Libraries and Options -LDLIBSOPTIONS= - -# Build Targets -.build-conf: ${BUILD_SUBPROJECTS} - "${MAKE}" -f nbproject/Makefile-${CND_CONF}.mk ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests - -${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests: ${OBJECTFILES} - ${MKDIR} -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM} - ${LINK.cc} -o ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests ${OBJECTFILES} ${LDLIBSOPTIONS} - -${OBJECTDIR}/_ext/1001602298/test_Authentication.o: 
/home/romanov/halld/ccdb/trunk/src/Tests/test_Authentication.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_Authentication.o /home/romanov/halld/ccdb/trunk/src/Tests/test_Authentication.cc - -${OBJECTDIR}/_ext/1001602298/test_Console.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_Console.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_Console.o /home/romanov/halld/ccdb/trunk/src/Tests/test_Console.cc - -${OBJECTDIR}/_ext/1001602298/test_ModelObjects.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_ModelObjects.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_ModelObjects.o /home/romanov/halld/ccdb/trunk/src/Tests/test_ModelObjects.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Assignments.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Assignments.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Assignments.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Assignments.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Connection.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Connection.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Connection.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Connection.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Directories.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Directories.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Directories.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Directories.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Other.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Other.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Other.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Other.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_RunRanges.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_RunRanges.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_RunRanges.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_RunRanges.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_TypeTables.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_TypeTables.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_TypeTables.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_TypeTables.cc - -${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Variations.o: 
/home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Variations.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySQLProvider_Variations.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Variations.cc - -${OBJECTDIR}/_ext/1001602298/test_MySqlUserAPI.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_MySqlUserAPI.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_MySqlUserAPI.o /home/romanov/halld/ccdb/trunk/src/Tests/test_MySqlUserAPI.cc - -${OBJECTDIR}/_ext/1001602298/test_NoMySqlUserAPI.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_NoMySqlUserAPI.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_NoMySqlUserAPI.o /home/romanov/halld/ccdb/trunk/src/Tests/test_NoMySqlUserAPI.cc - -${OBJECTDIR}/_ext/1001602298/test_PathUtils.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_PathUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_PathUtils.o /home/romanov/halld/ccdb/trunk/src/Tests/test_PathUtils.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Assignments.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Assignments.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Assignments.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Assignments.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Connection.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Connection.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Connection.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Connection.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Directories.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Directories.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Directories.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Directories.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Other.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Other.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Other.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Other.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_RunRanges.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_RunRanges.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_RunRanges.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_RunRanges.cc - 
-${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_TypeTables.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_TypeTables.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_TypeTables.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_TypeTables.cc - -${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Variations.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Variations.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_SQLiteProvider_Variations.o /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Variations.cc - -${OBJECTDIR}/_ext/1001602298/test_StringUtils.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_StringUtils.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_StringUtils.o /home/romanov/halld/ccdb/trunk/src/Tests/test_StringUtils.cc - -${OBJECTDIR}/_ext/1001602298/test_TimeProvider.o: /home/romanov/halld/ccdb/trunk/src/Tests/test_TimeProvider.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/test_TimeProvider.o /home/romanov/halld/ccdb/trunk/src/Tests/test_TimeProvider.cc - -${OBJECTDIR}/_ext/1001602298/tests.o: /home/romanov/halld/ccdb/trunk/src/Tests/tests.cc - ${MKDIR} -p ${OBJECTDIR}/_ext/1001602298 - ${RM} "$@.d" - $(COMPILE.cc) -O2 -MMD -MP -MF "$@.d" -o ${OBJECTDIR}/_ext/1001602298/tests.o /home/romanov/halld/ccdb/trunk/src/Tests/tests.cc - -# Subprojects -.build-subprojects: - -# Clean Targets -.clean-conf: ${CLEAN_SUBPROJECTS} - ${RM} -r ${CND_BUILDDIR}/${CND_CONF} - ${RM} ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests - -# Subprojects -.clean-subprojects: - -# Enable dependency checking -.dep.inc: .depcheck-impl - -include .dep.inc diff --git a/projects/Netbeans/Tests/nbproject/Makefile-impl.mk b/projects/Netbeans/Tests/nbproject/Makefile-impl.mk deleted file mode 100644 index fe6bf0f4..00000000 --- a/projects/Netbeans/Tests/nbproject/Makefile-impl.mk +++ /dev/null @@ -1,133 +0,0 @@ -# -# Generated Makefile - do not edit! -# -# Edit the Makefile in the project folder instead (../Makefile). Each target -# has a pre- and a post- target defined where you can add customization code. -# -# This makefile implements macros and targets common to all configurations. -# -# NOCDDL - - -# Building and Cleaning subprojects are done by default, but can be controlled with the SUB -# macro. If SUB=no, subprojects will not be built or cleaned. The following macro -# statements set BUILD_SUB-CONF and CLEAN_SUB-CONF to .build-reqprojects-conf -# and .clean-reqprojects-conf unless SUB has the value 'no' -SUB_no=NO -SUBPROJECTS=${SUB_${SUB}} -BUILD_SUBPROJECTS_=.build-subprojects -BUILD_SUBPROJECTS_NO= -BUILD_SUBPROJECTS=${BUILD_SUBPROJECTS_${SUBPROJECTS}} -CLEAN_SUBPROJECTS_=.clean-subprojects -CLEAN_SUBPROJECTS_NO= -CLEAN_SUBPROJECTS=${CLEAN_SUBPROJECTS_${SUBPROJECTS}} - - -# Project Name -PROJECTNAME=Tests - -# Active Configuration -DEFAULTCONF=Debug -CONF=${DEFAULTCONF} - -# All Configurations -ALLCONFS=Debug Release - - -# build -.build-impl: .build-pre .validate-impl .depcheck-impl - @#echo "=> Running $@... 
Configuration=$(CONF)" - "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf - - -# clean -.clean-impl: .clean-pre .validate-impl .depcheck-impl - @#echo "=> Running $@... Configuration=$(CONF)" - "${MAKE}" -f nbproject/Makefile-${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf - - -# clobber -.clobber-impl: .clobber-pre .depcheck-impl - @#echo "=> Running $@..." - for CONF in ${ALLCONFS}; \ - do \ - "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .clean-conf; \ - done - -# all -.all-impl: .all-pre .depcheck-impl - @#echo "=> Running $@..." - for CONF in ${ALLCONFS}; \ - do \ - "${MAKE}" -f nbproject/Makefile-$${CONF}.mk QMAKE=${QMAKE} SUBPROJECTS=${SUBPROJECTS} .build-conf; \ - done - -# build tests -.build-tests-impl: .build-impl .build-tests-pre - @#echo "=> Running $@... Configuration=$(CONF)" - "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .build-tests-conf - -# run tests -.test-impl: .build-tests-impl .test-pre - @#echo "=> Running $@... Configuration=$(CONF)" - "${MAKE}" -f nbproject/Makefile-${CONF}.mk SUBPROJECTS=${SUBPROJECTS} .test-conf - -# dependency checking support -.depcheck-impl: - @echo "# This code depends on make tool being used" >.dep.inc - @if [ -n "${MAKE_VERSION}" ]; then \ - echo "DEPFILES=\$$(wildcard \$$(addsuffix .d, \$${OBJECTFILES}))" >>.dep.inc; \ - echo "ifneq (\$${DEPFILES},)" >>.dep.inc; \ - echo "include \$${DEPFILES}" >>.dep.inc; \ - echo "endif" >>.dep.inc; \ - else \ - echo ".KEEP_STATE:" >>.dep.inc; \ - echo ".KEEP_STATE_FILE:.make.state.\$${CONF}" >>.dep.inc; \ - fi - -# configuration validation -.validate-impl: - @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ - then \ - echo ""; \ - echo "Error: cannot find the makefile for configuration '${CONF}' in project ${PROJECTNAME}"; \ - echo "See 'make help' for details."; \ - echo "Current directory: " `pwd`; \ - echo ""; \ - fi - @if [ ! -f nbproject/Makefile-${CONF}.mk ]; \ - then \ - exit 1; \ - fi - - -# help -.help-impl: .help-pre - @echo "This makefile supports the following configurations:" - @echo " ${ALLCONFS}" - @echo "" - @echo "and the following targets:" - @echo " build (default target)" - @echo " clean" - @echo " clobber" - @echo " all" - @echo " help" - @echo "" - @echo "Makefile Usage:" - @echo " make [CONF=<CONF>] [SUB=no] build" - @echo " make [CONF=<CONF>] [SUB=no] clean" - @echo " make [SUB=no] clobber" - @echo " make [SUB=no] all" - @echo " make help" - @echo "" - @echo "Target 'build' will build a specific configuration and, unless 'SUB=no'," - @echo " also build subprojects." - @echo "Target 'clean' will clean a specific configuration and, unless 'SUB=no'," - @echo " also clean subprojects." - @echo "Target 'clobber' will remove all built files from all configurations and," - @echo " unless 'SUB=no', also from subprojects." - @echo "Target 'all' will build all configurations and, unless 'SUB=no'," - @echo " also build subprojects." - @echo "Target 'help' prints this message." - @echo "" - diff --git a/projects/Netbeans/Tests/nbproject/Makefile-variables.mk b/projects/Netbeans/Tests/nbproject/Makefile-variables.mk deleted file mode 100644 index 07df0de3..00000000 --- a/projects/Netbeans/Tests/nbproject/Makefile-variables.mk +++ /dev/null @@ -1,35 +0,0 @@ -# -# Generated - do not edit!
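For reference, the interface that the .help-impl target above prints corresponds to ordinary command-line invocations; a usage sketch (assuming the generated nbproject/ files are in place) would be:

    make CONF=Release SUB=no build   # build one configuration, skip subprojects
    make clobber                     # remove built files for all configurations

SUB=no works because the SUBPROJECTS macro chain at the top of Makefile-impl.mk maps it to the empty .build-subprojects/.clean-subprojects variants, so subproject recursion is simply omitted.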
-# -# NOCDDL -# -CND_BASEDIR=`pwd` -CND_BUILDDIR=build -CND_DISTDIR=dist -# Debug configuration -CND_PLATFORM_Debug=CLang-Linux-x86 -CND_ARTIFACT_DIR_Debug=dist/Debug/CLang-Linux-x86 -CND_ARTIFACT_NAME_Debug=tests -CND_ARTIFACT_PATH_Debug=dist/Debug/CLang-Linux-x86/tests -CND_PACKAGE_DIR_Debug=dist/Debug/CLang-Linux-x86/package -CND_PACKAGE_NAME_Debug=tests.tar -CND_PACKAGE_PATH_Debug=dist/Debug/CLang-Linux-x86/package/tests.tar -# Release configuration -CND_PLATFORM_Release=CLang-Linux-x86 -CND_ARTIFACT_DIR_Release=dist/Release/CLang-Linux-x86 -CND_ARTIFACT_NAME_Release=tests -CND_ARTIFACT_PATH_Release=dist/Release/CLang-Linux-x86/tests -CND_PACKAGE_DIR_Release=dist/Release/CLang-Linux-x86/package -CND_PACKAGE_NAME_Release=tests.tar -CND_PACKAGE_PATH_Release=dist/Release/CLang-Linux-x86/package/tests.tar -# -# include compiler specific variables -# -# dmake command -ROOT:sh = test -f nbproject/private/Makefile-variables.mk || \ - (mkdir -p nbproject/private && touch nbproject/private/Makefile-variables.mk) -# -# gmake command -.PHONY: $(shell test -f nbproject/private/Makefile-variables.mk || (mkdir -p nbproject/private && touch nbproject/private/Makefile-variables.mk)) -# -include nbproject/private/Makefile-variables.mk diff --git a/projects/Netbeans/Tests/nbproject/Package-Debug.bash b/projects/Netbeans/Tests/nbproject/Package-Debug.bash deleted file mode 100644 index edd7d4cd..00000000 --- a/projects/Netbeans/Tests/nbproject/Package-Debug.bash +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -x - -# -# Generated - do not edit! -# - -# Macros -TOP=`pwd` -CND_PLATFORM=CLang-Linux-x86 -CND_CONF=Debug -CND_DISTDIR=dist -CND_BUILDDIR=build -CND_DLIB_EXT=so -NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging -TMPDIRNAME=tmp-packaging -OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests -OUTPUT_BASENAME=tests -PACKAGE_TOP_DIR=tests/ - -# Functions -function checkReturnCode -{ - rc=$? - if [ $rc != 0 ] - then - exit $rc - fi -} -function makeDirectory -# $1 directory path -# $2 permission (optional) -{ - mkdir -p "$1" - checkReturnCode - if [ "$2" != "" ] - then - chmod $2 "$1" - checkReturnCode - fi -} -function copyFileToTmpDir -# $1 from-file path -# $2 to-file path -# $3 permission -{ - cp "$1" "$2" - checkReturnCode - if [ "$3" != "" ] - then - chmod $3 "$2" - checkReturnCode - fi -} - -# Setup -cd "${TOP}" -mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package -rm -rf ${NBTMPDIR} -mkdir -p ${NBTMPDIR} - -# Copy files and create directories and links -cd "${TOP}" -makeDirectory "${NBTMPDIR}/tests/bin" -copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755 - - -# Generate tar file -cd "${TOP}" -rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/tests.tar -cd ${NBTMPDIR} -tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/tests.tar * -checkReturnCode - -# Cleanup -cd "${TOP}" -rm -rf ${NBTMPDIR} diff --git a/projects/Netbeans/Tests/nbproject/Package-Release.bash b/projects/Netbeans/Tests/nbproject/Package-Release.bash deleted file mode 100644 index 8811040e..00000000 --- a/projects/Netbeans/Tests/nbproject/Package-Release.bash +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -x - -# -# Generated - do not edit! 
-# - -# Macros -TOP=`pwd` -CND_PLATFORM=CLang-Linux-x86 -CND_CONF=Release -CND_DISTDIR=dist -CND_BUILDDIR=build -CND_DLIB_EXT=so -NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging -TMPDIRNAME=tmp-packaging -OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/tests -OUTPUT_BASENAME=tests -PACKAGE_TOP_DIR=tests/ - -# Functions -function checkReturnCode -{ - rc=$? - if [ $rc != 0 ] - then - exit $rc - fi -} -function makeDirectory -# $1 directory path -# $2 permission (optional) -{ - mkdir -p "$1" - checkReturnCode - if [ "$2" != "" ] - then - chmod $2 "$1" - checkReturnCode - fi -} -function copyFileToTmpDir -# $1 from-file path -# $2 to-file path -# $3 permission -{ - cp "$1" "$2" - checkReturnCode - if [ "$3" != "" ] - then - chmod $3 "$2" - checkReturnCode - fi -} - -# Setup -cd "${TOP}" -mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package -rm -rf ${NBTMPDIR} -mkdir -p ${NBTMPDIR} - -# Copy files and create directories and links -cd "${TOP}" -makeDirectory "${NBTMPDIR}/tests/bin" -copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755 - - -# Generate tar file -cd "${TOP}" -rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/tests.tar -cd ${NBTMPDIR} -tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/tests.tar * -checkReturnCode - -# Cleanup -cd "${TOP}" -rm -rf ${NBTMPDIR} diff --git a/projects/Netbeans/Tests/nbproject/configurations.xml b/projects/Netbeans/Tests/nbproject/configurations.xml deleted file mode 100644 index 47e632ba..00000000 --- a/projects/Netbeans/Tests/nbproject/configurations.xml +++ /dev/null @@ -1,408 +0,0 @@ - - - - - - /home/romanov/halld/ccdb/trunk/include/Tests/catch.h - /home/romanov/halld/ccdb/trunk/include/Tests/catch.hpp - /home/romanov/halld/ccdb/trunk/include/Tests/tests.h - - - - - - - /home/romanov/halld/ccdb/trunk/src/Tests/SConscript - /home/romanov/halld/ccdb/trunk/src/Tests/test_Authentication.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_Console.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_ModelObjects.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Assignments.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Connection.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Directories.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Other.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_RunRanges.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_TypeTables.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySQLProvider_Variations.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_MySqlUserAPI.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_NoMySqlUserAPI.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_PathUtils.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Assignments.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Connection.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Directories.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Other.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_RunRanges.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_TypeTables.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_SQLiteProvider_Variations.cc - /home/romanov/halld/ccdb/trunk/src/Tests/test_StringUtils.cc - 
/home/romanov/halld/ccdb/trunk/src/Tests/test_TimeProvider.cc - /home/romanov/halld/ccdb/trunk/src/Tests/tests.cc - - - - - - Makefile - - - - /home/romanov/halld/ccdb/trunk/include/Tests - /home/romanov/halld/ccdb/trunk/src/Tests - - Makefile - - - - CLang|CLang - true - false - - - - - ../../../include - /usr/include/mysql - ../../../include/SQLite - - - - - - - - - Mathematics - PosixThreads - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - CLang|CLang - true - false - - - - 5 - - - 5 - - - 5 - - - 5 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/projects/Netbeans/Tests/nbproject/private/Makefile-variables.mk b/projects/Netbeans/Tests/nbproject/private/Makefile-variables.mk deleted file mode 100644 index a64183e1..00000000 --- a/projects/Netbeans/Tests/nbproject/private/Makefile-variables.mk +++ /dev/null @@ -1,7 +0,0 @@ -# -# Generated - do not edit! -# -# NOCDDL -# -# Debug configuration -# Release configuration diff --git a/projects/Netbeans/Tests/nbproject/private/configurations.xml b/projects/Netbeans/Tests/nbproject/private/configurations.xml deleted file mode 100644 index db085c13..00000000 --- a/projects/Netbeans/Tests/nbproject/private/configurations.xml +++ /dev/null @@ -1,73 +0,0 @@ - - - Makefile - - - - localhost - 2 - - - - - - - - - - - - - - - gdb - - - - "${OUTPUT_PATH}" - - "${OUTPUT_PATH}" - - true - 0 - 0 - - - - - - - - localhost - 2 - - - - - - - - - - - - - - - gdb - - - - "${OUTPUT_PATH}" - - "${OUTPUT_PATH}" - - true - 0 - 0 - - - - - - diff --git a/projects/Netbeans/Tests/nbproject/private/launcher.properties b/projects/Netbeans/Tests/nbproject/private/launcher.properties deleted file mode 100644 index 6cc2127d..00000000 --- a/projects/Netbeans/Tests/nbproject/private/launcher.properties +++ /dev/null @@ -1,40 +0,0 @@ -# Launchers File syntax: -# -# [Must-have property line] -# launcher1.runCommand= -# [Optional extra properties] -# launcher1.displayName= -# launcher1.buildCommand= -# launcher1.runDir= -# launcher1.symbolFiles= -# launcher1.env.= -# (If this value is quoted with ` it is handled as a native command which execution result will become the value) -# [Common launcher properties] -# common.runDir= -# (This value is overwritten by a launcher specific runDir value if the latter exists) -# common.env.= -# (Environment variables from common launcher are merged with launcher specific variables) -# common.symbolFiles= -# (This value is overwritten by a launcher specific symbolFiles value if the latter exists) -# -# In runDir, symbolFiles and env fields you can use these macroses: -# ${PROJECT_DIR} - project directory absolute path -# ${OUTPUT_PATH} - linker output path (relative to project directory path) -# ${OUTPUT_BASENAME}- linker output filename -# ${TESTDIR} - test files directory (relative to project directory path) -# ${OBJECTDIR} - object files directory (relative to project directory path) -# ${CND_DISTDIR} - distribution directory (relative to project directory path) -# ${CND_BUILDDIR} - build directory (relative to project directory path) -# ${CND_PLATFORM} - platform name -# ${CND_CONF} - configuration name -# ${CND_DLIB_EXT} - dynamic library extension -# -# All the project launchers must be listed in the file! -# -# launcher1.runCommand=... -# launcher2.runCommand=... -# ... -# common.runDir=... 
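Putting the launcher grammar documented above together, a single concrete launcher entry might read as follows (illustrative values only, not from the original file, which leaves every launcher commented out):

    launcher1.runCommand="${OUTPUT_PATH}"
    launcher1.displayName=Run CCDB tests
    launcher1.runDir=${CND_BUILDDIR}
    launcher1.env.CCDB_CONNECTION=sqlite:///tmp/ccdb.sqlite

Here ${OUTPUT_PATH} and ${CND_BUILDDIR} are the macros listed in the comment block, and CCDB_CONNECTION is merely a plausible environment variable for this test binary, as sketched here; the remaining comment lines below continue the original file.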
-# common.env.KEY=VALUE - -# launcher1.runCommand= \ No newline at end of file diff --git a/projects/Netbeans/Tests/nbproject/private/private.xml b/projects/Netbeans/Tests/nbproject/private/private.xml deleted file mode 100644 index 1a0cc27a..00000000 --- a/projects/Netbeans/Tests/nbproject/private/private.xml +++ /dev/null @@ -1,11 +0,0 @@ - - - - 1 - 0 - - - - - - diff --git a/projects/Netbeans/Tests/nbproject/project.xml b/projects/Netbeans/Tests/nbproject/project.xml deleted file mode 100644 index f55d4d62..00000000 --- a/projects/Netbeans/Tests/nbproject/project.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - org.netbeans.modules.cnd.makeproject - - - Tests - - cc - h,hpp - UTF-8 - - /home/romanov/halld/ccdb/trunk/projects/Netbeans/Library - - - /home/romanov/halld/ccdb/trunk/include/Tests - /home/romanov/halld/ccdb/trunk/src/Tests - - - - Debug - 1 - - - Release - 1 - - - - false - - - - diff --git a/projects/PyCharm/python b/projects/PyCharm/python deleted file mode 120000 index 0fe7a6c3..00000000 --- a/projects/PyCharm/python +++ /dev/null @@ -1 +0,0 @@ -/home/romanov/work/ccdb/ccdb/python \ No newline at end of file diff --git a/projects/VisualStudio/Benchmarks/Benchmarks.vcxproj b/projects/VisualStudio/Benchmarks/Benchmarks.vcxproj deleted file mode 100644 index 040caf7d..00000000 --- a/projects/VisualStudio/Benchmarks/Benchmarks.vcxproj +++ /dev/null @@ -1,145 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - SQLite Debug - Win32 - - - - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85} - Win32Proj - Benchmarks - - - - Application - true - Unicode - v120 - - - Application - true - Unicode - v120 - - - Application - false - true - Unicode - v120 - - - - - - - - - - - - - - - - true - - - true - - - false - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - $(MYSQL_CONNECTOR)/include;$(CCDB_HOME)/include - - - Console - true - $(MYSQL_CONNECTOR)\lib\opt; - libmysql.lib;mysqlclient.lib;zlib.lib;%(AdditionalDependencies) - true - - - - - - - Level3 - Disabled - WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) - $(MYSQL_CONNECTOR)/include;$(CCDB_HOME)/include - - - Console - true - $(MYSQL_CONNECTOR)\lib\opt; - libmysql.lib;mysqlclient.lib;zlib.lib;%(AdditionalDependencies) - true - - - - - Level3 - - - MaxSpeed - true - true - WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) - $(MYSQL_CONNECTOR)/include;$(CCDB_HOME)/include - - - Console - true - true - true - $(MYSQL_CONNECTOR)\lib\opt; - libmysql.lib;mysqlclient.lib;zlib.lib;%(AdditionalDependencies) - true - - - - - {ff2376e7-5637-46a4-ae1b-a083d7588552} - - - - - - - - true - true - true - - - true - true - true - - - - - - - - - \ No newline at end of file diff --git a/projects/VisualStudio/Benchmarks/Benchmarks.vcxproj.filters b/projects/VisualStudio/Benchmarks/Benchmarks.vcxproj.filters deleted file mode 100644 index cff13d71..00000000 --- a/projects/VisualStudio/Benchmarks/Benchmarks.vcxproj.filters +++ /dev/null @@ -1,39 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - h;hpp;hxx;hm;inl;inc;xsd - - - {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} - rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms - - - - - Header Files - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - \ No newline at end of file diff --git a/projects/VisualStudio/CCDB.sln b/projects/VisualStudio/CCDB.sln deleted file mode 100644 index 309a0103..00000000 
--- a/projects/VisualStudio/CCDB.sln +++ /dev/null @@ -1,121 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 2013 -VisualStudioVersion = 12.0.30110.0 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Library", "Library\Library.vcxproj", "{FF2376E7-5637-46A4-AE1B-A083D7588552}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Tests", "Tests\Tests.vcxproj", "{3E64C06E-02CC-4183-92B2-6012CB975413}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Jana", "Jana\Jana.vcxproj", "{3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}" -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Benchmarks", "Benchmarks\Benchmarks.vcxproj", "{4F69203B-0EA6-4D21-B617-86CAF4B4DC85}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{CF983026-1744-4F84-B52A-3797F2A4BADE}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Debug|Mixed Platforms = Debug|Mixed Platforms - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Debug|x86 = Debug|x86 - Release|Any CPU = Release|Any CPU - Release|Mixed Platforms = Release|Mixed Platforms - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - Release|x86 = Release|x86 - SQLite Debug|Any CPU = SQLite Debug|Any CPU - SQLite Debug|Mixed Platforms = SQLite Debug|Mixed Platforms - SQLite Debug|Win32 = SQLite Debug|Win32 - SQLite Debug|x64 = SQLite Debug|x64 - SQLite Debug|x86 = SQLite Debug|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Debug|Any CPU.ActiveCfg = Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Debug|Mixed Platforms.Build.0 = Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Debug|Win32.ActiveCfg = Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Debug|Win32.Build.0 = Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Debug|x64.ActiveCfg = Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Debug|x86.ActiveCfg = Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Release|Any CPU.ActiveCfg = Release|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Release|Any CPU.Build.0 = Release|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Release|Mixed Platforms.ActiveCfg = Release|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Release|Mixed Platforms.Build.0 = Release|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Release|Win32.ActiveCfg = Release|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Release|Win32.Build.0 = Release|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Release|x64.ActiveCfg = Release|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.Release|x86.ActiveCfg = Release|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|Any CPU.ActiveCfg = SQLite Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|Any CPU.Build.0 = SQLite Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|Mixed Platforms.ActiveCfg = SQLite Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|Mixed Platforms.Build.0 = SQLite Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|Win32.ActiveCfg = SQLite Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|Win32.Build.0 = SQLite Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|x64.ActiveCfg = SQLite Debug|Win32 - 
{FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|x86.ActiveCfg = SQLite Debug|Win32 - {FF2376E7-5637-46A4-AE1B-A083D7588552}.SQLite Debug|x86.Build.0 = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Debug|Any CPU.ActiveCfg = Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Debug|Mixed Platforms.Build.0 = Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Debug|Win32.ActiveCfg = Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Debug|Win32.Build.0 = Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Debug|x64.ActiveCfg = Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Debug|x86.ActiveCfg = Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Release|Any CPU.ActiveCfg = Release|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Release|Any CPU.Build.0 = Release|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Release|Mixed Platforms.ActiveCfg = Release|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Release|Mixed Platforms.Build.0 = Release|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Release|Win32.ActiveCfg = Release|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Release|Win32.Build.0 = Release|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Release|x64.ActiveCfg = Release|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.Release|x86.ActiveCfg = Release|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|Any CPU.ActiveCfg = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|Any CPU.Build.0 = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|Mixed Platforms.ActiveCfg = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|Mixed Platforms.Build.0 = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|Win32.ActiveCfg = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|Win32.Build.0 = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|x64.ActiveCfg = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|x86.ActiveCfg = SQLite Debug|Win32 - {3E64C06E-02CC-4183-92B2-6012CB975413}.SQLite Debug|x86.Build.0 = SQLite Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Debug|Any CPU.ActiveCfg = Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Debug|Win32.ActiveCfg = Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Debug|x64.ActiveCfg = Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Debug|x86.ActiveCfg = Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Release|Any CPU.ActiveCfg = Release|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Release|Mixed Platforms.ActiveCfg = Release|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Release|Win32.ActiveCfg = Release|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Release|x64.ActiveCfg = Release|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.Release|x86.ActiveCfg = Release|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.SQLite Debug|Any CPU.ActiveCfg = SQLite Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.SQLite Debug|Mixed Platforms.ActiveCfg = SQLite Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.SQLite Debug|Win32.ActiveCfg = SQLite Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.SQLite Debug|x64.ActiveCfg = SQLite Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.SQLite Debug|x86.ActiveCfg = SQLite Debug|Win32 - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E}.SQLite Debug|x86.Build.0 = 
SQLite Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Debug|Any CPU.ActiveCfg = Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Debug|Win32.ActiveCfg = Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Debug|Win32.Build.0 = Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Debug|x64.ActiveCfg = Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Debug|x86.ActiveCfg = Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Release|Any CPU.ActiveCfg = Release|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Release|Mixed Platforms.ActiveCfg = Release|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Release|Win32.ActiveCfg = Release|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Release|Win32.Build.0 = Release|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Release|x64.ActiveCfg = Release|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.Release|x86.ActiveCfg = Release|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.SQLite Debug|Any CPU.ActiveCfg = SQLite Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.SQLite Debug|Mixed Platforms.ActiveCfg = SQLite Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.SQLite Debug|Win32.ActiveCfg = SQLite Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.SQLite Debug|x64.ActiveCfg = SQLite Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.SQLite Debug|x86.ActiveCfg = SQLite Debug|Win32 - {4F69203B-0EA6-4D21-B617-86CAF4B4DC85}.SQLite Debug|x86.Build.0 = SQLite Debug|Win32 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/projects/VisualStudio/Jana/Jana.vcxproj b/projects/VisualStudio/Jana/Jana.vcxproj deleted file mode 100644 index 34c6a3b2..00000000 --- a/projects/VisualStudio/Jana/Jana.vcxproj +++ /dev/null @@ -1,133 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - SQLite Debug - Win32 - - - - {3F6DE0CB-51FB-4CB7-A0E1-D933D5886C5E} - v4.5 - ManagedCProj - Jana - - - - DynamicLibrary - true - true - Unicode - v120 - - - DynamicLibrary - true - true - Unicode - v120 - - - DynamicLibrary - false - true - Unicode - v120 - - - - - - - - - - - - - - - - true - D:\Projects\Share\gluex\JANA\include\JANA;D:\Projects\Share\gluex\JANA\include;$(IncludePath) - - - true - D:\Projects\Share\gluex\JANA\include\JANA;D:\Projects\Share\gluex\JANA\include;$(IncludePath) - - - false - - - - Level3 - Disabled - WIN32;CCDB_DEBUG_OUTPUT;_DEBUG;%(PreprocessorDefinitions) - Use - D:\Science\Jana\tag_0.6.5\src;$(CCDB_HOME)\include;%(AdditionalIncludeDirectories) - - - true - - - - - - - Level3 - Disabled - WIN32;CCDB_DEBUG_OUTPUT;_DEBUG;%(PreprocessorDefinitions) - Use - D:\Science\Jana\tag_0.6.5\src;$(CCDB_HOME)\include;%(AdditionalIncludeDirectories) - - - true - - - - - - - Level3 - WIN32;CCDB_DEBUG_OUTPUT;NDEBUG;%(PreprocessorDefinitions) - Use - - - true - - - - - - - - - - - - - - - - - - - - - - - {ff2376e7-5637-46a4-ae1b-a083d7588552} - - - - - - \ No newline at end of file diff --git a/projects/VisualStudio/Library/Library.vcxproj b/projects/VisualStudio/Library/Library.vcxproj deleted file mode 100644 index 5f6205a0..00000000 --- a/projects/VisualStudio/Library/Library.vcxproj +++ /dev/null @@ -1,220 +0,0 @@ - - - - - Debug - Win32 - - - Release - Win32 - - - SQLite Debug - Win32 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - false - - - - - - - - true - false - - - false - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - true - true - - - - - - 
- - true - true - true - - - false - - - - - - - - {FF2376E7-5637-46A4-AE1B-A083D7588552} - Win32Proj - CCDBLibrary - - - - StaticLibrary - true - Unicode - v120 - - - StaticLibrary - true - Unicode - v120 - - - StaticLibrary - false - true - Unicode - v120 - - - - - - - - - - - - - - - - true - - - - - - Level3 - Disabled - WIN32;SQLITE;CCDB_MYSQL;_DEBUG;_LIB;%(PreprocessorDefinitions) - $(MYSQL_CONNECTOR)\include;$(CCDB_HOME)/include;$(SolutionDir)/SQLite; - - - - - Windows - true - - - libmysql.lib;mysqlclient.lib;zlib.lib;pthreadVC2.lib;%(AdditionalDependencies) - - - $(MYSQL_CONNECTOR)/lib/opt;$(CCDB_HOME)\lib;$(SolutionDir);%(AdditionalLibraryDirectories) - - - - - - - Level3 - Disabled - WIN32;SQLITE;_DEBUG;_LIB;%(PreprocessorDefinitions) - $(MYSQL_CONNECTOR)\include;$(CCDB_HOME)/include;$(SolutionDir)/SQLite; - - - - - Windows - true - - - pthreadVC2.lib;%(AdditionalDependencies) - - - $(MYSQL_CONNECTOR)/lib/opt;$(CCDB_HOME)\lib;$(SolutionDir);%(AdditionalLibraryDirectories) - - - - - Level3 - - - MaxSpeed - true - true - WIN32;SQLITE;NDEBUG;_LIB;%(PreprocessorDefinitions) - $(CCDB_HOME)/include;$(SolutionDir)/SQLite;$(MYSQL_CONNECTOR)/include - CCDB_MYSQL - true - - - Windows - true - true - true - - - $(MYSQL_CONNECTOR)/lib/opt;$(CCDB_HOME)\lib;$(SolutionDir);%(AdditionalLibraryDirectories) - libmysql.lib;mysqlclient.lib;zlib.lib;pthreadVC2.lib;%(AdditionalDependencies) - - - - - - \ No newline at end of file diff --git a/projects/VisualStudio/Library/Library.vcxproj.filters b/projects/VisualStudio/Library/Library.vcxproj.filters deleted file mode 100644 index ef95c6a4..00000000 --- a/projects/VisualStudio/Library/Library.vcxproj.filters +++ /dev/null @@ -1,260 +0,0 @@ - - - - - {aa7ed321-f0e8-4ec8-852e-4e8372c2be7b} - - - {f8825fb0-69c9-4ed7-8410-1b05f5f30973} - - - {cfa14838-d96e-4126-a044-204863fe4ae3} - - - {01d71cdb-0585-46b1-8248-75566d717c26} - - - {98bdd288-e057-4051-9211-9d5f3251c135} - - - {bb35650e-a4ae-42c1-bab8-5f579f223c91} - - - {d70bc452-7101-4b19-9fc0-3c8b90b2c323} - - - {490e6452-e30c-4504-9739-d78fb7a9de6c} - - - {85448266-7249-4bf9-aa60-f6c10a6cf542} - - - {ce17f0e5-3201-41e6-8a73-2e7e1ab355cf} - - - {559b8d40-0e50-4276-b9a4-6375e258c891} - - - {fc21a92f-e260-47b7-afeb-0daa88f0ae47} - - - {005481d0-fe11-41c3-b2ff-8fe7d24fc76c} - - - {c5a49971-91dc-46ee-bc42-1b040dfe9832} - - - {84fd2c79-ecd2-47f2-9261-a8a2d926212f} - - - - - Model\Headers - - - Model\Headers - - - Model\Headers - - - Model\Headers - - - Model\Headers - - - Model\Headers - - - Model\Headers - - - UserApi\Headers - - - UserApi\Headers - - - UserApi\Headers - - - Providers\Headers - - - Providers\Headers - - - Providers\Headers - - - Providers\Headers - - - Model\Headers - - - Model\Headers - - - Helpers\Headers - - - Helpers\Headers - - - Helpers\Headers - - - Helpers\Headers - - - Globals\Headers - - - Globals\Headers - - - Globals\Headers - - - Globals\Headers - - - Globals\Headers - - - Helpers\Headers - - - Globals\Headers - - - Globals\Headers - - - Globals\Headers - - - Globals\Headers - - - Providers\Headers - - - Providers\Headers - - - Providers\Headers - - - - UserApi\Headers - - - Helpers\Headers - - - - - Helpers\Sources - - - Helpers\Sources - - - Model\Sources - - - Model\Sources - - - Model\Sources - - - Model\Sources - - - Model\Sources - - - Model\Sources - - - Model\Sources - - - Model\Sources - - - Model\Sources - - - Providers\Sources - - - Providers\Sources - - - Providers\Sources - - - Providers\Sources - - - UserApi\Sources - - - UserApi\Sources - - - Globals\Sources - - - 
Globals\Sources - - - Globals\Sources - - - Helpers\Sources - - - Globals\Sources - - - Globals\Sources - - - Globals\Sources - - - Globals\Sources - - - Globals\Sources - - - UserApi\Sources - - - Providers\Sources - - - Providers\Sources - - - Providers\Sources - - - - UserApi\Sources - - - Helpers\Sources - - - Helpers\Sources - - - \ No newline at end of file diff --git a/projects/VisualStudio/SQLite/shell.c b/projects/VisualStudio/SQLite/shell.c deleted file mode 100644 index 801ad2ca..00000000 --- a/projects/VisualStudio/SQLite/shell.c +++ /dev/null @@ -1,3137 +0,0 @@ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file contains code to implement the "sqlite" command line -** utility for accessing SQLite databases. -*/ -#if (defined(_WIN32) || defined(WIN32)) && !defined(_CRT_SECURE_NO_WARNINGS) -/* This needs to come before any includes for MSVC compiler */ -#define _CRT_SECURE_NO_WARNINGS -#endif - -/* -** Enable large-file support for fopen() and friends on unix. -*/ -#ifndef SQLITE_DISABLE_LFS -# define _LARGE_FILE 1 -# ifndef _FILE_OFFSET_BITS -# define _FILE_OFFSET_BITS 64 -# endif -# define _LARGEFILE_SOURCE 1 -#endif - -#include <stdlib.h> -#include <string.h> -#include <stdio.h> -#include <assert.h> -#include "sqlite3.h" -#include <ctype.h> -#include <stdarg.h> - -#if !defined(_WIN32) && !defined(WIN32) && !defined(__OS2__) -# include <signal.h> -# if !defined(__RTP__) && !defined(_WRS_KERNEL) -# include <pwd.h> -# endif -# include <unistd.h> -# include <sys/types.h> -#endif - -#ifdef __OS2__ -# include <unistd.h> -#endif - -#ifdef HAVE_EDITLINE -# include <editline/editline.h> -#endif -#if defined(HAVE_READLINE) && HAVE_READLINE==1 -# include <readline/readline.h> -# include <readline/history.h> -#endif -#if !defined(HAVE_EDITLINE) && (!defined(HAVE_READLINE) || HAVE_READLINE!=1) -# define readline(p) local_getline(p,stdin,0) -# define add_history(X) -# define read_history(X) -# define write_history(X) -# define stifle_history(X) -#endif - -#if defined(_WIN32) || defined(WIN32) -# include <io.h> -#define isatty(h) _isatty(h) -#define access(f,m) _access((f),(m)) -#define popen(a,b) _popen((a),(b)) -#define pclose(x) _pclose(x) -#else -/* Make sure isatty() has a prototype. -*/ -extern int isatty(int); -#endif - -#if defined(_WIN32_WCE) -/* Windows CE (arm-wince-mingw32ce-gcc) does not provide isatty() - * thus we always assume that we have a console. That can be - * overridden with the -batch command line option. - */ -#define isatty(x) 1 -#endif - -/* True if the timer is enabled */ -static int enableTimer = 0; - -/* ctype macros that work with signed characters */ -#define IsSpace(X) isspace((unsigned char)X) -#define IsDigit(X) isdigit((unsigned char)X) -#define ToLower(X) (char)tolower((unsigned char)X) - -#if !defined(_WIN32) && !defined(WIN32) && !defined(__OS2__) && !defined(__RTP__) && !defined(_WRS_KERNEL) -#include <sys/time.h> -#include <sys/resource.h> - -/* Saved resource information for the beginning of an operation */ -static struct rusage sBegin; - -/* -** Begin timing an operation -*/ -static void beginTimer(void){ - if( enableTimer ){ - getrusage(RUSAGE_SELF, &sBegin); - } -} - -/* Return the difference of two time_structs in seconds */ -static double timeDiff(struct timeval *pStart, struct timeval *pEnd){ - return (pEnd->tv_usec - pStart->tv_usec)*0.000001 + - (double)(pEnd->tv_sec - pStart->tv_sec); -} - -/* -** Print the timing results.
-*/ -static void endTimer(void){ - if( enableTimer ){ - struct rusage sEnd; - getrusage(RUSAGE_SELF, &sEnd); - printf("CPU Time: user %f sys %f\n", - timeDiff(&sBegin.ru_utime, &sEnd.ru_utime), - timeDiff(&sBegin.ru_stime, &sEnd.ru_stime)); - } -} - -#define BEGIN_TIMER beginTimer() -#define END_TIMER endTimer() -#define HAS_TIMER 1 - -#elif (defined(_WIN32) || defined(WIN32)) - -#include - -/* Saved resource information for the beginning of an operation */ -static HANDLE hProcess; -static FILETIME ftKernelBegin; -static FILETIME ftUserBegin; -typedef BOOL (WINAPI *GETPROCTIMES)(HANDLE, LPFILETIME, LPFILETIME, LPFILETIME, LPFILETIME); -static GETPROCTIMES getProcessTimesAddr = NULL; - -/* -** Check to see if we have timer support. Return 1 if necessary -** support found (or found previously). -*/ -static int hasTimer(void){ - if( getProcessTimesAddr ){ - return 1; - } else { - /* GetProcessTimes() isn't supported in WIN95 and some other Windows versions. - ** See if the version we are running on has it, and if it does, save off - ** a pointer to it and the current process handle. - */ - hProcess = GetCurrentProcess(); - if( hProcess ){ - HINSTANCE hinstLib = LoadLibrary(TEXT("Kernel32.dll")); - if( NULL != hinstLib ){ - getProcessTimesAddr = (GETPROCTIMES) GetProcAddress(hinstLib, "GetProcessTimes"); - if( NULL != getProcessTimesAddr ){ - return 1; - } - FreeLibrary(hinstLib); - } - } - } - return 0; -} - -/* -** Begin timing an operation -*/ -static void beginTimer(void){ - if( enableTimer && getProcessTimesAddr ){ - FILETIME ftCreation, ftExit; - getProcessTimesAddr(hProcess, &ftCreation, &ftExit, &ftKernelBegin, &ftUserBegin); - } -} - -/* Return the difference of two FILETIME structs in seconds */ -static double timeDiff(FILETIME *pStart, FILETIME *pEnd){ - sqlite_int64 i64Start = *((sqlite_int64 *) pStart); - sqlite_int64 i64End = *((sqlite_int64 *) pEnd); - return (double) ((i64End - i64Start) / 10000000.0); -} - -/* -** Print the timing results. -*/ -static void endTimer(void){ - if( enableTimer && getProcessTimesAddr){ - FILETIME ftCreation, ftExit, ftKernelEnd, ftUserEnd; - getProcessTimesAddr(hProcess, &ftCreation, &ftExit, &ftKernelEnd, &ftUserEnd); - printf("CPU Time: user %f sys %f\n", - timeDiff(&ftUserBegin, &ftUserEnd), - timeDiff(&ftKernelBegin, &ftKernelEnd)); - } -} - -#define BEGIN_TIMER beginTimer() -#define END_TIMER endTimer() -#define HAS_TIMER hasTimer() - -#else -#define BEGIN_TIMER -#define END_TIMER -#define HAS_TIMER 0 -#endif - -/* -** Used to prevent warnings about unused parameters -*/ -#define UNUSED_PARAMETER(x) (void)(x) - -/* -** If the following flag is set, then command execution stops -** at an error if we are not interactive. -*/ -static int bail_on_error = 0; - -/* -** Threat stdin as an interactive input if the following variable -** is true. Otherwise, assume stdin is connected to a file or pipe. -*/ -static int stdin_is_interactive = 1; - -/* -** The following is the open SQLite database. We make a pointer -** to this database a static variable so that it can be accessed -** by the SIGINT handler to interrupt database processing. -*/ -static sqlite3 *db = 0; - -/* -** True if an interrupt (Control-C) has been received. -*/ -static volatile int seenInterrupt = 0; - -/* -** This is the name of our program. It is set in main(), used -** in a number of other places, mostly for error messages. -*/ -static char *Argv0; - -/* -** Prompt strings. Initialized in main. 
Settable with -** .prompt main continue -*/ -static char mainPrompt[20]; /* First line prompt. default: "sqlite> "*/ -static char continuePrompt[20]; /* Continuation prompt. default: " ...> " */ - -/* -** Write I/O traces to the following stream. -*/ -#ifdef SQLITE_ENABLE_IOTRACE -static FILE *iotrace = 0; -#endif - -/* -** This routine works like printf in that its first argument is a -** format string and subsequent arguments are values to be substituted -** in place of % fields. The result of formatting this string -** is written to iotrace. -*/ -#ifdef SQLITE_ENABLE_IOTRACE -static void iotracePrintf(const char *zFormat, ...){ - va_list ap; - char *z; - if( iotrace==0 ) return; - va_start(ap, zFormat); - z = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - fprintf(iotrace, "%s", z); - sqlite3_free(z); -} -#endif - - -/* -** Determines if a string is a number of not. -*/ -static int isNumber(const char *z, int *realnum){ - if( *z=='-' || *z=='+' ) z++; - if( !IsDigit(*z) ){ - return 0; - } - z++; - if( realnum ) *realnum = 0; - while( IsDigit(*z) ){ z++; } - if( *z=='.' ){ - z++; - if( !IsDigit(*z) ) return 0; - while( IsDigit(*z) ){ z++; } - if( realnum ) *realnum = 1; - } - if( *z=='e' || *z=='E' ){ - z++; - if( *z=='+' || *z=='-' ) z++; - if( !IsDigit(*z) ) return 0; - while( IsDigit(*z) ){ z++; } - if( realnum ) *realnum = 1; - } - return *z==0; -} - -/* -** A global char* and an SQL function to access its current value -** from within an SQL statement. This program used to use the -** sqlite_exec_printf() API to substitue a string into an SQL statement. -** The correct way to do this with sqlite3 is to use the bind API, but -** since the shell is built around the callback paradigm it would be a lot -** of work. Instead just use this hack, which is quite harmless. -*/ -static const char *zShellStatic = 0; -static void shellstaticFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - assert( 0==argc ); - assert( zShellStatic ); - UNUSED_PARAMETER(argc); - UNUSED_PARAMETER(argv); - sqlite3_result_text(context, zShellStatic, -1, SQLITE_STATIC); -} - - -/* -** This routine reads a line of text from FILE in, stores -** the text in memory obtained from malloc() and returns a pointer -** to the text. NULL is returned at end of file, or if malloc() -** fails. -** -** The interface is like "readline" but no command-line editing -** is done. -*/ -static char *local_getline(char *zPrompt, FILE *in, int csvFlag){ - char *zLine; - int nLine; - int n; - int inQuote = 0; - - if( zPrompt && *zPrompt ){ - printf("%s",zPrompt); - fflush(stdout); - } - nLine = 100; - zLine = malloc( nLine ); - if( zLine==0 ) return 0; - n = 0; - while( 1 ){ - if( n+100>nLine ){ - nLine = nLine*2 + 100; - zLine = realloc(zLine, nLine); - if( zLine==0 ) return 0; - } - if( fgets(&zLine[n], nLine - n, in)==0 ){ - if( n==0 ){ - free(zLine); - return 0; - } - zLine[n] = 0; - break; - } - while( zLine[n] ){ - if( zLine[n]=='"' ) inQuote = !inQuote; - n++; - } - if( n>0 && zLine[n-1]=='\n' && (!inQuote || !csvFlag) ){ - n--; - if( n>0 && zLine[n-1]=='\r' ) n--; - zLine[n] = 0; - break; - } - } - zLine = realloc( zLine, n+1 ); - return zLine; -} - -/* -** Retrieve a single line of input text. -** -** zPrior is a string of prior text retrieved. If not the empty -** string, then issue a continuation prompt. 
-*/ -static char *one_input_line(const char *zPrior, FILE *in){ - char *zPrompt; - char *zResult; - if( in!=0 ){ - return local_getline(0, in, 0); - } - if( zPrior && zPrior[0] ){ - zPrompt = continuePrompt; - }else{ - zPrompt = mainPrompt; - } - zResult = readline(zPrompt); -#if defined(HAVE_READLINE) && HAVE_READLINE==1 - if( zResult && *zResult ) add_history(zResult); -#endif - return zResult; -} - -struct previous_mode_data { - int valid; /* Is there legit data in here? */ - int mode; - int showHeader; - int colWidth[100]; -}; - -/* -** An pointer to an instance of this structure is passed from -** the main program to the callback. This is used to communicate -** state and mode information. -*/ -struct callback_data { - sqlite3 *db; /* The database */ - int echoOn; /* True to echo input commands */ - int statsOn; /* True to display memory stats before each finalize */ - int cnt; /* Number of records displayed so far */ - FILE *out; /* Write results here */ - FILE *traceOut; /* Output for sqlite3_trace() */ - int nErr; /* Number of errors seen */ - int mode; /* An output mode setting */ - int writableSchema; /* True if PRAGMA writable_schema=ON */ - int showHeader; /* True to show column names in List or Column mode */ - char *zDestTable; /* Name of destination table when MODE_Insert */ - char separator[20]; /* Separator character for MODE_List */ - int colWidth[100]; /* Requested width of each column when in column mode*/ - int actualWidth[100]; /* Actual width of each column */ - char nullvalue[20]; /* The text to print when a NULL comes back from - ** the database */ - struct previous_mode_data explainPrev; - /* Holds the mode information just before - ** .explain ON */ - char outfile[FILENAME_MAX]; /* Filename for *out */ - const char *zDbFilename; /* name of the database file */ - const char *zVfs; /* Name of VFS to use */ - sqlite3_stmt *pStmt; /* Current statement if any. */ - FILE *pLog; /* Write log output here */ -}; - -/* -** These are the allowed modes. -*/ -#define MODE_Line 0 /* One column per line. Blank line between records */ -#define MODE_Column 1 /* One record per line in neat columns */ -#define MODE_List 2 /* One record per line with a separator */ -#define MODE_Semi 3 /* Same as MODE_List but append ";" to each line */ -#define MODE_Html 4 /* Generate an XHTML table */ -#define MODE_Insert 5 /* Generate SQL "insert" statements */ -#define MODE_Tcl 6 /* Generate ANSI-C or TCL quoted elements */ -#define MODE_Csv 7 /* Quote strings, numbers are plain */ -#define MODE_Explain 8 /* Like MODE_Column, but do not truncate data */ - -static const char *modeDescr[] = { - "line", - "column", - "list", - "semi", - "html", - "insert", - "tcl", - "csv", - "explain", -}; - -/* -** Number of elements in an array -*/ -#define ArraySize(X) (int)(sizeof(X)/sizeof(X[0])) - -/* -** Compute a string length that is limited to what can be stored in -** lower 30 bits of a 32-bit signed integer. -*/ -static int strlen30(const char *z){ - const char *z2 = z; - while( *z2 ){ z2++; } - return 0x3fffffff & (int)(z2 - z); -} - -/* -** A callback for the sqlite3_log() interface. -*/ -static void shellLog(void *pArg, int iErrCode, const char *zMsg){ - struct callback_data *p = (struct callback_data*)pArg; - if( p->pLog==0 ) return; - fprintf(p->pLog, "(%d) %s\n", iErrCode, zMsg); - fflush(p->pLog); -} - -/* -** Output the given string as a hex-encoded blob (eg. 
X'1234' )
-*/
-static void output_hex_blob(FILE *out, const void *pBlob, int nBlob){
-  int i;
-  char *zBlob = (char *)pBlob;
-  fprintf(out,"X'");
-  for(i=0; i<nBlob; i++){ fprintf(out,"%02x",zBlob[i]&0xff); }
-  fprintf(out,"'");
-}
-
-/*
-** Output the given string as a quoted string using SQL quoting conventions.
-*/
-static void output_quoted_string(FILE *out, const char *z){
-  int i;
-  int nSingle = 0;
-  for(i=0; z[i]; i++){
-    if( z[i]=='\'' ) nSingle++;
-  }
-  if( nSingle==0 ){
-    fprintf(out,"'%s'",z);
-  }else{
-    fprintf(out,"'");
-    while( *z ){
-      for(i=0; z[i] && z[i]!='\''; i++){}
-      if( i==0 ){
-        fprintf(out,"''");
-        z++;
-      }else if( z[i]=='\'' ){
-        fprintf(out,"%.*s''",i,z);
-        z += i+1;
-      }else{
-        fprintf(out,"%s",z);
-        break;
-      }
-    }
-    fprintf(out,"'");
-  }
-}
-
-/*
-** Output the given string as a quoted string according to C or TCL quoting rules.
-*/
-static void output_c_string(FILE *out, const char *z){
-  unsigned int c;
-  fputc('"', out);
-  while( (c = *(z++))!=0 ){
-    if( c=='\\' ){
-      fputc(c, out);
-      fputc(c, out);
-    }else if( c=='"' ){
-      fputc('\\', out);
-      fputc('"', out);
-    }else if( c=='\t' ){
-      fputc('\\', out);
-      fputc('t', out);
-    }else if( c=='\n' ){
-      fputc('\\', out);
-      fputc('n', out);
-    }else if( c=='\r' ){
-      fputc('\\', out);
-      fputc('r', out);
-    }else if( !isprint(c) ){
-      fprintf(out, "\\%03o", c&0xff);
-    }else{
-      fputc(c, out);
-    }
-  }
-  fputc('"', out);
-}
-
-/*
-** Output the given string with characters that are special to
-** HTML escaped.
-*/
-static void output_html_string(FILE *out, const char *z){
-  int i;
-  while( *z ){
-    for(i=0;   z[i]
-            && z[i]!='<'
-            && z[i]!='&'
-            && z[i]!='>'
-            && z[i]!='\"'
-            && z[i]!='\'';
-        i++){}
-    if( i>0 ){
-      fprintf(out,"%.*s",i,z);
-    }
-    if( z[i]=='<' ){
-      fprintf(out,"&lt;");
-    }else if( z[i]=='&' ){
-      fprintf(out,"&amp;");
-    }else if( z[i]=='>' ){
-      fprintf(out,"&gt;");
-    }else if( z[i]=='\"' ){
-      fprintf(out,"&quot;");
-    }else if( z[i]=='\'' ){
-      fprintf(out,"&#39;");
-    }else{
-      break;
-    }
-    z += i + 1;
-  }
-}
-
-/*
-** If a field contains any character identified by a 1 in the following
-** array, then the string must be quoted for CSV.
-*/
-static const char needCsvQuote[] = {
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-};
-
-/*
-** Output a single term of CSV.  Actually, p->separator is used for
-** the separator, which may or may not be a comma.  p->nullvalue is
-** the null value.  Strings are quoted if necessary.
-*/
-static void output_csv(struct callback_data *p, const char *z, int bSep){
-  FILE *out = p->out;
-  if( z==0 ){
-    fprintf(out,"%s",p->nullvalue);
-  }else{
-    int i;
-    int nSep = strlen30(p->separator);
-    for(i=0; z[i]; i++){
-      if( needCsvQuote[((unsigned char*)z)[i]]
-         || (z[i]==p->separator[0] &&
-             (nSep==1 || memcmp(z, p->separator, nSep)==0)) ){
-        i = 0;
-        break;
-      }
-    }
-    if( i==0 ){
-      putc('"', out);
-      for(i=0; z[i]; i++){
-        if( z[i]=='"' ) putc('"', out);
-        putc(z[i], out);
-      }
-      putc('"', out);
-    }else{
-      fprintf(out, "%s", z);
-    }
-  }
-  if( bSep ){
-    fprintf(p->out, "%s", p->separator);
-  }
-}
-
-#ifdef SIGINT
-/*
-** This routine runs when the user presses Ctrl-C
-*/
-static void interrupt_handler(int NotUsed){
-  UNUSED_PARAMETER(NotUsed);
-  seenInterrupt = 1;
-  if( db ) sqlite3_interrupt(db);
-}
-#endif
-
-/*
-** This is the callback routine that the shell
-** invokes for each row of a query result.
-*/
-static int shell_callback(void *pArg, int nArg, char **azArg, char **azCol, int *aiType){
-  int i;
-  struct callback_data *p = (struct callback_data*)pArg;
-
-  switch( p->mode ){
-    case MODE_Line: {
-      int w = 5;
-      if( azArg==0 ) break;
-      for(i=0; i<nArg; i++){
-        int len = strlen30(azCol[i] ? azCol[i] : "");
-        if( len>w ) w = len;
-      }
-      if( p->cnt++>0 ) fprintf(p->out,"\n");
-      for(i=0; i<nArg; i++){
-        fprintf(p->out,"%*s = %s\n", w, azCol[i],
-                azArg[i] ? azArg[i] : p->nullvalue);
-      }
-      break;
-    }
-    case MODE_Explain:
-    case MODE_Column: {
-      if( p->cnt++==0 ){
-        for(i=0; i<nArg; i++){
-          int w, n;
-          if( i<ArraySize(p->colWidth) ){
-            w = p->colWidth[i];
-          }else{
-            w = 0;
-          }
-          if( w<=0 ){
-            w = strlen30(azCol[i] ? azCol[i] : "");
-            if( w<10 ) w = 10;
-            n = strlen30(azArg && azArg[i] ? azArg[i] : p->nullvalue);
-            if( w<n ) w = n;
-          }
-          if( i<ArraySize(p->actualWidth) ){
-            p->actualWidth[i] = w;
-          }
-          if( p->showHeader ){
-            fprintf(p->out,"%-*.*s%s",w,w,azCol[i], i==nArg-1 ? "\n": "  ");
-          }
-        }
-        if( p->showHeader ){
-          for(i=0; i<nArg; i++){
-            int w;
-            if( i<ArraySize(p->actualWidth) ){
-              w = p->actualWidth[i];
-            }else{
-              w = 10;
-            }
-            fprintf(p->out,"%-*.*s%s",w,w,"-----------------------------------"
-                   "----------------------------------------------------------",
-                    i==nArg-1 ? "\n": "  ");
-          }
-        }
-      }
-      if( azArg==0 ) break;
-      for(i=0; i<nArg; i++){
-        int w;
-        if( i<ArraySize(p->actualWidth) ){
-          w = p->actualWidth[i];
-        }else{
-          w = 10;
-        }
-        if( p->mode==MODE_Explain && azArg[i] &&
-           strlen30(azArg[i])>w ){
-          w = strlen30(azArg[i]);
-        }
-        fprintf(p->out,"%-*.*s%s",w,w,
-            azArg[i] ? azArg[i] : p->nullvalue, i==nArg-1 ? "\n": "  ");
-      }
-      break;
-    }
-    case MODE_Semi:
-    case MODE_List: {
-      if( p->cnt++==0 && p->showHeader ){
-        for(i=0; i<nArg; i++){
-          fprintf(p->out,"%s%s",azCol[i], i==nArg-1 ? "\n" : p->separator);
-        }
-      }
-      if( azArg==0 ) break;
-      for(i=0; i<nArg; i++){
-        char *z = azArg[i];
-        if( z==0 ) z = p->nullvalue;
-        fprintf(p->out, "%s", z);
-        if( i<nArg-1 ){
-          fprintf(p->out, "%s", p->separator);
-        }else if( p->mode==MODE_Semi ){
-          fprintf(p->out, ";\n");
-        }else{
-          fprintf(p->out, "\n");
-        }
-      }
-      break;
-    }
-    case MODE_Html: {
-      if( p->cnt++==0 && p->showHeader ){
-        fprintf(p->out,"<TR>");
-        for(i=0; i<nArg; i++){
-          fprintf(p->out,"<TH>");
-          output_html_string(p->out, azCol[i]);
-          fprintf(p->out,"</TH>\n");
-        }
-        fprintf(p->out,"</TR>\n");
-      }
-      if( azArg==0 ) break;
-      fprintf(p->out,"<TR>");
-      for(i=0; i<nArg; i++){
-        fprintf(p->out,"<TD>");
-        output_html_string(p->out, azArg[i] ? azArg[i] : p->nullvalue);
-        fprintf(p->out,"</TD>\n");
-      }
-      fprintf(p->out,"</TR>\n");
-      break;
-    }
-    case MODE_Tcl: {
-      if( p->cnt++==0 && p->showHeader ){
-        for(i=0; i<nArg; i++){
-          output_c_string(p->out,azCol[i] ? azCol[i] : "");
-          fprintf(p->out, "%s", p->separator);
-        }
-        fprintf(p->out,"\n");
-      }
-      if( azArg==0 ) break;
-      for(i=0; i<nArg; i++){
-        output_c_string(p->out, azArg[i] ? azArg[i] : p->nullvalue);
-        fprintf(p->out, "%s", p->separator);
-      }
-      fprintf(p->out,"\n");
-      break;
-    }
-    case MODE_Csv: {
-      if( p->cnt++==0 && p->showHeader ){
-        for(i=0; i<nArg; i++){
-          output_csv(p, azCol[i] ? azCol[i] : "", i<nArg-1);
-        }
-        fprintf(p->out,"\n");
-      }
-      if( azArg==0 ) break;
-      for(i=0; i<nArg; i++){
-        output_csv(p, azArg[i], i<nArg-1);
-      }
-      fprintf(p->out,"\n");
-      break;
-    }
-    case MODE_Insert: {
-      p->cnt++;
-      if( azArg==0 ) break;
-      fprintf(p->out,"INSERT INTO %s VALUES(",p->zDestTable);
-      for(i=0; i<nArg; i++){
-        char *zSep = i>0 ? ",": "";
-        if( (azArg[i]==0) || (aiType && aiType[i]==SQLITE_NULL) ){
-          fprintf(p->out,"%sNULL",zSep);
-        }else if( aiType && aiType[i]==SQLITE_TEXT ){
-          if( zSep[0] ) fprintf(p->out,"%s",zSep);
-          output_quoted_string(p->out, azArg[i]);
-        }else if( aiType && (aiType[i]==SQLITE_INTEGER || aiType[i]==SQLITE_FLOAT) ){
-          fprintf(p->out,"%s%s",zSep, azArg[i]);
-        }else if( aiType && aiType[i]==SQLITE_BLOB && p->pStmt ){
-          const void *pBlob = sqlite3_column_blob(p->pStmt, i);
-          int nBlob = sqlite3_column_bytes(p->pStmt, i);
-          if( zSep[0] ) fprintf(p->out,"%s",zSep);
-          output_hex_blob(p->out, pBlob, nBlob);
-        }else if( isNumber(azArg[i], 0) ){
-          fprintf(p->out,"%s%s",zSep, azArg[i]);
-        }else{
-          if( zSep[0] ) fprintf(p->out,"%s",zSep);
-          output_quoted_string(p->out, azArg[i]);
-        }
-      }
-      fprintf(p->out,");\n");
-      break;
-    }
-  }
-  return 0;
-}
-
-/*
-** This is the callback routine that the SQLite library
-** invokes for each row of a query result.
-*/
-static int callback(void *pArg, int nArg, char **azArg, char **azCol){
-  /* since we don't have type info, call the shell_callback with a NULL value */
-  return shell_callback(pArg, nArg, azArg, azCol, NULL);
-}
-
-/*
-** Set the destination table field of the callback_data structure to
-** the name of the table given.  Escape any quote characters in the
-** table name.
-*/
-static void set_table_name(struct callback_data *p, const char *zName){
-  int i, n;
-  int needQuote;
-  char *z;
-
-  if( p->zDestTable ){
-    free(p->zDestTable);
-    p->zDestTable = 0;
-  }
-  if( zName==0 ) return;
-  needQuote = !isalpha((unsigned char)*zName) && *zName!='_';
-  for(i=n=0; zName[i]; i++, n++){
-    if( !isalnum((unsigned char)zName[i]) && zName[i]!='_' ){
-      needQuote = 1;
-      if( zName[i]=='\'' ) n++;
-    }
-  }
-  if( needQuote ) n += 2;
-  z = p->zDestTable = malloc( n+1 );
-  if( z==0 ){
-    fprintf(stderr,"Error: out of memory\n");
-    exit(1);
-  }
-  n = 0;
-  if( needQuote ) z[n++] = '\'';
-  for(i=0; zName[i]; i++){
-    z[n++] = zName[i];
-    if( zName[i]=='\'' ) z[n++] = '\'';
-  }
-  if( needQuote ) z[n++] = '\'';
-  z[n] = 0;
-}
-
-/* zIn is either a pointer to a NULL-terminated string in memory obtained
-** from malloc(), or a NULL pointer. The string pointed to by zAppend is
-** added to zIn, and the result returned in memory obtained from malloc().
-** zIn, if it was not NULL, is freed.
-**
-** If the third argument, quote, is not '\0', then it is used as a
-** quote character for zAppend.
-*/ -static char *appendText(char *zIn, char const *zAppend, char quote){ - int len; - int i; - int nAppend = strlen30(zAppend); - int nIn = (zIn?strlen30(zIn):0); - - len = nAppend+nIn+1; - if( quote ){ - len += 2; - for(i=0; idb, zSelect, -1, &pSelect, 0); - if( rc!=SQLITE_OK || !pSelect ){ - fprintf(p->out, "/**** ERROR: (%d) %s *****/\n", rc, sqlite3_errmsg(p->db)); - p->nErr++; - return rc; - } - rc = sqlite3_step(pSelect); - nResult = sqlite3_column_count(pSelect); - while( rc==SQLITE_ROW ){ - if( zFirstRow ){ - fprintf(p->out, "%s", zFirstRow); - zFirstRow = 0; - } - z = (const char*)sqlite3_column_text(pSelect, 0); - fprintf(p->out, "%s", z); - for(i=1; iout, ",%s", sqlite3_column_text(pSelect, i)); - } - if( z==0 ) z = ""; - while( z[0] && (z[0]!='-' || z[1]!='-') ) z++; - if( z[0] ){ - fprintf(p->out, "\n;\n"); - }else{ - fprintf(p->out, ";\n"); - } - rc = sqlite3_step(pSelect); - } - rc = sqlite3_finalize(pSelect); - if( rc!=SQLITE_OK ){ - fprintf(p->out, "/**** ERROR: (%d) %s *****/\n", rc, sqlite3_errmsg(p->db)); - p->nErr++; - } - return rc; -} - -/* -** Allocate space and save off current error string. -*/ -static char *save_err_msg( - sqlite3 *db /* Database to query */ -){ - int nErrMsg = 1+strlen30(sqlite3_errmsg(db)); - char *zErrMsg = sqlite3_malloc(nErrMsg); - if( zErrMsg ){ - memcpy(zErrMsg, sqlite3_errmsg(db), nErrMsg); - } - return zErrMsg; -} - -/* -** Display memory stats. -*/ -static int display_stats( - sqlite3 *db, /* Database to query */ - struct callback_data *pArg, /* Pointer to struct callback_data */ - int bReset /* True to reset the stats */ -){ - int iCur; - int iHiwtr; - - if( pArg && pArg->out ){ - - iHiwtr = iCur = -1; - sqlite3_status(SQLITE_STATUS_MEMORY_USED, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Memory Used: %d (max %d) bytes\n", iCur, iHiwtr); - iHiwtr = iCur = -1; - sqlite3_status(SQLITE_STATUS_MALLOC_COUNT, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Number of Outstanding Allocations: %d (max %d)\n", iCur, iHiwtr); -/* -** Not currently used by the CLI. -** iHiwtr = iCur = -1; -** sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &iCur, &iHiwtr, bReset); -** fprintf(pArg->out, "Number of Pcache Pages Used: %d (max %d) pages\n", iCur, iHiwtr); -*/ - iHiwtr = iCur = -1; - sqlite3_status(SQLITE_STATUS_PAGECACHE_OVERFLOW, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Number of Pcache Overflow Bytes: %d (max %d) bytes\n", iCur, iHiwtr); -/* -** Not currently used by the CLI. 
-** iHiwtr = iCur = -1; -** sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &iCur, &iHiwtr, bReset); -** fprintf(pArg->out, "Number of Scratch Allocations Used: %d (max %d)\n", iCur, iHiwtr); -*/ - iHiwtr = iCur = -1; - sqlite3_status(SQLITE_STATUS_SCRATCH_OVERFLOW, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Number of Scratch Overflow Bytes: %d (max %d) bytes\n", iCur, iHiwtr); - iHiwtr = iCur = -1; - sqlite3_status(SQLITE_STATUS_MALLOC_SIZE, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Largest Allocation: %d bytes\n", iHiwtr); - iHiwtr = iCur = -1; - sqlite3_status(SQLITE_STATUS_PAGECACHE_SIZE, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Largest Pcache Allocation: %d bytes\n", iHiwtr); - iHiwtr = iCur = -1; - sqlite3_status(SQLITE_STATUS_SCRATCH_SIZE, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Largest Scratch Allocation: %d bytes\n", iHiwtr); -#ifdef YYTRACKMAXSTACKDEPTH - iHiwtr = iCur = -1; - sqlite3_status(SQLITE_STATUS_PARSER_STACK, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Deepest Parser Stack: %d (max %d)\n", iCur, iHiwtr); -#endif - } - - if( pArg && pArg->out && db ){ - iHiwtr = iCur = -1; - sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Lookaside Slots Used: %d (max %d)\n", iCur, iHiwtr); - sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Successful lookaside attempts: %d\n", iHiwtr); - sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Lookaside failures due to size: %d\n", iHiwtr); - sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Lookaside failures due to OOM: %d\n", iHiwtr); - iHiwtr = iCur = -1; - sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Pager Heap Usage: %d bytes\n", iCur); iHiwtr = iCur = -1; - sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &iCur, &iHiwtr, 1); - fprintf(pArg->out, "Page cache hits: %d\n", iCur); - iHiwtr = iCur = -1; - sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &iCur, &iHiwtr, 1); - fprintf(pArg->out, "Page cache misses: %d\n", iCur); - iHiwtr = iCur = -1; - sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &iCur, &iHiwtr, 1); - fprintf(pArg->out, "Page cache writes: %d\n", iCur); - iHiwtr = iCur = -1; - sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Schema Heap Usage: %d bytes\n", iCur); - iHiwtr = iCur = -1; - sqlite3_db_status(db, SQLITE_DBSTATUS_STMT_USED, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Statement Heap/Lookaside Usage: %d bytes\n", iCur); - } - - if( pArg && pArg->out && db && pArg->pStmt ){ - iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, bReset); - fprintf(pArg->out, "Fullscan Steps: %d\n", iCur); - iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_SORT, bReset); - fprintf(pArg->out, "Sort Operations: %d\n", iCur); - iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_AUTOINDEX, bReset); - fprintf(pArg->out, "Autoindex Inserts: %d\n", iCur); - } - - return 0; -} - -/* -** Execute a statement or set of statements. Print -** any result rows/columns depending on the current mode -** set via the supplied callback. -** -** This is very similar to SQLite's built-in sqlite3_exec() -** function except it takes a slightly different callback -** and callback data argument. 
-*/ -static int shell_exec( - sqlite3 *db, /* An open database */ - const char *zSql, /* SQL to be evaluated */ - int (*xCallback)(void*,int,char**,char**,int*), /* Callback function */ - /* (not the same as sqlite3_exec) */ - struct callback_data *pArg, /* Pointer to struct callback_data */ - char **pzErrMsg /* Error msg written here */ -){ - sqlite3_stmt *pStmt = NULL; /* Statement to execute. */ - int rc = SQLITE_OK; /* Return Code */ - int rc2; - const char *zLeftover; /* Tail of unprocessed SQL */ - - if( pzErrMsg ){ - *pzErrMsg = NULL; - } - - while( zSql[0] && (SQLITE_OK == rc) ){ - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zLeftover); - if( SQLITE_OK != rc ){ - if( pzErrMsg ){ - *pzErrMsg = save_err_msg(db); - } - }else{ - if( !pStmt ){ - /* this happens for a comment or white-space */ - zSql = zLeftover; - while( IsSpace(zSql[0]) ) zSql++; - continue; - } - - /* save off the prepared statment handle and reset row count */ - if( pArg ){ - pArg->pStmt = pStmt; - pArg->cnt = 0; - } - - /* echo the sql statement if echo on */ - if( pArg && pArg->echoOn ){ - const char *zStmtSql = sqlite3_sql(pStmt); - fprintf(pArg->out, "%s\n", zStmtSql ? zStmtSql : zSql); - } - - /* Output TESTCTRL_EXPLAIN text of requested */ - if( pArg && pArg->mode==MODE_Explain ){ - const char *zExplain = 0; - sqlite3_test_control(SQLITE_TESTCTRL_EXPLAIN_STMT, pStmt, &zExplain); - if( zExplain && zExplain[0] ){ - fprintf(pArg->out, "%s", zExplain); - } - } - - /* perform the first step. this will tell us if we - ** have a result set or not and how wide it is. - */ - rc = sqlite3_step(pStmt); - /* if we have a result set... */ - if( SQLITE_ROW == rc ){ - /* if we have a callback... */ - if( xCallback ){ - /* allocate space for col name ptr, value ptr, and type */ - int nCol = sqlite3_column_count(pStmt); - void *pData = sqlite3_malloc(3*nCol*sizeof(const char*) + 1); - if( !pData ){ - rc = SQLITE_NOMEM; - }else{ - char **azCols = (char **)pData; /* Names of result columns */ - char **azVals = &azCols[nCol]; /* Results */ - int *aiTypes = (int *)&azVals[nCol]; /* Result types */ - int i; - assert(sizeof(int) <= sizeof(char *)); - /* save off ptrs to column names */ - for(i=0; istatsOn ){ - display_stats(db, pArg, 0); - } - - /* Finalize the statement just executed. If this fails, save a - ** copy of the error message. Otherwise, set zSql to point to the - ** next statement to execute. */ - rc2 = sqlite3_finalize(pStmt); - if( rc!=SQLITE_NOMEM ) rc = rc2; - if( rc==SQLITE_OK ){ - zSql = zLeftover; - while( IsSpace(zSql[0]) ) zSql++; - }else if( pzErrMsg ){ - *pzErrMsg = save_err_msg(db); - } - - /* clear saved stmt handle */ - if( pArg ){ - pArg->pStmt = NULL; - } - } - } /* end while */ - - return rc; -} - - -/* -** This is a different callback routine used for dumping the database. -** Each row received by this callback consists of a table name, -** the table type ("index" or "table") and SQL to create the table. -** This routine should print text sufficient to recreate the table. 
-*/ -static int dump_callback(void *pArg, int nArg, char **azArg, char **azCol){ - int rc; - const char *zTable; - const char *zType; - const char *zSql; - const char *zPrepStmt = 0; - struct callback_data *p = (struct callback_data *)pArg; - - UNUSED_PARAMETER(azCol); - if( nArg!=3 ) return 1; - zTable = azArg[0]; - zType = azArg[1]; - zSql = azArg[2]; - - if( strcmp(zTable, "sqlite_sequence")==0 ){ - zPrepStmt = "DELETE FROM sqlite_sequence;\n"; - }else if( strcmp(zTable, "sqlite_stat1")==0 ){ - fprintf(p->out, "ANALYZE sqlite_master;\n"); - }else if( strncmp(zTable, "sqlite_", 7)==0 ){ - return 0; - }else if( strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){ - char *zIns; - if( !p->writableSchema ){ - fprintf(p->out, "PRAGMA writable_schema=ON;\n"); - p->writableSchema = 1; - } - zIns = sqlite3_mprintf( - "INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)" - "VALUES('table','%q','%q',0,'%q');", - zTable, zTable, zSql); - fprintf(p->out, "%s\n", zIns); - sqlite3_free(zIns); - return 0; - }else{ - fprintf(p->out, "%s;\n", zSql); - } - - if( strcmp(zType, "table")==0 ){ - sqlite3_stmt *pTableInfo = 0; - char *zSelect = 0; - char *zTableInfo = 0; - char *zTmp = 0; - int nRow = 0; - - zTableInfo = appendText(zTableInfo, "PRAGMA table_info(", 0); - zTableInfo = appendText(zTableInfo, zTable, '"'); - zTableInfo = appendText(zTableInfo, ");", 0); - - rc = sqlite3_prepare(p->db, zTableInfo, -1, &pTableInfo, 0); - free(zTableInfo); - if( rc!=SQLITE_OK || !pTableInfo ){ - return 1; - } - - zSelect = appendText(zSelect, "SELECT 'INSERT INTO ' || ", 0); - /* Always quote the table name, even if it appears to be pure ascii, - ** in case it is a keyword. Ex: INSERT INTO "table" ... */ - zTmp = appendText(zTmp, zTable, '"'); - if( zTmp ){ - zSelect = appendText(zSelect, zTmp, '\''); - free(zTmp); - } - zSelect = appendText(zSelect, " || ' VALUES(' || ", 0); - rc = sqlite3_step(pTableInfo); - while( rc==SQLITE_ROW ){ - const char *zText = (const char *)sqlite3_column_text(pTableInfo, 1); - zSelect = appendText(zSelect, "quote(", 0); - zSelect = appendText(zSelect, zText, '"'); - rc = sqlite3_step(pTableInfo); - if( rc==SQLITE_ROW ){ - zSelect = appendText(zSelect, "), ", 0); - }else{ - zSelect = appendText(zSelect, ") ", 0); - } - nRow++; - } - rc = sqlite3_finalize(pTableInfo); - if( rc!=SQLITE_OK || nRow==0 ){ - free(zSelect); - return 1; - } - zSelect = appendText(zSelect, "|| ')' FROM ", 0); - zSelect = appendText(zSelect, zTable, '"'); - - rc = run_table_dump_query(p, zSelect, zPrepStmt); - if( rc==SQLITE_CORRUPT ){ - zSelect = appendText(zSelect, " ORDER BY rowid DESC", 0); - run_table_dump_query(p, zSelect, 0); - } - free(zSelect); - } - return 0; -} - -/* -** Run zQuery. Use dump_callback() as the callback routine so that -** the contents of the query are output as SQL statements. -** -** If we get a SQLITE_CORRUPT error, rerun the query after appending -** "ORDER BY rowid DESC" to the end. 
-*/
-static int run_schema_dump_query(
-  struct callback_data *p,
-  const char *zQuery
-){
-  int rc;
-  char *zErr = 0;
-  rc = sqlite3_exec(p->db, zQuery, dump_callback, p, &zErr);
-  if( rc==SQLITE_CORRUPT ){
-    char *zQ2;
-    int len = strlen30(zQuery);
-    fprintf(p->out, "/****** CORRUPTION ERROR *******/\n");
-    if( zErr ){
-      fprintf(p->out, "/****** %s ******/\n", zErr);
-      sqlite3_free(zErr);
-      zErr = 0;
-    }
-    zQ2 = malloc( len+100 );
-    if( zQ2==0 ) return rc;
-    sqlite3_snprintf(len+100, zQ2, "%s ORDER BY rowid DESC", zQuery);
-    rc = sqlite3_exec(p->db, zQ2, dump_callback, p, &zErr);
-    if( rc ){
-      fprintf(p->out, "/****** ERROR: %s ******/\n", zErr);
-    }else{
-      rc = SQLITE_CORRUPT;
-    }
-    sqlite3_free(zErr);
-    free(zQ2);
-  }
-  return rc;
-}
-
-/*
-** Text of a help message
-*/
-static char zHelp[] =
-  ".backup ?DB? FILE      Backup DB (default \"main\") to FILE\n"
-  ".bail ON|OFF           Stop after hitting an error.  Default OFF\n"
-  ".databases             List names and files of attached databases\n"
-  ".dump ?TABLE? ...      Dump the database in an SQL text format\n"
-  "                         If TABLE specified, only dump tables matching\n"
-  "                         LIKE pattern TABLE.\n"
-  ".echo ON|OFF           Turn command echo on or off\n"
-  ".exit                  Exit this program\n"
-  ".explain ?ON|OFF?      Turn output mode suitable for EXPLAIN on or off.\n"
-  "                         With no args, it turns EXPLAIN on.\n"
-  ".header(s) ON|OFF      Turn display of headers on or off\n"
-  ".help                  Show this message\n"
-  ".import FILE TABLE     Import data from FILE into TABLE\n"
-  ".indices ?TABLE?       Show names of all indices\n"
-  "                         If TABLE specified, only show indices for tables\n"
-  "                         matching LIKE pattern TABLE.\n"
-#ifdef SQLITE_ENABLE_IOTRACE
-  ".iotrace FILE          Enable I/O diagnostic logging to FILE\n"
-#endif
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
-  ".load FILE ?ENTRY?     Load an extension library\n"
-#endif
-  ".log FILE|off          Turn logging on or off.  FILE can be stderr/stdout\n"
-  ".mode MODE ?TABLE?     Set output mode where MODE is one of:\n"
-  "                         csv      Comma-separated values\n"
-  "                         column   Left-aligned columns.  (See .width)\n"
-  "                         html     HTML <table> code\n"
-  "                         insert   SQL insert statements for TABLE\n"
-  "                         line     One value per line\n"
-  "                         list     Values delimited by .separator string\n"
-  "                         tabs     Tab-separated values\n"
-  "                         tcl      TCL list elements\n"
-  ".nullvalue STRING      Print STRING in place of NULL values\n"
-  ".output FILENAME       Send output to FILENAME\n"
-  ".output stdout         Send output to the screen\n"
-  ".prompt MAIN CONTINUE  Replace the standard prompts\n"
-  ".quit                  Exit this program\n"
-  ".read FILENAME         Execute SQL in FILENAME\n"
-  ".restore ?DB? FILE     Restore content of DB (default \"main\") from FILE\n"
-  ".schema ?TABLE?        Show the CREATE statements\n"
-  "                         If TABLE specified, only show tables matching\n"
-  "                         LIKE pattern TABLE.\n"
-  ".separator STRING      Change separator used by output mode and .import\n"
-  ".show                  Show the current values for various settings\n"
-  ".stats ON|OFF          Turn stats on or off\n"
-  ".tables ?TABLE?        List names of tables\n"
-  "                         If TABLE specified, only list tables matching\n"
-  "                         LIKE pattern TABLE.\n"
-  ".timeout MS            Try opening locked tables for MS milliseconds\n"
-  ".trace FILE|off        Output each SQL statement as it is run\n"
-  ".vfsname ?AUX?         Print the name of the VFS stack\n"
-  ".width NUM1 NUM2 ...   Set column widths for \"column\" mode\n"
-;
-
-static char zTimerHelp[] =
-  ".timer ON|OFF          Turn the CPU timer measurement on or off\n"
-;
-
-/* Forward reference */
-static int process_input(struct callback_data *p, FILE *in);
-
-/*
-** Make sure the database is open.  If it is not, then open it.  If
-** the database fails to open, print an error message and exit.
-*/
-static void open_db(struct callback_data *p){
-  if( p->db==0 ){
-    sqlite3_open(p->zDbFilename, &p->db);
-    db = p->db;
-    if( db && sqlite3_errcode(db)==SQLITE_OK ){
-      sqlite3_create_function(db, "shellstatic", 0, SQLITE_UTF8, 0,
-          shellstaticFunc, 0, 0);
-    }
-    if( db==0 || SQLITE_OK!=sqlite3_errcode(db) ){
-      fprintf(stderr,"Error: unable to open database \"%s\": %s\n",
-          p->zDbFilename, sqlite3_errmsg(db));
-      exit(1);
-    }
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
-    sqlite3_enable_load_extension(p->db, 1);
-#endif
-  }
-}
-
-/*
-** Do C-language style dequoting.
-**
-**    \t    -> tab
-**    \n    -> newline
-**    \r    -> carriage return
-**    \NNN  -> ascii character NNN in octal
-**    \\    -> backslash
-*/
-static void resolve_backslashes(char *z){
-  int i, j;
-  char c;
-  for(i=j=0; (c = z[i])!=0; i++, j++){
-    if( c=='\\' ){
-      c = z[++i];
-      if( c=='n' ){
-        c = '\n';
-      }else if( c=='t' ){
-        c = '\t';
-      }else if( c=='r' ){
-        c = '\r';
-      }else if( c>='0' && c<='7' ){
-        c -= '0';
-        if( z[i+1]>='0' && z[i+1]<='7' ){
-          i++;
-          c = (c<<3) + z[i] - '0';
-          if( z[i+1]>='0' && z[i+1]<='7' ){
-            i++;
-            c = (c<<3) + z[i] - '0';
-          }
-        }
-      }
-    }
-    z[j] = c;
-  }
-  z[j] = 0;
-}
-
-/*
-** Interpret zArg as a boolean value.  Return either 0 or 1.
-*/
-static int booleanValue(char *zArg){
-  int val = atoi(zArg);
-  int j;
-  for(j=0; zArg[j]; j++){
-    zArg[j] = ToLower(zArg[j]);
-  }
-  if( strcmp(zArg,"on")==0 ){
-    val = 1;
-  }else if( strcmp(zArg,"yes")==0 ){
-    val = 1;
-  }
-  return val;
-}
-
-/*
-** Close an output file, assuming it is not stderr or stdout
-*/
-static void output_file_close(FILE *f){
-  if( f && f!=stdout && f!=stderr ) fclose(f);
-}
-
-/*
-** Try to open an output file.   The names "stdout" and "stderr" are
-** recognized and do the right thing.  NULL is returned if the output
-** filename is "off".
-*/
-static FILE *output_file_open(const char *zFile){
-  FILE *f;
-  if( strcmp(zFile,"stdout")==0 ){
-    f = stdout;
-  }else if( strcmp(zFile, "stderr")==0 ){
-    f = stderr;
-  }else if( strcmp(zFile, "off")==0 ){
-    f = 0;
-  }else{
-    f = fopen(zFile, "wb");
-    if( f==0 ){
-      fprintf(stderr, "Error: cannot open \"%s\"\n", zFile);
-    }
-  }
-  return f;
-}
-
-/*
-** A routine for handling output from sqlite3_trace().
-*/
-static void sql_trace_callback(void *pArg, const char *z){
-  FILE *f = (FILE*)pArg;
-  if( f ) fprintf(f, "%s\n", z);
-}
-
-/*
-** A no-op routine that runs with the ".breakpoint" doc-command.  This is
-** a useful spot to set a debugger breakpoint.
-*/
-static void test_breakpoint(void){
-  static int nCall = 0;
-  nCall++;
-}
-
-/*
-** If an input line begins with "." then invoke this routine to
-** process that line.
-**
-** Return 1 on error, 2 to exit, and 0 otherwise.
-*/
-static int do_meta_command(char *zLine, struct callback_data *p){
-  int i = 1;
-  int nArg = 0;
-  int n, c;
-  int rc = 0;
-  char *azArg[50];
-
-  /* Parse the input line into tokens.
-  */
-  while( zLine[i] && nArg<ArraySize(azArg) ){
-    while( IsSpace(zLine[i]) ){ i++; }
-    if( zLine[i]==0 ) break;
-    if( zLine[i]=='\'' || zLine[i]=='"' ){
-      int delim = zLine[i++];
-      azArg[nArg++] = &zLine[i];
-      while( zLine[i] && zLine[i]!=delim ){ i++; }
-      if( zLine[i]==delim ){
-        zLine[i++] = 0;
-      }
-      if( delim=='"' ) resolve_backslashes(azArg[nArg-1]);
-    }else{
-      azArg[nArg++] = &zLine[i];
-      while( zLine[i] && !IsSpace(zLine[i]) ){ i++; }
-      if( zLine[i] ) zLine[i++] = 0;
-      resolve_backslashes(azArg[nArg-1]);
-    }
-  }
-
-  /* Process the input line.
-  */
-  if( nArg==0 ) return 0; /* no tokens, no error */
-  n = strlen30(azArg[0]);
-  c = azArg[0][0];
-  if( c=='b' && n>=3 && strncmp(azArg[0], "backup", n)==0 && nArg>1 && nArg<4){
-    const char *zDestFile;
-    const char *zDb;
-    sqlite3 *pDest;
-    sqlite3_backup *pBackup;
-    if( nArg==2 ){
-      zDestFile = azArg[1];
-      zDb = "main";
-    }else{
-      zDestFile = azArg[2];
-      zDb = azArg[1];
-    }
-    rc = sqlite3_open(zDestFile, &pDest);
-    if( rc!=SQLITE_OK ){
-      fprintf(stderr, "Error: cannot open \"%s\"\n", zDestFile);
-      sqlite3_close(pDest);
-      return 1;
-    }
-    open_db(p);
-    pBackup = sqlite3_backup_init(pDest, "main", p->db, zDb);
-    if( pBackup==0 ){
-      fprintf(stderr, "Error: %s\n", sqlite3_errmsg(pDest));
-      sqlite3_close(pDest);
-      return 1;
-    }
-    while(  (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK ){}
-    sqlite3_backup_finish(pBackup);
-    if( rc==SQLITE_DONE ){
-      rc = 0;
-    }else{
-      fprintf(stderr, "Error: %s\n", sqlite3_errmsg(pDest));
-      rc = 1;
-    }
-    sqlite3_close(pDest);
-  }else
-
-  if( c=='b' && n>=3 && strncmp(azArg[0], "bail", n)==0 && nArg>1 && nArg<3 ){
-    bail_on_error = booleanValue(azArg[1]);
-  }else
-
-  /* The undocumented ".breakpoint" command causes a call to the no-op
-  ** routine named test_breakpoint().
-  */
-  if( c=='b' && n>=3 && strncmp(azArg[0], "breakpoint", n)==0 ){
-    test_breakpoint();
-  }else
-
-  if( c=='d' && n>1 && strncmp(azArg[0], "databases", n)==0 && nArg==1 ){
-    struct callback_data data;
-    char *zErrMsg = 0;
-    open_db(p);
-    memcpy(&data, p, sizeof(data));
-    data.showHeader = 1;
-    data.mode = MODE_Column;
-    data.colWidth[0] = 3;
-    data.colWidth[1] = 15;
-    data.colWidth[2] = 58;
-    data.cnt = 0;
-    sqlite3_exec(p->db, "PRAGMA database_list; ", callback, &data, &zErrMsg);
-    if( zErrMsg ){
-      fprintf(stderr,"Error: %s\n", zErrMsg);
-      sqlite3_free(zErrMsg);
-      rc = 1;
-    }
-  }else
-
-  if( c=='d' && strncmp(azArg[0], "dump", n)==0 && nArg<3 ){
-    open_db(p);
-    /* When playing back a "dump", the content might appear in an order
-    ** which causes immediate foreign key constraints to be violated.
-    ** So disable foreign-key constraint enforcement to prevent problems.
*/ - fprintf(p->out, "PRAGMA foreign_keys=OFF;\n"); - fprintf(p->out, "BEGIN TRANSACTION;\n"); - p->writableSchema = 0; - sqlite3_exec(p->db, "SAVEPOINT dump; PRAGMA writable_schema=ON", 0, 0, 0); - p->nErr = 0; - if( nArg==1 ){ - run_schema_dump_query(p, - "SELECT name, type, sql FROM sqlite_master " - "WHERE sql NOT NULL AND type=='table' AND name!='sqlite_sequence'" - ); - run_schema_dump_query(p, - "SELECT name, type, sql FROM sqlite_master " - "WHERE name=='sqlite_sequence'" - ); - run_table_dump_query(p, - "SELECT sql FROM sqlite_master " - "WHERE sql NOT NULL AND type IN ('index','trigger','view')", 0 - ); - }else{ - int i; - for(i=1; iwritableSchema ){ - fprintf(p->out, "PRAGMA writable_schema=OFF;\n"); - p->writableSchema = 0; - } - sqlite3_exec(p->db, "PRAGMA writable_schema=OFF;", 0, 0, 0); - sqlite3_exec(p->db, "RELEASE dump;", 0, 0, 0); - fprintf(p->out, p->nErr ? "ROLLBACK; -- due to errors\n" : "COMMIT;\n"); - }else - - if( c=='e' && strncmp(azArg[0], "echo", n)==0 && nArg>1 && nArg<3 ){ - p->echoOn = booleanValue(azArg[1]); - }else - - if( c=='e' && strncmp(azArg[0], "exit", n)==0 && nArg==1 ){ - rc = 2; - }else - - if( c=='e' && strncmp(azArg[0], "explain", n)==0 && nArg<3 ){ - int val = nArg>=2 ? booleanValue(azArg[1]) : 1; - if(val == 1) { - if(!p->explainPrev.valid) { - p->explainPrev.valid = 1; - p->explainPrev.mode = p->mode; - p->explainPrev.showHeader = p->showHeader; - memcpy(p->explainPrev.colWidth,p->colWidth,sizeof(p->colWidth)); - } - /* We could put this code under the !p->explainValid - ** condition so that it does not execute if we are already in - ** explain mode. However, always executing it allows us an easy - ** was to reset to explain mode in case the user previously - ** did an .explain followed by a .width, .mode or .header - ** command. 
- */ - p->mode = MODE_Explain; - p->showHeader = 1; - memset(p->colWidth,0,ArraySize(p->colWidth)); - p->colWidth[0] = 4; /* addr */ - p->colWidth[1] = 13; /* opcode */ - p->colWidth[2] = 4; /* P1 */ - p->colWidth[3] = 4; /* P2 */ - p->colWidth[4] = 4; /* P3 */ - p->colWidth[5] = 13; /* P4 */ - p->colWidth[6] = 2; /* P5 */ - p->colWidth[7] = 13; /* Comment */ - }else if (p->explainPrev.valid) { - p->explainPrev.valid = 0; - p->mode = p->explainPrev.mode; - p->showHeader = p->explainPrev.showHeader; - memcpy(p->colWidth,p->explainPrev.colWidth,sizeof(p->colWidth)); - } - }else - - if( c=='h' && (strncmp(azArg[0], "header", n)==0 || - strncmp(azArg[0], "headers", n)==0) && nArg>1 && nArg<3 ){ - p->showHeader = booleanValue(azArg[1]); - }else - - if( c=='h' && strncmp(azArg[0], "help", n)==0 ){ - fprintf(stderr,"%s",zHelp); - if( HAS_TIMER ){ - fprintf(stderr,"%s",zTimerHelp); - } - }else - - if( c=='i' && strncmp(azArg[0], "import", n)==0 && nArg==3 ){ - char *zTable = azArg[2]; /* Insert data into this table */ - char *zFile = azArg[1]; /* The file from which to extract data */ - sqlite3_stmt *pStmt = NULL; /* A statement */ - int nCol; /* Number of columns in the table */ - int nByte; /* Number of bytes in an SQL string */ - int i, j; /* Loop counters */ - int nSep; /* Number of bytes in p->separator[] */ - char *zSql; /* An SQL statement */ - char *zLine; /* A single line of input from the file */ - char **azCol; /* zLine[] broken up into columns */ - char *zCommit; /* How to commit changes */ - FILE *in; /* The input file */ - int lineno = 0; /* Line number of input file */ - - open_db(p); - nSep = strlen30(p->separator); - if( nSep==0 ){ - fprintf(stderr, "Error: non-null separator required for import\n"); - return 1; - } - zSql = sqlite3_mprintf("SELECT * FROM %s", zTable); - if( zSql==0 ){ - fprintf(stderr, "Error: out of memory\n"); - return 1; - } - nByte = strlen30(zSql); - rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0); - sqlite3_free(zSql); - if( rc ){ - if (pStmt) sqlite3_finalize(pStmt); - fprintf(stderr,"Error: %s\n", sqlite3_errmsg(db)); - return 1; - } - nCol = sqlite3_column_count(pStmt); - sqlite3_finalize(pStmt); - pStmt = 0; - if( nCol==0 ) return 0; /* no columns, no error */ - zSql = malloc( nByte + 20 + nCol*2 ); - if( zSql==0 ){ - fprintf(stderr, "Error: out of memory\n"); - return 1; - } - sqlite3_snprintf(nByte+20, zSql, "INSERT INTO %s VALUES(?", zTable); - j = strlen30(zSql); - for(i=1; idb, zSql, -1, &pStmt, 0); - free(zSql); - if( rc ){ - fprintf(stderr, "Error: %s\n", sqlite3_errmsg(db)); - if (pStmt) sqlite3_finalize(pStmt); - return 1; - } - in = fopen(zFile, "rb"); - if( in==0 ){ - fprintf(stderr, "Error: cannot open \"%s\"\n", zFile); - sqlite3_finalize(pStmt); - return 1; - } - azCol = malloc( sizeof(azCol[0])*(nCol+1) ); - if( azCol==0 ){ - fprintf(stderr, "Error: out of memory\n"); - fclose(in); - sqlite3_finalize(pStmt); - return 1; - } - sqlite3_exec(p->db, "BEGIN", 0, 0, 0); - zCommit = "COMMIT"; - while( (zLine = local_getline(0, in, 1))!=0 ){ - char *z, c; - int inQuote = 0; - lineno++; - azCol[0] = zLine; - for(i=0, z=zLine; (c = *z)!=0; z++){ - if( c=='"' ) inQuote = !inQuote; - if( c=='\n' ) lineno++; - if( !inQuote && c==p->separator[0] && strncmp(z,p->separator,nSep)==0 ){ - *z = 0; - i++; - if( idb, zCommit, 0, 0, 0); - }else - - if( c=='i' && strncmp(azArg[0], "indices", n)==0 && nArg<3 ){ - struct callback_data data; - char *zErrMsg = 0; - open_db(p); - memcpy(&data, p, sizeof(data)); - data.showHeader = 0; - data.mode = MODE_List; - if( 
nArg==1 ){ - rc = sqlite3_exec(p->db, - "SELECT name FROM sqlite_master " - "WHERE type='index' AND name NOT LIKE 'sqlite_%' " - "UNION ALL " - "SELECT name FROM sqlite_temp_master " - "WHERE type='index' " - "ORDER BY 1", - callback, &data, &zErrMsg - ); - }else{ - zShellStatic = azArg[1]; - rc = sqlite3_exec(p->db, - "SELECT name FROM sqlite_master " - "WHERE type='index' AND tbl_name LIKE shellstatic() " - "UNION ALL " - "SELECT name FROM sqlite_temp_master " - "WHERE type='index' AND tbl_name LIKE shellstatic() " - "ORDER BY 1", - callback, &data, &zErrMsg - ); - zShellStatic = 0; - } - if( zErrMsg ){ - fprintf(stderr,"Error: %s\n", zErrMsg); - sqlite3_free(zErrMsg); - rc = 1; - }else if( rc != SQLITE_OK ){ - fprintf(stderr,"Error: querying sqlite_master and sqlite_temp_master\n"); - rc = 1; - } - }else - -#ifdef SQLITE_ENABLE_IOTRACE - if( c=='i' && strncmp(azArg[0], "iotrace", n)==0 ){ - extern void (*sqlite3IoTrace)(const char*, ...); - if( iotrace && iotrace!=stdout ) fclose(iotrace); - iotrace = 0; - if( nArg<2 ){ - sqlite3IoTrace = 0; - }else if( strcmp(azArg[1], "-")==0 ){ - sqlite3IoTrace = iotracePrintf; - iotrace = stdout; - }else{ - iotrace = fopen(azArg[1], "w"); - if( iotrace==0 ){ - fprintf(stderr, "Error: cannot open \"%s\"\n", azArg[1]); - sqlite3IoTrace = 0; - rc = 1; - }else{ - sqlite3IoTrace = iotracePrintf; - } - } - }else -#endif - -#ifndef SQLITE_OMIT_LOAD_EXTENSION - if( c=='l' && strncmp(azArg[0], "load", n)==0 && nArg>=2 ){ - const char *zFile, *zProc; - char *zErrMsg = 0; - zFile = azArg[1]; - zProc = nArg>=3 ? azArg[2] : 0; - open_db(p); - rc = sqlite3_load_extension(p->db, zFile, zProc, &zErrMsg); - if( rc!=SQLITE_OK ){ - fprintf(stderr, "Error: %s\n", zErrMsg); - sqlite3_free(zErrMsg); - rc = 1; - } - }else -#endif - - if( c=='l' && strncmp(azArg[0], "log", n)==0 && nArg>=2 ){ - const char *zFile = azArg[1]; - output_file_close(p->pLog); - p->pLog = output_file_open(zFile); - }else - - if( c=='m' && strncmp(azArg[0], "mode", n)==0 && nArg==2 ){ - int n2 = strlen30(azArg[1]); - if( (n2==4 && strncmp(azArg[1],"line",n2)==0) - || - (n2==5 && strncmp(azArg[1],"lines",n2)==0) ){ - p->mode = MODE_Line; - }else if( (n2==6 && strncmp(azArg[1],"column",n2)==0) - || - (n2==7 && strncmp(azArg[1],"columns",n2)==0) ){ - p->mode = MODE_Column; - }else if( n2==4 && strncmp(azArg[1],"list",n2)==0 ){ - p->mode = MODE_List; - }else if( n2==4 && strncmp(azArg[1],"html",n2)==0 ){ - p->mode = MODE_Html; - }else if( n2==3 && strncmp(azArg[1],"tcl",n2)==0 ){ - p->mode = MODE_Tcl; - }else if( n2==3 && strncmp(azArg[1],"csv",n2)==0 ){ - p->mode = MODE_Csv; - sqlite3_snprintf(sizeof(p->separator), p->separator, ","); - }else if( n2==4 && strncmp(azArg[1],"tabs",n2)==0 ){ - p->mode = MODE_List; - sqlite3_snprintf(sizeof(p->separator), p->separator, "\t"); - }else if( n2==6 && strncmp(azArg[1],"insert",n2)==0 ){ - p->mode = MODE_Insert; - set_table_name(p, "table"); - }else { - fprintf(stderr,"Error: mode should be one of: " - "column csv html insert line list tabs tcl\n"); - rc = 1; - } - }else - - if( c=='m' && strncmp(azArg[0], "mode", n)==0 && nArg==3 ){ - int n2 = strlen30(azArg[1]); - if( n2==6 && strncmp(azArg[1],"insert",n2)==0 ){ - p->mode = MODE_Insert; - set_table_name(p, azArg[2]); - }else { - fprintf(stderr, "Error: invalid arguments: " - " \"%s\". 
Enter \".help\" for help\n", azArg[2]); - rc = 1; - } - }else - - if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 && nArg==2 ) { - sqlite3_snprintf(sizeof(p->nullvalue), p->nullvalue, - "%.*s", (int)ArraySize(p->nullvalue)-1, azArg[1]); - }else - - if( c=='o' && strncmp(azArg[0], "output", n)==0 && nArg==2 ){ - if( p->outfile[0]=='|' ){ - pclose(p->out); - }else{ - output_file_close(p->out); - } - p->outfile[0] = 0; - if( azArg[1][0]=='|' ){ - p->out = popen(&azArg[1][1], "w"); - if( p->out==0 ){ - fprintf(stderr,"Error: cannot open pipe \"%s\"\n", &azArg[1][1]); - p->out = stdout; - rc = 1; - }else{ - sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", azArg[1]); - } - }else{ - p->out = output_file_open(azArg[1]); - if( p->out==0 ){ - if( strcmp(azArg[1],"off")!=0 ){ - fprintf(stderr,"Error: cannot write to \"%s\"\n", azArg[1]); - } - p->out = stdout; - rc = 1; - } else { - sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", azArg[1]); - } - } - }else - - if( c=='p' && strncmp(azArg[0], "prompt", n)==0 && (nArg==2 || nArg==3)){ - if( nArg >= 2) { - strncpy(mainPrompt,azArg[1],(int)ArraySize(mainPrompt)-1); - } - if( nArg >= 3) { - strncpy(continuePrompt,azArg[2],(int)ArraySize(continuePrompt)-1); - } - }else - - if( c=='q' && strncmp(azArg[0], "quit", n)==0 && nArg==1 ){ - rc = 2; - }else - - if( c=='r' && n>=3 && strncmp(azArg[0], "read", n)==0 && nArg==2 ){ - FILE *alt = fopen(azArg[1], "rb"); - if( alt==0 ){ - fprintf(stderr,"Error: cannot open \"%s\"\n", azArg[1]); - rc = 1; - }else{ - rc = process_input(p, alt); - fclose(alt); - } - }else - - if( c=='r' && n>=3 && strncmp(azArg[0], "restore", n)==0 && nArg>1 && nArg<4){ - const char *zSrcFile; - const char *zDb; - sqlite3 *pSrc; - sqlite3_backup *pBackup; - int nTimeout = 0; - - if( nArg==2 ){ - zSrcFile = azArg[1]; - zDb = "main"; - }else{ - zSrcFile = azArg[2]; - zDb = azArg[1]; - } - rc = sqlite3_open(zSrcFile, &pSrc); - if( rc!=SQLITE_OK ){ - fprintf(stderr, "Error: cannot open \"%s\"\n", zSrcFile); - sqlite3_close(pSrc); - return 1; - } - open_db(p); - pBackup = sqlite3_backup_init(p->db, zDb, pSrc, "main"); - if( pBackup==0 ){ - fprintf(stderr, "Error: %s\n", sqlite3_errmsg(p->db)); - sqlite3_close(pSrc); - return 1; - } - while( (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK - || rc==SQLITE_BUSY ){ - if( rc==SQLITE_BUSY ){ - if( nTimeout++ >= 3 ) break; - sqlite3_sleep(100); - } - } - sqlite3_backup_finish(pBackup); - if( rc==SQLITE_DONE ){ - rc = 0; - }else if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){ - fprintf(stderr, "Error: source database is busy\n"); - rc = 1; - }else{ - fprintf(stderr, "Error: %s\n", sqlite3_errmsg(p->db)); - rc = 1; - } - sqlite3_close(pSrc); - }else - - if( c=='s' && strncmp(azArg[0], "schema", n)==0 && nArg<3 ){ - struct callback_data data; - char *zErrMsg = 0; - open_db(p); - memcpy(&data, p, sizeof(data)); - data.showHeader = 0; - data.mode = MODE_Semi; - if( nArg>1 ){ - int i; - for(i=0; azArg[1][i]; i++) azArg[1][i] = ToLower(azArg[1][i]); - if( strcmp(azArg[1],"sqlite_master")==0 ){ - char *new_argv[2], *new_colv[2]; - new_argv[0] = "CREATE TABLE sqlite_master (\n" - " type text,\n" - " name text,\n" - " tbl_name text,\n" - " rootpage integer,\n" - " sql text\n" - ")"; - new_argv[1] = 0; - new_colv[0] = "sql"; - new_colv[1] = 0; - callback(&data, 1, new_argv, new_colv); - rc = SQLITE_OK; - }else if( strcmp(azArg[1],"sqlite_temp_master")==0 ){ - char *new_argv[2], *new_colv[2]; - new_argv[0] = "CREATE TEMP TABLE sqlite_temp_master (\n" - " type text,\n" - " name text,\n" - " tbl_name 
text,\n" - " rootpage integer,\n" - " sql text\n" - ")"; - new_argv[1] = 0; - new_colv[0] = "sql"; - new_colv[1] = 0; - callback(&data, 1, new_argv, new_colv); - rc = SQLITE_OK; - }else{ - zShellStatic = azArg[1]; - rc = sqlite3_exec(p->db, - "SELECT sql FROM " - " (SELECT sql sql, type type, tbl_name tbl_name, name name, rowid x" - " FROM sqlite_master UNION ALL" - " SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) " - "WHERE lower(tbl_name) LIKE shellstatic()" - " AND type!='meta' AND sql NOTNULL " - "ORDER BY substr(type,2,1), " - " CASE type WHEN 'view' THEN rowid ELSE name END", - callback, &data, &zErrMsg); - zShellStatic = 0; - } - }else{ - rc = sqlite3_exec(p->db, - "SELECT sql FROM " - " (SELECT sql sql, type type, tbl_name tbl_name, name name, rowid x" - " FROM sqlite_master UNION ALL" - " SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) " - "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%'" - "ORDER BY substr(type,2,1)," - " CASE type WHEN 'view' THEN rowid ELSE name END", - callback, &data, &zErrMsg - ); - } - if( zErrMsg ){ - fprintf(stderr,"Error: %s\n", zErrMsg); - sqlite3_free(zErrMsg); - rc = 1; - }else if( rc != SQLITE_OK ){ - fprintf(stderr,"Error: querying schema information\n"); - rc = 1; - }else{ - rc = 0; - } - }else - - if( c=='s' && strncmp(azArg[0], "separator", n)==0 && nArg==2 ){ - sqlite3_snprintf(sizeof(p->separator), p->separator, - "%.*s", (int)sizeof(p->separator)-1, azArg[1]); - }else - - if( c=='s' && strncmp(azArg[0], "show", n)==0 && nArg==1 ){ - int i; - fprintf(p->out,"%9.9s: %s\n","echo", p->echoOn ? "on" : "off"); - fprintf(p->out,"%9.9s: %s\n","explain", p->explainPrev.valid ? "on" :"off"); - fprintf(p->out,"%9.9s: %s\n","headers", p->showHeader ? "on" : "off"); - fprintf(p->out,"%9.9s: %s\n","mode", modeDescr[p->mode]); - fprintf(p->out,"%9.9s: ", "nullvalue"); - output_c_string(p->out, p->nullvalue); - fprintf(p->out, "\n"); - fprintf(p->out,"%9.9s: %s\n","output", - strlen30(p->outfile) ? p->outfile : "stdout"); - fprintf(p->out,"%9.9s: ", "separator"); - output_c_string(p->out, p->separator); - fprintf(p->out, "\n"); - fprintf(p->out,"%9.9s: %s\n","stats", p->statsOn ? "on" : "off"); - fprintf(p->out,"%9.9s: ","width"); - for (i=0;i<(int)ArraySize(p->colWidth) && p->colWidth[i] != 0;i++) { - fprintf(p->out,"%d ",p->colWidth[i]); - } - fprintf(p->out,"\n"); - }else - - if( c=='s' && strncmp(azArg[0], "stats", n)==0 && nArg>1 && nArg<3 ){ - p->statsOn = booleanValue(azArg[1]); - }else - - if( c=='t' && n>1 && strncmp(azArg[0], "tables", n)==0 && nArg<3 ){ - sqlite3_stmt *pStmt; - char **azResult; - int nRow, nAlloc; - char *zSql = 0; - int ii; - open_db(p); - rc = sqlite3_prepare_v2(p->db, "PRAGMA database_list", -1, &pStmt, 0); - if( rc ) return rc; - zSql = sqlite3_mprintf( - "SELECT name FROM sqlite_master" - " WHERE type IN ('table','view')" - " AND name NOT LIKE 'sqlite_%%'" - " AND name LIKE ?1"); - while( sqlite3_step(pStmt)==SQLITE_ROW ){ - const char *zDbName = (const char*)sqlite3_column_text(pStmt, 1); - if( zDbName==0 || strcmp(zDbName,"main")==0 ) continue; - if( strcmp(zDbName,"temp")==0 ){ - zSql = sqlite3_mprintf( - "%z UNION ALL " - "SELECT 'temp.' || name FROM sqlite_temp_master" - " WHERE type IN ('table','view')" - " AND name NOT LIKE 'sqlite_%%'" - " AND name LIKE ?1", zSql); - }else{ - zSql = sqlite3_mprintf( - "%z UNION ALL " - "SELECT '%q.' 
|| name FROM \"%w\".sqlite_master" - " WHERE type IN ('table','view')" - " AND name NOT LIKE 'sqlite_%%'" - " AND name LIKE ?1", zSql, zDbName, zDbName); - } - } - sqlite3_finalize(pStmt); - zSql = sqlite3_mprintf("%z ORDER BY 1", zSql); - rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); - sqlite3_free(zSql); - if( rc ) return rc; - nRow = nAlloc = 0; - azResult = 0; - if( nArg>1 ){ - sqlite3_bind_text(pStmt, 1, azArg[1], -1, SQLITE_TRANSIENT); - }else{ - sqlite3_bind_text(pStmt, 1, "%", -1, SQLITE_STATIC); - } - while( sqlite3_step(pStmt)==SQLITE_ROW ){ - if( nRow>=nAlloc ){ - char **azNew; - int n = nAlloc*2 + 10; - azNew = sqlite3_realloc(azResult, sizeof(azResult[0])*n); - if( azNew==0 ){ - fprintf(stderr, "Error: out of memory\n"); - break; - } - nAlloc = n; - azResult = azNew; - } - azResult[nRow] = sqlite3_mprintf("%s", sqlite3_column_text(pStmt, 0)); - if( azResult[nRow] ) nRow++; - } - sqlite3_finalize(pStmt); - if( nRow>0 ){ - int len, maxlen = 0; - int i, j; - int nPrintCol, nPrintRow; - for(i=0; imaxlen ) maxlen = len; - } - nPrintCol = 80/(maxlen+2); - if( nPrintCol<1 ) nPrintCol = 1; - nPrintRow = (nRow + nPrintCol - 1)/nPrintCol; - for(i=0; i=8 && strncmp(azArg[0], "testctrl", n)==0 && nArg>=2 ){ - static const struct { - const char *zCtrlName; /* Name of a test-control option */ - int ctrlCode; /* Integer code for that option */ - } aCtrl[] = { - { "prng_save", SQLITE_TESTCTRL_PRNG_SAVE }, - { "prng_restore", SQLITE_TESTCTRL_PRNG_RESTORE }, - { "prng_reset", SQLITE_TESTCTRL_PRNG_RESET }, - { "bitvec_test", SQLITE_TESTCTRL_BITVEC_TEST }, - { "fault_install", SQLITE_TESTCTRL_FAULT_INSTALL }, - { "benign_malloc_hooks", SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS }, - { "pending_byte", SQLITE_TESTCTRL_PENDING_BYTE }, - { "assert", SQLITE_TESTCTRL_ASSERT }, - { "always", SQLITE_TESTCTRL_ALWAYS }, - { "reserve", SQLITE_TESTCTRL_RESERVE }, - { "optimizations", SQLITE_TESTCTRL_OPTIMIZATIONS }, - { "iskeyword", SQLITE_TESTCTRL_ISKEYWORD }, - { "scratchmalloc", SQLITE_TESTCTRL_SCRATCHMALLOC }, - }; - int testctrl = -1; - int rc = 0; - int i, n; - open_db(p); - - /* convert testctrl text option to value. allow any unique prefix - ** of the option name, or a numerical value. 
*/ - n = strlen30(azArg[1]); - for(i=0; i<(int)(sizeof(aCtrl)/sizeof(aCtrl[0])); i++){ - if( strncmp(azArg[1], aCtrl[i].zCtrlName, n)==0 ){ - if( testctrl<0 ){ - testctrl = aCtrl[i].ctrlCode; - }else{ - fprintf(stderr, "ambiguous option name: \"%s\"\n", azArg[1]); - testctrl = -1; - break; - } - } - } - if( testctrl<0 ) testctrl = atoi(azArg[1]); - if( (testctrlSQLITE_TESTCTRL_LAST) ){ - fprintf(stderr,"Error: invalid testctrl option: %s\n", azArg[1]); - }else{ - switch(testctrl){ - - /* sqlite3_test_control(int, db, int) */ - case SQLITE_TESTCTRL_OPTIMIZATIONS: - case SQLITE_TESTCTRL_RESERVE: - if( nArg==3 ){ - int opt = (int)strtol(azArg[2], 0, 0); - rc = sqlite3_test_control(testctrl, p->db, opt); - printf("%d (0x%08x)\n", rc, rc); - } else { - fprintf(stderr,"Error: testctrl %s takes a single int option\n", - azArg[1]); - } - break; - - /* sqlite3_test_control(int) */ - case SQLITE_TESTCTRL_PRNG_SAVE: - case SQLITE_TESTCTRL_PRNG_RESTORE: - case SQLITE_TESTCTRL_PRNG_RESET: - if( nArg==2 ){ - rc = sqlite3_test_control(testctrl); - printf("%d (0x%08x)\n", rc, rc); - } else { - fprintf(stderr,"Error: testctrl %s takes no options\n", azArg[1]); - } - break; - - /* sqlite3_test_control(int, uint) */ - case SQLITE_TESTCTRL_PENDING_BYTE: - if( nArg==3 ){ - unsigned int opt = (unsigned int)atoi(azArg[2]); - rc = sqlite3_test_control(testctrl, opt); - printf("%d (0x%08x)\n", rc, rc); - } else { - fprintf(stderr,"Error: testctrl %s takes a single unsigned" - " int option\n", azArg[1]); - } - break; - - /* sqlite3_test_control(int, int) */ - case SQLITE_TESTCTRL_ASSERT: - case SQLITE_TESTCTRL_ALWAYS: - if( nArg==3 ){ - int opt = atoi(azArg[2]); - rc = sqlite3_test_control(testctrl, opt); - printf("%d (0x%08x)\n", rc, rc); - } else { - fprintf(stderr,"Error: testctrl %s takes a single int option\n", - azArg[1]); - } - break; - - /* sqlite3_test_control(int, char *) */ -#ifdef SQLITE_N_KEYWORD - case SQLITE_TESTCTRL_ISKEYWORD: - if( nArg==3 ){ - const char *opt = azArg[2]; - rc = sqlite3_test_control(testctrl, opt); - printf("%d (0x%08x)\n", rc, rc); - } else { - fprintf(stderr,"Error: testctrl %s takes a single char * option\n", - azArg[1]); - } - break; -#endif - - case SQLITE_TESTCTRL_BITVEC_TEST: - case SQLITE_TESTCTRL_FAULT_INSTALL: - case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: - case SQLITE_TESTCTRL_SCRATCHMALLOC: - default: - fprintf(stderr,"Error: CLI support for testctrl %s not implemented\n", - azArg[1]); - break; - } - } - }else - - if( c=='t' && n>4 && strncmp(azArg[0], "timeout", n)==0 && nArg==2 ){ - open_db(p); - sqlite3_busy_timeout(p->db, atoi(azArg[1])); - }else - - if( HAS_TIMER && c=='t' && n>=5 && strncmp(azArg[0], "timer", n)==0 - && nArg==2 - ){ - enableTimer = booleanValue(azArg[1]); - }else - - if( c=='t' && strncmp(azArg[0], "trace", n)==0 && nArg>1 ){ - open_db(p); - output_file_close(p->traceOut); - p->traceOut = output_file_open(azArg[1]); -#ifndef SQLITE_OMIT_TRACE - if( p->traceOut==0 ){ - sqlite3_trace(p->db, 0, 0); - }else{ - sqlite3_trace(p->db, sql_trace_callback, p->traceOut); - } -#endif - }else - - if( c=='v' && strncmp(azArg[0], "version", n)==0 ){ - printf("SQLite %s %s\n" /*extra-version-info*/, - sqlite3_libversion(), sqlite3_sourceid()); - }else - - if( c=='v' && strncmp(azArg[0], "vfsname", n)==0 ){ - const char *zDbName = nArg==2 ? 
azArg[1] : "main";
-    char *zVfsName = 0;
-    if( p->db ){
-      sqlite3_file_control(p->db, zDbName, SQLITE_FCNTL_VFSNAME, &zVfsName);
-      if( zVfsName ){
-        printf("%s\n", zVfsName);
-        sqlite3_free(zVfsName);
-      }
-    }
-  }else
-
-  if( c=='w' && strncmp(azArg[0], "width", n)==0 && nArg>1 ){
-    int j;
-    assert( nArg<=ArraySize(azArg) );
-    for(j=1; j<nArg && j<ArraySize(p->colWidth); j++){
-      p->colWidth[j-1] = atoi(azArg[j]);
-    }
-  }else
-
-  {
-    fprintf(stderr, "Error: unknown command or invalid arguments: "
-      " \"%s\". Enter \".help\" for help\n", azArg[0]);
-    rc = 1;
-  }
-
-  return rc;
-}
-
-/*
-** Return TRUE if a semicolon occurs anywhere in the first N characters
-** of string z[].
-*/
-static int _contains_semicolon(const char *z, int N){
-  int i;
-  for(i=0; i<N; i++){  if( z[i]==';' ) return 1; }
-  return 0;
-}
-
-/*
-** Test to see if a line consists entirely of whitespace.
-*/
-static int _all_whitespace(const char *z){
-  for(; *z; z++){
-    if( IsSpace(z[0]) ) continue;
-    if( *z=='/' && z[1]=='*' ){
-      z += 2;
-      while( *z && (*z!='*' || z[1]!='/') ){ z++; }
-      if( *z==0 ) return 0;
-      z++;
-      continue;
-    }
-    if( *z=='-' && z[1]=='-' ){
-      z += 2;
-      while( *z && *z!='\n' ){ z++; }
-      if( *z==0 ) return 1;
-      continue;
-    }
-    return 0;
-  }
-  return 1;
-}
-
-/*
-** Return TRUE if the line typed in is an SQL command terminator other
-** than a semi-colon.  The SQL Server style "go" command is understood
-** as is the Oracle "/".
-*/
-static int _is_command_terminator(const char *zLine){
-  while( IsSpace(zLine[0]) ){ zLine++; };
-  if( zLine[0]=='/' && _all_whitespace(&zLine[1]) ){
-    return 1;  /* Oracle */
-  }
-  if( ToLower(zLine[0])=='g' && ToLower(zLine[1])=='o'
-         && _all_whitespace(&zLine[2]) ){
-    return 1;  /* SQL Server */
-  }
-  return 0;
-}
-
-/*
-** Return true if zSql is a complete SQL statement.  Return false if it
-** ends in the middle of a string literal or C-style comment.
-*/
-static int _is_complete(char *zSql, int nSql){
-  int rc;
-  if( zSql==0 ) return 1;
-  zSql[nSql] = ';';
-  zSql[nSql+1] = 0;
-  rc = sqlite3_complete(zSql);
-  zSql[nSql] = 0;
-  return rc;
-}
-
-/*
-** Read input from *in and process it.  If *in==0 then input
-** is interactive - the user is typing it.  Otherwise, input
-** is coming from a file or device.  A prompt is issued and history
-** is saved only if input is interactive.  An interrupt signal will
-** cause this routine to exit immediately, unless input is interactive.
-**
-** Return the number of errors.
-*/
-static int process_input(struct callback_data *p, FILE *in){
-  char *zLine = 0;
-  char *zSql = 0;
-  int nSql = 0;
-  int nSqlPrior = 0;
-  char *zErrMsg;
-  int rc;
-  int errCnt = 0;
-  int lineno = 0;
-  int startline = 0;
-
-  while( errCnt==0 || !bail_on_error || (in==0 && stdin_is_interactive) ){
-    fflush(p->out);
-    free(zLine);
-    zLine = one_input_line(zSql, in);
-    if( zLine==0 ){
-      /* End of input */
-      if( stdin_is_interactive ) printf("\n");
-      break;
-    }
-    if( seenInterrupt ){
-      if( in!=0 ) break;
-      seenInterrupt = 0;
-    }
-    lineno++;
-    if( (zSql==0 || zSql[0]==0) && _all_whitespace(zLine) ) continue;
-    if( zLine && zLine[0]=='.' && nSql==0 ){
-      if( p->echoOn ) printf("%s\n", zLine);
-      rc = do_meta_command(zLine, p);
-      if( rc==2 ){ /* exit requested */
-        break;
-      }else if( rc ){
-        errCnt++;
-      }
-      continue;
-    }
-    if( _is_command_terminator(zLine) && _is_complete(zSql, nSql) ){
-      memcpy(zLine,";",2);
-    }
-    nSqlPrior = nSql;
-    if( zSql==0 ){
-      int i;
-      for(i=0; zLine[i] && IsSpace(zLine[i]); i++){}
-      if( zLine[i]!=0 ){
-        nSql = strlen30(zLine);
-        zSql = malloc( nSql+3 );
-        if( zSql==0 ){
-          fprintf(stderr, "Error: out of memory\n");
-          exit(1);
-        }
-        memcpy(zSql, zLine, nSql+1);
-        startline = lineno;
-      }
-    }else{
-      int len = strlen30(zLine);
-      zSql = realloc( zSql, nSql + len + 4 );
-      if( zSql==0 ){
-        fprintf(stderr,"Error: out of memory\n");
-        exit(1);
-      }
-      zSql[nSql++] = '\n';
-      memcpy(&zSql[nSql], zLine, len+1);
-      nSql += len;
-    }
-    if( zSql && _contains_semicolon(&zSql[nSqlPrior], nSql-nSqlPrior)
-             && sqlite3_complete(zSql) ){
-      p->cnt = 0;
-      open_db(p);
-      BEGIN_TIMER;
-      rc = shell_exec(p->db, zSql, shell_callback, p, &zErrMsg);
-      END_TIMER;
-      if( rc || zErrMsg ){
-        char zPrefix[100];
-        if( in!=0 || !stdin_is_interactive ){
-          sqlite3_snprintf(sizeof(zPrefix), zPrefix,
-                           "Error: near line %d:", startline);
-        }else{
-          sqlite3_snprintf(sizeof(zPrefix), zPrefix, "Error:");
-        }
-        if( zErrMsg!=0 ){
-          fprintf(stderr, "%s %s\n", zPrefix, zErrMsg);
-          sqlite3_free(zErrMsg);
-          zErrMsg = 0;
-        }else{
-          fprintf(stderr, "%s %s\n", zPrefix, sqlite3_errmsg(p->db));
-        }
-        errCnt++;
-      }
-      free(zSql);
-      zSql = 0;
-      nSql = 0;
-    }
-  }
-  if( zSql ){
-    if( !_all_whitespace(zSql) ){
-      fprintf(stderr, "Error: incomplete SQL: %s\n", zSql);
-    }
-    free(zSql);
-  }
-  free(zLine);
-  return errCnt;
-}
-
-/*
-** Return a pathname which is the user's home directory.  A
-** 0 return indicates an error of some kind.
-*/ -static char *find_home_dir(void){ - static char *home_dir = NULL; - if( home_dir ) return home_dir; - -#if !defined(_WIN32) && !defined(WIN32) && !defined(__OS2__) && !defined(_WIN32_WCE) && !defined(__RTP__) && !defined(_WRS_KERNEL) - struct passwd *pwent; - uid_t uid = getuid(); - if( (pwent=getpwuid(uid)) != NULL) { - home_dir = pwent->pw_dir; - } -#endif - -#if defined(_WIN32_WCE) - /* Windows CE (arm-wince-mingw32ce-gcc) does not provide getenv() - */ - home_dir = "/"; -#else - -#if defined(_WIN32) || defined(WIN32) || defined(__OS2__) - if (!home_dir) { - home_dir = getenv("USERPROFILE"); - } -#endif - - if (!home_dir) { - home_dir = getenv("HOME"); - } - -#if defined(_WIN32) || defined(WIN32) || defined(__OS2__) - if (!home_dir) { - char *zDrive, *zPath; - int n; - zDrive = getenv("HOMEDRIVE"); - zPath = getenv("HOMEPATH"); - if( zDrive && zPath ){ - n = strlen30(zDrive) + strlen30(zPath) + 1; - home_dir = malloc( n ); - if( home_dir==0 ) return 0; - sqlite3_snprintf(n, home_dir, "%s%s", zDrive, zPath); - return home_dir; - } - home_dir = "c:\\"; - } -#endif - -#endif /* !_WIN32_WCE */ - - if( home_dir ){ - int n = strlen30(home_dir) + 1; - char *z = malloc( n ); - if( z ) memcpy(z, home_dir, n); - home_dir = z; - } - - return home_dir; -} - -/* -** Read input from the file given by sqliterc_override. Or if that -** parameter is NULL, take input from ~/.sqliterc -** -** Returns the number of errors. -*/ -static int process_sqliterc( - struct callback_data *p, /* Configuration data */ - const char *sqliterc_override /* Name of config file. NULL to use default */ -){ - char *home_dir = NULL; - const char *sqliterc = sqliterc_override; - char *zBuf = 0; - FILE *in = NULL; - int rc = 0; - - if (sqliterc == NULL) { - home_dir = find_home_dir(); - if( home_dir==0 ){ -#if !defined(__RTP__) && !defined(_WRS_KERNEL) - fprintf(stderr,"%s: Error: cannot locate your home directory\n", Argv0); -#endif - return 1; - } - zBuf = sqlite3_mprintf("%s/.sqliterc",home_dir); - sqliterc = zBuf; - } - in = fopen(sqliterc,"rb"); - if( in ){ - if( stdin_is_interactive ){ - fprintf(stderr,"-- Loading resources from %s\n",sqliterc); - } - rc = process_input(p,in); - fclose(in); - } - sqlite3_free(zBuf); - return rc; -} - -/* -** Show available command line options -*/ -static const char zOptions[] = - " -bail stop after hitting an error\n" - " -batch force batch I/O\n" - " -column set output mode to 'column'\n" - " -cmd command run \"command\" before reading stdin\n" - " -csv set output mode to 'csv'\n" - " -echo print commands before execution\n" - " -init filename read/process named file\n" - " -[no]header turn headers on or off\n" - " -help show this message\n" - " -html set output mode to HTML\n" - " -interactive force interactive I/O\n" - " -line set output mode to 'line'\n" - " -list set output mode to 'list'\n" -#ifdef SQLITE_ENABLE_MULTIPLEX - " -multiplex enable the multiplexor VFS\n" -#endif - " -nullvalue 'text' set text string for NULL values\n" - " -separator 'x' set output field separator (|)\n" - " -stats print memory stats before each finalize\n" - " -version show SQLite version\n" - " -vfs NAME use NAME as the default VFS\n" -#ifdef SQLITE_ENABLE_VFSTRACE - " -vfstrace enable tracing of all VFS calls\n" -#endif -; -static void usage(int showDetail){ - fprintf(stderr, - "Usage: %s [OPTIONS] FILENAME [SQL]\n" - "FILENAME is the name of an SQLite database. 
A new database is created\n"
-      "if the file does not previously exist.\n", Argv0);
-  if( showDetail ){
-    fprintf(stderr, "OPTIONS include:\n%s", zOptions);
-  }else{
-    fprintf(stderr, "Use the -help option for additional information\n");
-  }
-  exit(1);
-}
-
-/*
-** Initialize the state information in data
-*/
-static void main_init(struct callback_data *data) {
-  memset(data, 0, sizeof(*data));
-  data->mode = MODE_List;
-  memcpy(data->separator,"|", 2);
-  data->showHeader = 0;
-  sqlite3_config(SQLITE_CONFIG_URI, 1);
-  sqlite3_config(SQLITE_CONFIG_LOG, shellLog, data);
-  sqlite3_snprintf(sizeof(mainPrompt), mainPrompt,"sqlite> ");
-  sqlite3_snprintf(sizeof(continuePrompt), continuePrompt,"   ...> ");
-  sqlite3_config(SQLITE_CONFIG_SINGLETHREAD);
-}
-
-int main(int argc, char **argv){
-  char *zErrMsg = 0;
-  struct callback_data data;
-  const char *zInitFile = 0;
-  char *zFirstCmd = 0;
-  int i;
-  int rc = 0;
-
-  if( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)!=0 ){
-    fprintf(stderr, "SQLite header and source version mismatch\n%s\n%s\n",
-            sqlite3_sourceid(), SQLITE_SOURCE_ID);
-    exit(1);
-  }
-  Argv0 = argv[0];
-  main_init(&data);
-  stdin_is_interactive = isatty(0);
-
-  /* Make sure we have a valid signal handler early, before anything
-  ** else is done.
-  */
-#ifdef SIGINT
-  signal(SIGINT, interrupt_handler);
-#endif
-
-  /* Do an initial pass through the command-line argument to locate
-  ** the name of the database file, the name of the initialization file,
-  ** the size of the alternative malloc heap,
-  ** and the first command to execute.
-  */
-  for(i=1; i<argc-1; i++){
-    char *z;
-    if( argv[i][0]!='-' ) break;
-    z = argv[i];
-    if( z[1]=='-' ) z++;
-    if( strcmp(z,"-separator")==0
-     || strcmp(z,"-nullvalue")==0
-     || strcmp(z,"-cmd")==0
-    ){
-      i++;
-    }else if( strcmp(z,"-init")==0 ){
-      i++;
-      zInitFile = argv[i];
-    /* Need to check for batch mode here to so we can avoid printing
-    ** informational messages (like from process_sqliterc) before
-    ** we do the actual processing of arguments later in a second pass.
-    */
-    }else if( strcmp(z,"-batch")==0 ){
-      stdin_is_interactive = 0;
-    }else if( strcmp(z,"-heap")==0 ){
-#if defined(SQLITE_ENABLE_MEMSYS3) || defined(SQLITE_ENABLE_MEMSYS5)
-      int j, c;
-      const char *zSize;
-      sqlite3_int64 szHeap;
-
-      zSize = argv[++i];
-      szHeap = atoi(zSize);
-      for(j=0; (c = zSize[j])!=0; j++){
-        if( c=='M' ){ szHeap *= 1000000; break; }
-        if( c=='K' ){ szHeap *= 1000; break; }
-        if( c=='G' ){ szHeap *= 1000000000; break; }
-      }
-      if( szHeap>0x7fff0000 ) szHeap = 0x7fff0000;
-      sqlite3_config(SQLITE_CONFIG_HEAP, malloc((int)szHeap), (int)szHeap, 64);
-#endif
-#ifdef SQLITE_ENABLE_VFSTRACE
-    }else if( strcmp(z,"-vfstrace")==0 ){
-      extern int vfstrace_register(
-         const char *zTraceName,
-         const char *zOldVfsName,
-         int (*xOut)(const char*,void*),
-         void *pOutArg,
-         int makeDefault
-      );
-      vfstrace_register("trace",0,(int(*)(const char*,void*))fputs,stderr,1);
-#endif
-#ifdef SQLITE_ENABLE_MULTIPLEX
-    }else if( strcmp(z,"-multiplex")==0 ){
-      extern int sqlite3_multiplex_initialize(const char*,int);
-      sqlite3_multiplex_initialize(0, 1);
-#endif
-    }else if( strcmp(z,"-vfs")==0 ){
-      sqlite3_vfs *pVfs = sqlite3_vfs_find(argv[++i]);
-      if( pVfs ){
-        sqlite3_vfs_register(pVfs, 1);
-      }else{
-        fprintf(stderr, "no such VFS: \"%s\"\n", argv[i]);
-        exit(1);
-      }
-    }
-  }
-  if( i<argc ){
-    data.zDbFilename = argv[i++];
-  }else{
-#ifndef SQLITE_OMIT_MEMORYDB
-    data.zDbFilename = ":memory:";
-#else
-    data.zDbFilename = 0;
-#endif
-  }
-  if( i<argc ){
-    zFirstCmd = argv[i++];
-  }
-  if( i<argc ){
-    fprintf(stderr,"%s: Error: too many options: \"%s\"\n", Argv0, argv[i]);
-    fprintf(stderr,"Use -help for a list of options.\n");
-    return 1;
-  }
-  data.out = stdout;
-
-  /* Go ahead and open the database file if it already exists.  If the
-  ** file does not exist, delay opening it.  This prevents empty database
-  ** files from being created if a user mistypes the database name argument
-  ** to the sqlite command-line tool.
-  */
-  if( access(data.zDbFilename, 0)==0 ){
-    open_db(&data);
-  }
-
-  /* Process the initialization file if there is one.  If no -init option
-  ** is given on the command line, look for a file named ~/.sqliterc and
-  ** try to process it.
-  */
-  rc = process_sqliterc(&data,zInitFile);
-  if( rc>0 ){
-    return rc;
-  }
-
-  /* Make a second pass through the command-line argument and set
-  ** options.  This second pass is delayed until after the initialization
-  ** file is processed so that the command-line arguments will override
-  ** settings in the initialization file.
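  ** (Editorial example, not in the original source: if ~/.sqliterc contains
  ** ".mode csv" and the shell is started as "sqlite3 -column test.db", the
  ** second pass below runs after process_sqliterc(), so the session ends up
  ** in column mode; "test.db" is an illustrative name.)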
-  */
-  for(i=1; i<argc; i++){
-    char *z = argv[i];
-    if( z[0]!='-' ) continue;
-    if( z[1]=='-' ) z++;
-    if( strcmp(z,"-init")==0 ){
-      i++;
-    }else if( strcmp(z,"-html")==0 ){
-      data.mode = MODE_Html;
-    }else if( strcmp(z,"-list")==0 ){
-      data.mode = MODE_List;
-    }else if( strcmp(z,"-line")==0 ){
-      data.mode = MODE_Line;
-    }else if( strcmp(z,"-column")==0 ){
-      data.mode = MODE_Column;
-    }else if( strcmp(z,"-csv")==0 ){
-      data.mode = MODE_Csv;
-      memcpy(data.separator,",",2);
-    }else if( strcmp(z,"-separator")==0 ){
-      i++;
-      if(i>=argc){
-        fprintf(stderr,"%s: Error: missing argument for option: %s\n",
-                        Argv0, z);
-        fprintf(stderr,"Use -help for a list of options.\n");
-        return 1;
-      }
-      sqlite3_snprintf(sizeof(data.separator), data.separator,
-                       "%.*s",(int)sizeof(data.separator)-1,argv[i]);
-    }else if( strcmp(z,"-nullvalue")==0 ){
-      i++;
-      if(i>=argc){
-        fprintf(stderr,"%s: Error: missing argument for option: %s\n",
-                        Argv0, z);
-        fprintf(stderr,"Use -help for a list of options.\n");
-        return 1;
-      }
-      sqlite3_snprintf(sizeof(data.nullvalue), data.nullvalue,
-                       "%.*s",(int)sizeof(data.nullvalue)-1,argv[i]);
-    }else if( strcmp(z,"-header")==0 ){
-      data.showHeader = 1;
-    }else if( strcmp(z,"-noheader")==0 ){
-      data.showHeader = 0;
-    }else if( strcmp(z,"-echo")==0 ){
-      data.echoOn = 1;
-    }else if( strcmp(z,"-stats")==0 ){
-      data.statsOn = 1;
-    }else if( strcmp(z,"-bail")==0 ){
-      bail_on_error = 1;
-    }else if( strcmp(z,"-version")==0 ){
-      printf("%s %s\n", sqlite3_libversion(), sqlite3_sourceid());
-      return 0;
-    }else if( strcmp(z,"-interactive")==0 ){
-      stdin_is_interactive = 1;
-    }else if( strcmp(z,"-batch")==0 ){
-      stdin_is_interactive = 0;
-    }else if( strcmp(z,"-heap")==0 ){
-      i++;
-    }else if( strcmp(z,"-vfs")==0 ){
-      i++;
-#ifdef SQLITE_ENABLE_VFSTRACE
-    }else if( strcmp(z,"-vfstrace")==0 ){
-      i++;
-#endif
-#ifdef SQLITE_ENABLE_MULTIPLEX
-    }else if( strcmp(z,"-multiplex")==0 ){
-      i++;
-#endif
-    }else if( strcmp(z,"-help")==0 ){
-      usage(1);
-    }else if( strcmp(z,"-cmd")==0 ){
-      if( i==argc-1 ) break;
-      i++;
-      z = argv[i];
-      if( z[0]=='.' ){
-        rc = do_meta_command(z, &data);
-        if( rc && bail_on_error ) return rc;
-      }else{
-        open_db(&data);
-        rc = shell_exec(data.db, z, shell_callback, &data, &zErrMsg);
-        if( zErrMsg!=0 ){
-          fprintf(stderr,"Error: %s\n", zErrMsg);
-          if( bail_on_error ) return rc!=0 ? rc : 1;
-        }else if( rc!=0 ){
-          fprintf(stderr,"Error: unable to process SQL \"%s\"\n", z);
-          if( bail_on_error ) return rc;
-        }
-      }
-    }else{
-      fprintf(stderr,"%s: Error: unknown option: %s\n", Argv0, z);
-      fprintf(stderr,"Use -help for a list of options.\n");
-      return 1;
-    }
-  }
-
-  if( zFirstCmd ){
-    /* Run just the command that follows the database name
-    */
-    if( zFirstCmd[0]=='.' ){
-      rc = do_meta_command(zFirstCmd, &data);
-    }else{
-      open_db(&data);
-      rc = shell_exec(data.db, zFirstCmd, shell_callback, &data, &zErrMsg);
-      if( zErrMsg!=0 ){
-        fprintf(stderr,"Error: %s\n", zErrMsg);
-        return rc!=0 ?
rc : 1; - }else if( rc!=0 ){ - fprintf(stderr,"Error: unable to process SQL \"%s\"\n", zFirstCmd); - return rc; - } - } - }else{ - /* Run commands received from standard input - */ - if( stdin_is_interactive ){ - char *zHome; - char *zHistory = 0; - int nHistory; - printf( - "SQLite version %s %.19s\n" /*extra-version-info*/ - "Enter \".help\" for instructions\n" - "Enter SQL statements terminated with a \";\"\n", - sqlite3_libversion(), sqlite3_sourceid() - ); - zHome = find_home_dir(); - if( zHome ){ - nHistory = strlen30(zHome) + 20; - if( (zHistory = malloc(nHistory))!=0 ){ - sqlite3_snprintf(nHistory, zHistory,"%s/.sqlite_history", zHome); - } - } -#if defined(HAVE_READLINE) && HAVE_READLINE==1 - if( zHistory ) read_history(zHistory); -#endif - rc = process_input(&data, 0); - if( zHistory ){ - stifle_history(100); - write_history(zHistory); - free(zHistory); - } - }else{ - rc = process_input(&data, stdin); - } - } - set_table_name(&data, 0); - if( data.db ){ - sqlite3_close(data.db); - } - return rc; -} diff --git a/projects/VisualStudio/SQLite/sqlite3.c b/projects/VisualStudio/SQLite/sqlite3.c deleted file mode 100644 index f69816e2..00000000 --- a/projects/VisualStudio/SQLite/sqlite3.c +++ /dev/null @@ -1,138243 +0,0 @@ -/****************************************************************************** -** This file is an amalgamation of many separate C source files from SQLite -** version 3.7.13. By combining all the individual C code files into this -** single large file, the entire code can be compiled as a single translation -** unit. This allows many compilers to do optimizations that would not be -** possible if the files were compiled separately. Performance improvements -** of 5% or more are commonly seen when SQLite is compiled as a single -** translation unit. -** -** This file is all you need to compile SQLite. To use SQLite in other -** programs, you need this file and the "sqlite3.h" header file that defines -** the programming interface to the SQLite library. (If you do not have -** the "sqlite3.h" header file at hand, you will find a copy embedded within -** the text of this file. Search for "Begin file sqlite3.h" to find the start -** of the embedded sqlite3.h header file.) Additional code files may be needed -** if you want a wrapper to interface SQLite with your choice of programming -** language. The code for the "sqlite3" command-line shell is also in a -** separate file. This file contains only code for the core SQLite library. -*/ -#define SQLITE_CORE 1 -#define SQLITE_AMALGAMATION 1 -#ifndef SQLITE_PRIVATE -# define SQLITE_PRIVATE static -#endif -#ifndef SQLITE_API -# define SQLITE_API -#endif -/************** Begin file sqliteInt.h ***************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Internal interface definitions for SQLite. -** -*/ -#ifndef _SQLITEINT_H_ -#define _SQLITEINT_H_ - -/* -** These #defines should enable >2GB file support on POSIX if the -** underlying operating system supports it. If the OS lacks -** large file support, or if the OS is windows, these should be no-ops. -** -** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any -** system #includes. 
Hence, this block of code must be the very first -** code in all source files. -** -** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch -** on the compiler command line. This is necessary if you are compiling -** on a recent machine (ex: Red Hat 7.2) but you want your code to work -** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2 -** without this option, LFS is enable. But LFS does not exist in the kernel -** in Red Hat 6.0, so the code won't work. Hence, for maximum binary -** portability you should omit LFS. -** -** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later. -*/ -#ifndef SQLITE_DISABLE_LFS -# define _LARGE_FILE 1 -# ifndef _FILE_OFFSET_BITS -# define _FILE_OFFSET_BITS 64 -# endif -# define _LARGEFILE_SOURCE 1 -#endif - -/* -** Include the configuration header output by 'configure' if we're using the -** autoconf-based build -*/ -#ifdef _HAVE_SQLITE_CONFIG_H -#include "config.h" -#endif - -/************** Include sqliteLimit.h in the middle of sqliteInt.h ***********/ -/************** Begin file sqliteLimit.h *************************************/ -/* -** 2007 May 7 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file defines various limits of what SQLite can process. -*/ - -/* -** The maximum length of a TEXT or BLOB in bytes. This also -** limits the size of a row in a table or index. -** -** The hard limit is the ability of a 32-bit signed integer -** to count the size: 2^31-1 or 2147483647. -*/ -#ifndef SQLITE_MAX_LENGTH -# define SQLITE_MAX_LENGTH 1000000000 -#endif - -/* -** This is the maximum number of -** -** * Columns in a table -** * Columns in an index -** * Columns in a view -** * Terms in the SET clause of an UPDATE statement -** * Terms in the result set of a SELECT statement -** * Terms in the GROUP BY or ORDER BY clauses of a SELECT statement. -** * Terms in the VALUES clause of an INSERT statement -** -** The hard upper limit here is 32676. Most database people will -** tell you that in a well-normalized database, you usually should -** not have more than a dozen or so columns in any table. And if -** that is the case, there is no point in having more than a few -** dozen values in any of the other situations described above. -*/ -#ifndef SQLITE_MAX_COLUMN -# define SQLITE_MAX_COLUMN 2000 -#endif - -/* -** The maximum length of a single SQL statement in bytes. -** -** It used to be the case that setting this value to zero would -** turn the limit off. That is no longer true. It is not possible -** to turn this limit off. -*/ -#ifndef SQLITE_MAX_SQL_LENGTH -# define SQLITE_MAX_SQL_LENGTH 1000000000 -#endif - -/* -** The maximum depth of an expression tree. This is limited to -** some extent by SQLITE_MAX_SQL_LENGTH. But sometime you might -** want to place more severe limits on the complexity of an -** expression. -** -** A value of 0 used to mean that the limit was not enforced. -** But that is no longer true. The limit is now strictly enforced -** at all times. -*/ -#ifndef SQLITE_MAX_EXPR_DEPTH -# define SQLITE_MAX_EXPR_DEPTH 1000 -#endif - -/* -** The maximum number of terms in a compound SELECT statement. 
-** The code generator for compound SELECT statements does one -** level of recursion for each term. A stack overflow can result -** if the number of terms is too large. In practice, most SQL -** never has more than 3 or 4 terms. Use a value of 0 to disable -** any limit on the number of terms in a compount SELECT. -*/ -#ifndef SQLITE_MAX_COMPOUND_SELECT -# define SQLITE_MAX_COMPOUND_SELECT 500 -#endif - -/* -** The maximum number of opcodes in a VDBE program. -** Not currently enforced. -*/ -#ifndef SQLITE_MAX_VDBE_OP -# define SQLITE_MAX_VDBE_OP 25000 -#endif - -/* -** The maximum number of arguments to an SQL function. -*/ -#ifndef SQLITE_MAX_FUNCTION_ARG -# define SQLITE_MAX_FUNCTION_ARG 127 -#endif - -/* -** The maximum number of in-memory pages to use for the main database -** table and for temporary tables. The SQLITE_DEFAULT_CACHE_SIZE -*/ -#ifndef SQLITE_DEFAULT_CACHE_SIZE -# define SQLITE_DEFAULT_CACHE_SIZE 2000 -#endif -#ifndef SQLITE_DEFAULT_TEMP_CACHE_SIZE -# define SQLITE_DEFAULT_TEMP_CACHE_SIZE 500 -#endif - -/* -** The default number of frames to accumulate in the log file before -** checkpointing the database in WAL mode. -*/ -#ifndef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT -# define SQLITE_DEFAULT_WAL_AUTOCHECKPOINT 1000 -#endif - -/* -** The maximum number of attached databases. This must be between 0 -** and 62. The upper bound on 62 is because a 64-bit integer bitmap -** is used internally to track attached databases. -*/ -#ifndef SQLITE_MAX_ATTACHED -# define SQLITE_MAX_ATTACHED 10 -#endif - - -/* -** The maximum value of a ?nnn wildcard that the parser will accept. -*/ -#ifndef SQLITE_MAX_VARIABLE_NUMBER -# define SQLITE_MAX_VARIABLE_NUMBER 999 -#endif - -/* Maximum page size. The upper bound on this value is 65536. This a limit -** imposed by the use of 16-bit offsets within each page. -** -** Earlier versions of SQLite allowed the user to change this value at -** compile time. This is no longer permitted, on the grounds that it creates -** a library that is technically incompatible with an SQLite library -** compiled with a different limit. If a process operating on a database -** with a page-size of 65536 bytes crashes, then an instance of SQLite -** compiled with the default page-size limit will not be able to rollback -** the aborted transaction. This could lead to database corruption. -*/ -#ifdef SQLITE_MAX_PAGE_SIZE -# undef SQLITE_MAX_PAGE_SIZE -#endif -#define SQLITE_MAX_PAGE_SIZE 65536 - - -/* -** The default size of a database page. -*/ -#ifndef SQLITE_DEFAULT_PAGE_SIZE -# define SQLITE_DEFAULT_PAGE_SIZE 1024 -#endif -#if SQLITE_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE -# undef SQLITE_DEFAULT_PAGE_SIZE -# define SQLITE_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE -#endif - -/* -** Ordinarily, if no value is explicitly provided, SQLite creates databases -** with page size SQLITE_DEFAULT_PAGE_SIZE. However, based on certain -** device characteristics (sector-size and atomic write() support), -** SQLite may choose a larger value. This constant is the maximum value -** SQLite will choose on its own. -*/ -#ifndef SQLITE_MAX_DEFAULT_PAGE_SIZE -# define SQLITE_MAX_DEFAULT_PAGE_SIZE 8192 -#endif -#if SQLITE_MAX_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE -# undef SQLITE_MAX_DEFAULT_PAGE_SIZE -# define SQLITE_MAX_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE -#endif - - -/* -** Maximum number of pages in one database file. -** -** This is really just the default value for the max_page_count pragma. 
-** This value can be lowered (or raised) at run-time using that the -** max_page_count macro. -*/ -#ifndef SQLITE_MAX_PAGE_COUNT -# define SQLITE_MAX_PAGE_COUNT 1073741823 -#endif - -/* -** Maximum length (in bytes) of the pattern in a LIKE or GLOB -** operator. -*/ -#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH -# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000 -#endif - -/* -** Maximum depth of recursion for triggers. -** -** A value of 1 means that a trigger program will not be able to itself -** fire any triggers. A value of 0 means that no trigger programs at all -** may be executed. -*/ -#ifndef SQLITE_MAX_TRIGGER_DEPTH -# define SQLITE_MAX_TRIGGER_DEPTH 1000 -#endif - -/************** End of sqliteLimit.h *****************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ - -/* Disable nuisance warnings on Borland compilers */ -#if defined(__BORLANDC__) -#pragma warn -rch /* unreachable code */ -#pragma warn -ccc /* Condition is always true or false */ -#pragma warn -aus /* Assigned value is never used */ -#pragma warn -csu /* Comparing signed and unsigned */ -#pragma warn -spa /* Suspicious pointer arithmetic */ -#endif - -/* Needed for various definitions... */ -#ifndef _GNU_SOURCE -# define _GNU_SOURCE -#endif - -/* -** Include standard header files as necessary -*/ -#ifdef HAVE_STDINT_H -#include -#endif -#ifdef HAVE_INTTYPES_H -#include -#endif - -/* -** The following macros are used to cast pointers to integers and -** integers to pointers. The way you do this varies from one compiler -** to the next, so we have developed the following set of #if statements -** to generate appropriate macros for a wide range of compilers. -** -** The correct "ANSI" way to do this is to use the intptr_t type. -** Unfortunately, that typedef is not available on all compilers, or -** if it is available, it requires an #include of specific headers -** that vary from one machine to the next. -** -** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on -** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)). -** So we have to define the macros in different ways depending on the -** compiler. -*/ -#if defined(__PTRDIFF_TYPE__) /* This case should work for GCC */ -# define SQLITE_INT_TO_PTR(X) ((void*)(__PTRDIFF_TYPE__)(X)) -# define SQLITE_PTR_TO_INT(X) ((int)(__PTRDIFF_TYPE__)(X)) -#elif !defined(__GNUC__) /* Works for compilers other than LLVM */ -# define SQLITE_INT_TO_PTR(X) ((void*)&((char*)0)[X]) -# define SQLITE_PTR_TO_INT(X) ((int)(((char*)X)-(char*)0)) -#elif defined(HAVE_STDINT_H) /* Use this case if we have ANSI headers */ -# define SQLITE_INT_TO_PTR(X) ((void*)(intptr_t)(X)) -# define SQLITE_PTR_TO_INT(X) ((int)(intptr_t)(X)) -#else /* Generates a warning - but it always works */ -# define SQLITE_INT_TO_PTR(X) ((void*)(X)) -# define SQLITE_PTR_TO_INT(X) ((int)(X)) -#endif - -/* -** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2. -** 0 means mutexes are permanently disable and the library is never -** threadsafe. 1 means the library is serialized which is the highest -** level of threadsafety. 2 means the libary is multithreaded - multiple -** threads can use SQLite as long as no two threads try to use the same -** database connection at the same time. -** -** Older versions of SQLite used an optional THREADSAFE macro. -** We support that for legacy. 
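** (Editorial example, not in the original header: a legacy build invoked
** as "cc -DTHREADSAFE=0 ..." now gets SQLITE_THREADSAFE=0 through the
** fallback below.)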
-*/ -#if !defined(SQLITE_THREADSAFE) -#if defined(THREADSAFE) -# define SQLITE_THREADSAFE THREADSAFE -#else -# define SQLITE_THREADSAFE 1 /* IMP: R-07272-22309 */ -#endif -#endif - -/* -** Powersafe overwrite is on by default. But can be turned off using -** the -DSQLITE_POWERSAFE_OVERWRITE=0 command-line option. -*/ -#ifndef SQLITE_POWERSAFE_OVERWRITE -# define SQLITE_POWERSAFE_OVERWRITE 1 -#endif - -/* -** The SQLITE_DEFAULT_MEMSTATUS macro must be defined as either 0 or 1. -** It determines whether or not the features related to -** SQLITE_CONFIG_MEMSTATUS are available by default or not. This value can -** be overridden at runtime using the sqlite3_config() API. -*/ -#if !defined(SQLITE_DEFAULT_MEMSTATUS) -# define SQLITE_DEFAULT_MEMSTATUS 1 -#endif - -/* -** Exactly one of the following macros must be defined in order to -** specify which memory allocation subsystem to use. -** -** SQLITE_SYSTEM_MALLOC // Use normal system malloc() -** SQLITE_WIN32_MALLOC // Use Win32 native heap API -** SQLITE_MEMDEBUG // Debugging version of system malloc() -** -** On Windows, if the SQLITE_WIN32_MALLOC_VALIDATE macro is defined and the -** assert() macro is enabled, each call into the Win32 native heap subsystem -** will cause HeapValidate to be called. If heap validation should fail, an -** assertion will be triggered. -** -** (Historical note: There used to be several other options, but we've -** pared it down to just these three.) -** -** If none of the above are defined, then set SQLITE_SYSTEM_MALLOC as -** the default. -*/ -#if defined(SQLITE_SYSTEM_MALLOC)+defined(SQLITE_WIN32_MALLOC)+defined(SQLITE_MEMDEBUG)>1 -# error "At most one of the following compile-time configuration options\ - is allows: SQLITE_SYSTEM_MALLOC, SQLITE_WIN32_MALLOC, SQLITE_MEMDEBUG" -#endif -#if defined(SQLITE_SYSTEM_MALLOC)+defined(SQLITE_WIN32_MALLOC)+defined(SQLITE_MEMDEBUG)==0 -# define SQLITE_SYSTEM_MALLOC 1 -#endif - -/* -** If SQLITE_MALLOC_SOFT_LIMIT is not zero, then try to keep the -** sizes of memory allocations below this value where possible. -*/ -#if !defined(SQLITE_MALLOC_SOFT_LIMIT) -# define SQLITE_MALLOC_SOFT_LIMIT 1024 -#endif - -/* -** We need to define _XOPEN_SOURCE as follows in order to enable -** recursive mutexes on most Unix systems. But Mac OS X is different. -** The _XOPEN_SOURCE define causes problems for Mac OS X we are told, -** so it is omitted there. See ticket #2673. -** -** Later we learn that _XOPEN_SOURCE is poorly or incorrectly -** implemented on some systems. So we avoid defining it at all -** if it is already defined or if it is unneeded because we are -** not doing a threadsafe build. Ticket #2681. -** -** See also ticket #2741. -*/ -#if !defined(_XOPEN_SOURCE) && !defined(__DARWIN__) && !defined(__APPLE__) && SQLITE_THREADSAFE -# define _XOPEN_SOURCE 500 /* Needed to enable pthread recursive mutexes */ -#endif - -/* -** The TCL headers are only needed when compiling the TCL bindings. -*/ -#if defined(SQLITE_TCL) || defined(TCLSH) -# include -#endif - -/* -** NDEBUG and SQLITE_DEBUG are opposites. It should always be true that -** defined(NDEBUG)==!defined(SQLITE_DEBUG). If this is not currently true, -** make it true by defining or undefining NDEBUG. -** -** Setting NDEBUG makes the code smaller and run faster by disabling the -** number assert() statements in the code. So we want the default action -** to be for NDEBUG to be set and NDEBUG to be undefined only if SQLITE_DEBUG -** is set. Thus NDEBUG becomes an opt-in rather than an opt-out -** feature. 
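** (Editorial example, not in the original header: "cc -DSQLITE_DEBUG ..."
** leaves NDEBUG undefined so assert() stays active, while a plain "cc ..."
** build gets NDEBUG defined below and assert() is compiled out.)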
-*/ -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) -# define NDEBUG 1 -#endif -#if defined(NDEBUG) && defined(SQLITE_DEBUG) -# undef NDEBUG -#endif - -/* -** The testcase() macro is used to aid in coverage testing. When -** doing coverage testing, the condition inside the argument to -** testcase() must be evaluated both true and false in order to -** get full branch coverage. The testcase() macro is inserted -** to help ensure adequate test coverage in places where simple -** condition/decision coverage is inadequate. For example, testcase() -** can be used to make sure boundary values are tested. For -** bitmask tests, testcase() can be used to make sure each bit -** is significant and used at least once. On switch statements -** where multiple cases go to the same block of code, testcase() -** can insure that all cases are evaluated. -** -*/ -#ifdef SQLITE_COVERAGE_TEST -SQLITE_PRIVATE void sqlite3Coverage(int); -# define testcase(X) if( X ){ sqlite3Coverage(__LINE__); } -#else -# define testcase(X) -#endif - -/* -** The TESTONLY macro is used to enclose variable declarations or -** other bits of code that are needed to support the arguments -** within testcase() and assert() macros. -*/ -#if !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST) -# define TESTONLY(X) X -#else -# define TESTONLY(X) -#endif - -/* -** Sometimes we need a small amount of code such as a variable initialization -** to setup for a later assert() statement. We do not want this code to -** appear when assert() is disabled. The following macro is therefore -** used to contain that setup code. The "VVA" acronym stands for -** "Verification, Validation, and Accreditation". In other words, the -** code within VVA_ONLY() will only run during verification processes. -*/ -#ifndef NDEBUG -# define VVA_ONLY(X) X -#else -# define VVA_ONLY(X) -#endif - -/* -** The ALWAYS and NEVER macros surround boolean expressions which -** are intended to always be true or false, respectively. Such -** expressions could be omitted from the code completely. But they -** are included in a few cases in order to enhance the resilience -** of SQLite to unexpected behavior - to make the code "self-healing" -** or "ductile" rather than being "brittle" and crashing at the first -** hint of unplanned behavior. -** -** In other words, ALWAYS and NEVER are added for defensive code. -** -** When doing coverage testing ALWAYS and NEVER are hard-coded to -** be true and false so that the unreachable code then specify will -** not be counted as untested code. -*/ -#if defined(SQLITE_COVERAGE_TEST) -# define ALWAYS(X) (1) -# define NEVER(X) (0) -#elif !defined(NDEBUG) -# define ALWAYS(X) ((X)?1:(assert(0),0)) -# define NEVER(X) ((X)?(assert(0),1):0) -#else -# define ALWAYS(X) (X) -# define NEVER(X) (X) -#endif - -/* -** Return true (non-zero) if the input is a integer that is too large -** to fit in 32-bits. This macro is used inside of various testcase() -** macros to verify that we have tested SQLite for large-file support. -*/ -#define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0) - -/* -** The macro unlikely() is a hint that surrounds a boolean -** expression that is usually false. Macro likely() surrounds -** a boolean expression that is usually true. GCC is able to -** use these hints to generate better code, sometimes. 
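** (Editorial sketch, not in the original header: a typical use is to mark
** an out-of-memory check as the cold path,
**
**   if( unlikely(pNew==0) ) return SQLITE_NOMEM;
**
** where pNew is an illustrative name.  Note that the GCC branch of the
** block below is disabled by the "&& 0", so both macros currently expand
** to the plain no-op form.)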
-*/ -#if defined(__GNUC__) && 0 -# define likely(X) __builtin_expect((X),1) -# define unlikely(X) __builtin_expect((X),0) -#else -# define likely(X) !!(X) -# define unlikely(X) !!(X) -#endif - -/************** Include sqlite3.h in the middle of sqliteInt.h ***************/ -/************** Begin file sqlite3.h *****************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface that the SQLite library -** presents to client programs. If a C-function, structure, datatype, -** or constant definition does not appear in this file, then it is -** not a published API of SQLite, is subject to change without -** notice, and should not be referenced by programs that use SQLite. -** -** Some of the definitions that are in this file are marked as -** "experimental". Experimental interfaces are normally new -** features recently added to SQLite. We do not anticipate changes -** to experimental interfaces but reserve the right to make minor changes -** if experience from use "in the wild" suggest such changes are prudent. -** -** The official C-language API documentation for SQLite is derived -** from comments in this file. This file is the authoritative source -** on how SQLite interfaces are suppose to operate. -** -** The name of this file under configuration management is "sqlite.h.in". -** The makefile makes some minor changes to this file (such as inserting -** the version number) and changes its name to "sqlite3.h" as -** part of the build process. -*/ -#ifndef _SQLITE3_H_ -#define _SQLITE3_H_ -#include /* Needed for the definition of va_list */ - -/* -** Make sure we can call this stuff from C++. -*/ -#if 0 -extern "C" { -#endif - - -/* -** Add the ability to override 'extern' -*/ -#ifndef SQLITE_EXTERN -# define SQLITE_EXTERN extern -#endif - -#ifndef SQLITE_API -# define SQLITE_API -#endif - - -/* -** These no-op macros are used in front of interfaces to mark those -** interfaces as either deprecated or experimental. New applications -** should not use deprecated interfaces - they are support for backwards -** compatibility only. Application writers should be aware that -** experimental interfaces are subject to change in point releases. -** -** These macros used to resolve to various kinds of compiler magic that -** would generate warning messages when they were used. But that -** compiler magic ended up generating such a flurry of bug reports -** that we have taken it all out and gone back to using simple -** noop macros. -*/ -#define SQLITE_DEPRECATED -#define SQLITE_EXPERIMENTAL - -/* -** Ensure these symbols were not defined by some previous header file. 
-*/ -#ifdef SQLITE_VERSION -# undef SQLITE_VERSION -#endif -#ifdef SQLITE_VERSION_NUMBER -# undef SQLITE_VERSION_NUMBER -#endif - -/* -** CAPI3REF: Compile-Time Library Version Numbers -** -** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header -** evaluates to a string literal that is the SQLite version in the -** format "X.Y.Z" where X is the major version number (always 3 for -** SQLite3) and Y is the minor version number and Z is the release number.)^ -** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer -** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same -** numbers used in [SQLITE_VERSION].)^ -** The SQLITE_VERSION_NUMBER for any given release of SQLite will also -** be larger than the release from which it is derived. Either Y will -** be held constant and Z will be incremented or else Y will be incremented -** and Z will be reset to zero. -** -** Since version 3.6.18, SQLite source code has been stored in the -** Fossil configuration management -** system. ^The SQLITE_SOURCE_ID macro evaluates to -** a string which identifies a particular check-in of SQLite -** within its configuration management system. ^The SQLITE_SOURCE_ID -** string contains the date and time of the check-in (UTC) and an SHA1 -** hash of the entire source tree. -** -** See also: [sqlite3_libversion()], -** [sqlite3_libversion_number()], [sqlite3_sourceid()], -** [sqlite_version()] and [sqlite_source_id()]. -*/ -#define SQLITE_VERSION "3.7.13" -#define SQLITE_VERSION_NUMBER 3007013 -#define SQLITE_SOURCE_ID "2012-06-11 02:05:22 f5b5a13f7394dc143aa136f1d4faba6839eaa6dc" - -/* -** CAPI3REF: Run-Time Library Version Numbers -** KEYWORDS: sqlite3_version, sqlite3_sourceid -** -** These interfaces provide the same information as the [SQLITE_VERSION], -** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros -** but are associated with the library instead of the header file. ^(Cautious -** programmers might include assert() statements in their application to -** verify that values returned by these interfaces match the macros in -** the header, and thus insure that the application is -** compiled with matching library and header files. -** -**
<blockquote><pre>
-** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
-** assert( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)==0 );
-** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
-** </pre></blockquote>
)^ -** -** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION] -** macro. ^The sqlite3_libversion() function returns a pointer to the -** to the sqlite3_version[] string constant. The sqlite3_libversion() -** function is provided for use in DLLs since DLL users usually do not have -** direct access to string constants within the DLL. ^The -** sqlite3_libversion_number() function returns an integer equal to -** [SQLITE_VERSION_NUMBER]. ^The sqlite3_sourceid() function returns -** a pointer to a string constant whose value is the same as the -** [SQLITE_SOURCE_ID] C preprocessor macro. -** -** See also: [sqlite_version()] and [sqlite_source_id()]. -*/ -SQLITE_API const char sqlite3_version[] = SQLITE_VERSION; -SQLITE_API const char *sqlite3_libversion(void); -SQLITE_API const char *sqlite3_sourceid(void); -SQLITE_API int sqlite3_libversion_number(void); - -/* -** CAPI3REF: Run-Time Library Compilation Options Diagnostics -** -** ^The sqlite3_compileoption_used() function returns 0 or 1 -** indicating whether the specified option was defined at -** compile time. ^The SQLITE_ prefix may be omitted from the -** option name passed to sqlite3_compileoption_used(). -** -** ^The sqlite3_compileoption_get() function allows iterating -** over the list of options that were defined at compile time by -** returning the N-th compile time option string. ^If N is out of range, -** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_ -** prefix is omitted from any strings returned by -** sqlite3_compileoption_get(). -** -** ^Support for the diagnostic functions sqlite3_compileoption_used() -** and sqlite3_compileoption_get() may be omitted by specifying the -** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time. -** -** See also: SQL functions [sqlite_compileoption_used()] and -** [sqlite_compileoption_get()] and the [compile_options pragma]. -*/ -#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS -SQLITE_API int sqlite3_compileoption_used(const char *zOptName); -SQLITE_API const char *sqlite3_compileoption_get(int N); -#endif - -/* -** CAPI3REF: Test To See If The Library Is Threadsafe -** -** ^The sqlite3_threadsafe() function returns zero if and only if -** SQLite was compiled with mutexing code omitted due to the -** [SQLITE_THREADSAFE] compile-time option being set to 0. -** -** SQLite can be compiled with or without mutexes. When -** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes -** are enabled and SQLite is threadsafe. When the -** [SQLITE_THREADSAFE] macro is 0, -** the mutexes are omitted. Without the mutexes, it is not safe -** to use SQLite concurrently from more than one thread. -** -** Enabling mutexes incurs a measurable performance penalty. -** So if speed is of utmost importance, it makes sense to disable -** the mutexes. But for maximum safety, mutexes should be enabled. -** ^The default behavior is for mutexes to be enabled. -** -** This interface can be used by an application to make sure that the -** version of SQLite that it is linking against was compiled with -** the desired setting of the [SQLITE_THREADSAFE] macro. -** -** This interface only reports on the compile-time mutex setting -** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with -** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but -** can be fully or partially disabled using a call to [sqlite3_config()] -** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD], -** or [SQLITE_CONFIG_MUTEX]. 
^(The return value of the -** sqlite3_threadsafe() function shows only the compile-time setting of -** thread safety, not any run-time changes to that setting made by -** sqlite3_config(). In other words, the return value from sqlite3_threadsafe() -** is unchanged by calls to sqlite3_config().)^ -** -** See the [threading mode] documentation for additional information. -*/ -SQLITE_API int sqlite3_threadsafe(void); - -/* -** CAPI3REF: Database Connection Handle -** KEYWORDS: {database connection} {database connections} -** -** Each open SQLite database is represented by a pointer to an instance of -** the opaque structure named "sqlite3". It is useful to think of an sqlite3 -** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and -** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()] -** is its destructor. There are many other interfaces (such as -** [sqlite3_prepare_v2()], [sqlite3_create_function()], and -** [sqlite3_busy_timeout()] to name but three) that are methods on an -** sqlite3 object. -*/ -typedef struct sqlite3 sqlite3; - -/* -** CAPI3REF: 64-Bit Integer Types -** KEYWORDS: sqlite_int64 sqlite_uint64 -** -** Because there is no cross-platform way to specify 64-bit integer types -** SQLite includes typedefs for 64-bit signed and unsigned integers. -** -** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. -** The sqlite_int64 and sqlite_uint64 types are supported for backwards -** compatibility only. -** -** ^The sqlite3_int64 and sqlite_int64 types can store integer values -** between -9223372036854775808 and +9223372036854775807 inclusive. ^The -** sqlite3_uint64 and sqlite_uint64 types can store integer values -** between 0 and +18446744073709551615 inclusive. -*/ -#ifdef SQLITE_INT64_TYPE - typedef SQLITE_INT64_TYPE sqlite_int64; - typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; -#elif defined(_MSC_VER) || defined(__BORLANDC__) - typedef __int64 sqlite_int64; - typedef unsigned __int64 sqlite_uint64; -#else - typedef long long int sqlite_int64; - typedef unsigned long long int sqlite_uint64; -#endif -typedef sqlite_int64 sqlite3_int64; -typedef sqlite_uint64 sqlite3_uint64; - -/* -** If compiling for a processor that lacks floating point support, -** substitute integer for floating-point. -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# define double sqlite3_int64 -#endif - -/* -** CAPI3REF: Closing A Database Connection -** -** ^The sqlite3_close() routine is the destructor for the [sqlite3] object. -** ^Calls to sqlite3_close() return SQLITE_OK if the [sqlite3] object is -** successfully destroyed and all associated resources are deallocated. -** -** Applications must [sqlite3_finalize | finalize] all [prepared statements] -** and [sqlite3_blob_close | close] all [BLOB handles] associated with -** the [sqlite3] object prior to attempting to close the object. ^If -** sqlite3_close() is called on a [database connection] that still has -** outstanding [prepared statements] or [BLOB handles], then it returns -** SQLITE_BUSY. -** -** ^If [sqlite3_close()] is invoked while a transaction is open, -** the transaction is automatically rolled back. -** -** The C parameter to [sqlite3_close(C)] must be either a NULL -** pointer or an [sqlite3] object pointer obtained -** from [sqlite3_open()], [sqlite3_open16()], or -** [sqlite3_open_v2()], and not previously closed. -** ^Calling sqlite3_close() with a NULL pointer argument is a -** harmless no-op. 
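** (Editorial sketch, not part of the original header: the documented
** shutdown order, with db and pStmt as illustrative names.
**
**   sqlite3_finalize(pStmt);          /* finalize every prepared statement */
**   if( sqlite3_close(db)==SQLITE_BUSY ){
**     /* a prepared statement or BLOB handle was left open */
**   }
** )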
-*/ -SQLITE_API int sqlite3_close(sqlite3 *); - -/* -** The type for a callback function. -** This is legacy and deprecated. It is included for historical -** compatibility and is not documented. -*/ -typedef int (*sqlite3_callback)(void*,int,char**, char**); - -/* -** CAPI3REF: One-Step Query Execution Interface -** -** The sqlite3_exec() interface is a convenience wrapper around -** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()], -** that allows an application to run multiple statements of SQL -** without having to use a lot of C code. -** -** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, -** semicolon-separate SQL statements passed into its 2nd argument, -** in the context of the [database connection] passed in as its 1st -** argument. ^If the callback function of the 3rd argument to -** sqlite3_exec() is not NULL, then it is invoked for each result row -** coming out of the evaluated SQL statements. ^The 4th argument to -** sqlite3_exec() is relayed through to the 1st argument of each -** callback invocation. ^If the callback pointer to sqlite3_exec() -** is NULL, then no callback is ever invoked and result rows are -** ignored. -** -** ^If an error occurs while evaluating the SQL statements passed into -** sqlite3_exec(), then execution of the current statement stops and -** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec() -** is not NULL then any error message is written into memory obtained -** from [sqlite3_malloc()] and passed back through the 5th parameter. -** To avoid memory leaks, the application should invoke [sqlite3_free()] -** on error message strings returned through the 5th parameter of -** of sqlite3_exec() after the error message string is no longer needed. -** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors -** occur, then sqlite3_exec() sets the pointer in its 5th parameter to -** NULL before returning. -** -** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec() -** routine returns SQLITE_ABORT without invoking the callback again and -** without running any subsequent SQL statements. -** -** ^The 2nd argument to the sqlite3_exec() callback function is the -** number of columns in the result. ^The 3rd argument to the sqlite3_exec() -** callback is an array of pointers to strings obtained as if from -** [sqlite3_column_text()], one for each column. ^If an element of a -** result row is NULL then the corresponding string pointer for the -** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the -** sqlite3_exec() callback is an array of pointers to strings where each -** entry represents the name of corresponding result column as obtained -** from [sqlite3_column_name()]. -** -** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer -** to an empty string, or a pointer that contains only whitespace and/or -** SQL comments, then no SQL statements are evaluated and the database -** is not changed. -** -** Restrictions: -** -**
<ul>
-** <li> The application must insure that the 1st parameter to sqlite3_exec()
-**      is a valid and open [database connection].
-** <li> The application must not close [database connection] specified by
-**      the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
-** <li> The application must not modify the SQL statement text passed into
-**      the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
-** </ul>
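** (Editorial sketch, not part of the original header: a minimal call that
** honors the restrictions above; print_row is an illustrative callback
** name and db is an open [database connection].
**
**   static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
**     int i;
**     for(i=0; i<nCol; i++){
**       printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
**     }
**     return 0;   /* returning non-zero would abort with SQLITE_ABORT */
**   }
**
**   char *zErr = 0;
**   if( sqlite3_exec(db, "SELECT * FROM t1;", print_row, 0, &zErr)!=SQLITE_OK ){
**     fprintf(stderr, "%s\n", zErr);
**     sqlite3_free(zErr);   /* error strings come from sqlite3_malloc() */
**   }
** )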
-*/ -SQLITE_API int sqlite3_exec( - sqlite3*, /* An open database */ - const char *sql, /* SQL to be evaluated */ - int (*callback)(void*,int,char**,char**), /* Callback function */ - void *, /* 1st argument to callback */ - char **errmsg /* Error msg written here */ -); - -/* -** CAPI3REF: Result Codes -** KEYWORDS: SQLITE_OK {error code} {error codes} -** KEYWORDS: {result code} {result codes} -** -** Many SQLite functions return an integer result code from the set shown -** here in order to indicate success or failure. -** -** New error codes may be added in future versions of SQLite. -** -** See also: [SQLITE_IOERR_READ | extended result codes], -** [sqlite3_vtab_on_conflict()] [SQLITE_ROLLBACK | result codes]. -*/ -#define SQLITE_OK 0 /* Successful result */ -/* beginning-of-error-codes */ -#define SQLITE_ERROR 1 /* SQL error or missing database */ -#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ -#define SQLITE_PERM 3 /* Access permission denied */ -#define SQLITE_ABORT 4 /* Callback routine requested an abort */ -#define SQLITE_BUSY 5 /* The database file is locked */ -#define SQLITE_LOCKED 6 /* A table in the database is locked */ -#define SQLITE_NOMEM 7 /* A malloc() failed */ -#define SQLITE_READONLY 8 /* Attempt to write a readonly database */ -#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/ -#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ -#define SQLITE_CORRUPT 11 /* The database disk image is malformed */ -#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */ -#define SQLITE_FULL 13 /* Insertion failed because database is full */ -#define SQLITE_CANTOPEN 14 /* Unable to open the database file */ -#define SQLITE_PROTOCOL 15 /* Database lock protocol error */ -#define SQLITE_EMPTY 16 /* Database is empty */ -#define SQLITE_SCHEMA 17 /* The database schema changed */ -#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ -#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ -#define SQLITE_MISMATCH 20 /* Data type mismatch */ -#define SQLITE_MISUSE 21 /* Library used incorrectly */ -#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ -#define SQLITE_AUTH 23 /* Authorization denied */ -#define SQLITE_FORMAT 24 /* Auxiliary database format error */ -#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ -#define SQLITE_NOTADB 26 /* File opened that is not a database file */ -#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ -#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ -/* end-of-error-codes */ - -/* -** CAPI3REF: Extended Result Codes -** KEYWORDS: {extended error code} {extended error codes} -** KEYWORDS: {extended result code} {extended result codes} -** -** In its default configuration, SQLite API routines return one of 26 integer -** [SQLITE_OK | result codes]. However, experience has shown that many of -** these result codes are too coarse-grained. They do not provide as -** much information about problems as programmers might like. In an effort to -** address this, newer versions of SQLite (version 3.3.8 and later) include -** support for additional result codes that provide more detailed information -** about errors. The extended result codes are enabled or disabled -** on a per database connection basis using the -** [sqlite3_extended_result_codes()] API. -** -** Some of the available extended result codes are listed here. 
-** One may expect the number of extended result codes will be expand -** over time. Software that uses extended result codes should expect -** to see new result codes in future releases of SQLite. -** -** The SQLITE_OK result code will never be extended. It will always -** be exactly zero. -*/ -#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) -#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) -#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) -#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) -#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) -#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) -#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) -#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) -#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) -#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) -#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) -#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) -#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) -#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) -#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) -#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) -#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) -#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) -#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) -#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) -#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8)) -#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8)) -#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) -#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) -#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) -#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) -#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) -#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) -#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8)) -#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8)) - -/* -** CAPI3REF: Flags For File Open Operations -** -** These bit values are intended for use in the -** 3rd parameter to the [sqlite3_open_v2()] interface and -** in the 4th parameter to the [sqlite3_vfs.xOpen] method. 
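** (Editorial sketch, not part of the original header: the common
** read/write-and-create combination; "data.db" is an illustrative name.
**
**   sqlite3 *db;
**   int rc = sqlite3_open_v2("data.db", &db,
**                            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
**   if( rc!=SQLITE_OK ){ /* report the error, then sqlite3_close(db) */ }
** )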
-*/ -#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */ -#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */ -#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */ -#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */ -#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */ -#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */ -#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */ -#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */ -#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */ -#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ -#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ -#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ - -/* Reserved: 0x00F00000 */ - -/* -** CAPI3REF: Device Characteristics -** -** The xDeviceCharacteristics method of the [sqlite3_io_methods] -** object returns an integer which is a vector of the these -** bit values expressing I/O characteristics of the mass storage -** device that holds the file that the [sqlite3_io_methods] -** refers to. -** -** The SQLITE_IOCAP_ATOMIC property means that all writes of -** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values -** mean that writes of blocks that are nnn bytes in size and -** are aligned to an address which is an integer multiple of -** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means -** that when data is appended to a file, the data is appended -** first then the size of the file is extended, never the other -** way around. The SQLITE_IOCAP_SEQUENTIAL property means that -** information is written to disk in the same order as calls -** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that -** after reboot following a crash or power loss, the only bytes in a -** file that were written at the application level might have changed -** and that adjacent bytes, even bytes within the same sector are -** guaranteed to be unchanged. -*/ -#define SQLITE_IOCAP_ATOMIC 0x00000001 -#define SQLITE_IOCAP_ATOMIC512 0x00000002 -#define SQLITE_IOCAP_ATOMIC1K 0x00000004 -#define SQLITE_IOCAP_ATOMIC2K 0x00000008 -#define SQLITE_IOCAP_ATOMIC4K 0x00000010 -#define SQLITE_IOCAP_ATOMIC8K 0x00000020 -#define SQLITE_IOCAP_ATOMIC16K 0x00000040 -#define SQLITE_IOCAP_ATOMIC32K 0x00000080 -#define SQLITE_IOCAP_ATOMIC64K 0x00000100 -#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 -#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 -#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800 -#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 - -/* -** CAPI3REF: File Locking Levels -** -** SQLite uses one of these integer values as the second -** argument to calls it makes to the xLock() and xUnlock() methods -** of an [sqlite3_io_methods] object. 
-*/ -#define SQLITE_LOCK_NONE 0 -#define SQLITE_LOCK_SHARED 1 -#define SQLITE_LOCK_RESERVED 2 -#define SQLITE_LOCK_PENDING 3 -#define SQLITE_LOCK_EXCLUSIVE 4 - -/* -** CAPI3REF: Synchronization Type Flags -** -** When SQLite invokes the xSync() method of an -** [sqlite3_io_methods] object it uses a combination of -** these integer values as the second argument. -** -** When the SQLITE_SYNC_DATAONLY flag is used, it means that the -** sync operation only needs to flush data to mass storage. Inode -** information need not be flushed. If the lower four bits of the flag -** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. -** If the lower four bits equal SQLITE_SYNC_FULL, that means -** to use Mac OS X style fullsync instead of fsync(). -** -** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags -** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL -** settings. The [synchronous pragma] determines when calls to the -** xSync VFS method occur and applies uniformly across all platforms. -** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how -** energetic or rigorous or forceful the sync operations are and -** only make a difference on Mac OSX for the default SQLite code. -** (Third-party VFS implementations might also make the distinction -** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the -** operating systems natively supported by SQLite, only Mac OSX -** cares about the difference.) -*/ -#define SQLITE_SYNC_NORMAL 0x00002 -#define SQLITE_SYNC_FULL 0x00003 -#define SQLITE_SYNC_DATAONLY 0x00010 - -/* -** CAPI3REF: OS Interface Open File Handle -** -** An [sqlite3_file] object represents an open file in the -** [sqlite3_vfs | OS interface layer]. Individual OS interface -** implementations will -** want to subclass this object by appending additional fields -** for their own use. The pMethods entry is a pointer to an -** [sqlite3_io_methods] object that defines methods for performing -** I/O operations on the open file. -*/ -typedef struct sqlite3_file sqlite3_file; -struct sqlite3_file { - const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ -}; - -/* -** CAPI3REF: OS Interface File Virtual Methods Object -** -** Every file opened by the [sqlite3_vfs.xOpen] method populates an -** [sqlite3_file] object (or, more commonly, a subclass of the -** [sqlite3_file] object) with a pointer to an instance of this object. -** This object defines the methods used to perform various operations -** against the open file represented by the [sqlite3_file] object. -** -** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element -** to a non-NULL pointer, then the sqlite3_io_methods.xClose method -** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The -** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen] -** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element -** to NULL. -** -** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or -** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). -** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY] -** flag may be ORed in to indicate that only the data of the file -** and not its inode needs to be synced. -** -** The integer values to xLock() and xUnlock() are one of -**
-**   • [SQLITE_LOCK_NONE],
-**   • [SQLITE_LOCK_SHARED],
-**   • [SQLITE_LOCK_RESERVED],
-**   • [SQLITE_LOCK_PENDING], or
-**   • [SQLITE_LOCK_EXCLUSIVE].
-**
-** xLock() increases the lock. xUnlock() decreases the lock. -** The xCheckReservedLock() method checks whether any database connection, -** either in this process or in some other process, is holding a RESERVED, -** PENDING, or EXCLUSIVE lock on the file. It returns true -** if such a lock exists and false otherwise. -** -** The xFileControl() method is a generic interface that allows custom -** VFS implementations to directly control an open file using the -** [sqlite3_file_control()] interface. The second "op" argument is an -** integer opcode. The third argument is a generic pointer intended to -** point to a structure that may contain arguments or space in which to -** write return values. Potential uses for xFileControl() might be -** functions to enable blocking locks with timeouts, to change the -** locking strategy (for example to use dot-file locks), to inquire -** about the status of a lock, or to break stale locks. The SQLite -** core reserves all opcodes less than 100 for its own use. -** A [SQLITE_FCNTL_LOCKSTATE | list of opcodes] less than 100 is available. -** Applications that define a custom xFileControl method should use opcodes -** greater than 100 to avoid conflicts. VFS implementations should -** return [SQLITE_NOTFOUND] for file control opcodes that they do not -** recognize. -** -** The xSectorSize() method returns the sector size of the -** device that underlies the file. The sector size is the -** minimum write that can be performed without disturbing -** other bytes in the file. The xDeviceCharacteristics() -** method returns a bit vector describing behaviors of the -** underlying device: -** -**
-**   • [SQLITE_IOCAP_ATOMIC]
-**   • [SQLITE_IOCAP_ATOMIC512]
-**   • [SQLITE_IOCAP_ATOMIC1K]
-**   • [SQLITE_IOCAP_ATOMIC2K]
-**   • [SQLITE_IOCAP_ATOMIC4K]
-**   • [SQLITE_IOCAP_ATOMIC8K]
-**   • [SQLITE_IOCAP_ATOMIC16K]
-**   • [SQLITE_IOCAP_ATOMIC32K]
-**   • [SQLITE_IOCAP_ATOMIC64K]
-**   • [SQLITE_IOCAP_SAFE_APPEND]
-**   • [SQLITE_IOCAP_SEQUENTIAL]
-** -** The SQLITE_IOCAP_ATOMIC property means that all writes of -** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values -** mean that writes of blocks that are nnn bytes in size and -** are aligned to an address which is an integer multiple of -** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means -** that when data is appended to a file, the data is appended -** first then the size of the file is extended, never the other -** way around. The SQLITE_IOCAP_SEQUENTIAL property means that -** information is written to disk in the same order as calls -** to xWrite(). -** -** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill -** in the unread portions of the buffer with zeros. A VFS that -** fails to zero-fill short reads might seem to work. However, -** failure to zero-fill short reads will eventually lead to -** database corruption. -*/ -typedef struct sqlite3_io_methods sqlite3_io_methods; -struct sqlite3_io_methods { - int iVersion; - int (*xClose)(sqlite3_file*); - int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); - int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); - int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); - int (*xSync)(sqlite3_file*, int flags); - int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); - int (*xLock)(sqlite3_file*, int); - int (*xUnlock)(sqlite3_file*, int); - int (*xCheckReservedLock)(sqlite3_file*, int *pResOut); - int (*xFileControl)(sqlite3_file*, int op, void *pArg); - int (*xSectorSize)(sqlite3_file*); - int (*xDeviceCharacteristics)(sqlite3_file*); - /* Methods above are valid for version 1 */ - int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**); - int (*xShmLock)(sqlite3_file*, int offset, int n, int flags); - void (*xShmBarrier)(sqlite3_file*); - int (*xShmUnmap)(sqlite3_file*, int deleteFlag); - /* Methods above are valid for version 2 */ - /* Additional methods may be added in future releases */ -}; - -/* -** CAPI3REF: Standard File Control Opcodes -** -** These integer constants are opcodes for the xFileControl method -** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()] -** interface. -** -** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This -** opcode causes the xFileControl method to write the current state of -** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], -** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) -** into an integer that the pArg argument points to. This capability -** is used during testing and only needs to be supported when SQLITE_TEST -** is defined. -**
-**   • [[SQLITE_FCNTL_SIZE_HINT]]
-** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS
-** layer a hint of how large the database file will grow to be during the
-** current transaction. This hint is not guaranteed to be accurate but it
-** is often close. The underlying VFS might choose to preallocate database
-** file space based on this hint in order to help writes to the database
-** file run faster.
-**
-**   • [[SQLITE_FCNTL_CHUNK_SIZE]]
-** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS
-** extends and truncates the database file in chunks of a size specified
-** by the user. The fourth argument to [sqlite3_file_control()] should
-** point to an integer (type int) containing the new chunk-size to use
-** for the nominated database. Allocating database file space in large
-** chunks (say 1MB at a time), may reduce file-system fragmentation and
-** improve performance on some systems.
-**
-**   • [[SQLITE_FCNTL_FILE_POINTER]]
-** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer
-** to the [sqlite3_file] object associated with a particular database
-** connection. See the [sqlite3_file_control()] documentation for
-** additional information.
-**
-**   • [[SQLITE_FCNTL_SYNC_OMITTED]]
-** ^(The [SQLITE_FCNTL_SYNC_OMITTED] opcode is generated internally by
-** SQLite and sent to all VFSes in place of a call to the xSync method
-** when the database connection has [PRAGMA synchronous] set to OFF.)^
-** Some specialized VFSes need this signal in order to operate correctly
-** when [PRAGMA synchronous | PRAGMA synchronous=OFF] is set, but most
-** VFSes do not need this signal and should silently ignore this opcode.
-** Applications should not call [sqlite3_file_control()] with this
-** opcode as doing so may disrupt the operation of the specialized VFSes
-** that do require it.
-**
-**   • [[SQLITE_FCNTL_WIN32_AV_RETRY]]
-** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic
-** retry counts and intervals for certain disk I/O operations for the
-** windows [VFS] in order to provide robustness in the presence of
-** anti-virus programs. By default, the windows VFS will retry file read,
-** file write, and file delete operations up to 10 times, with a delay
-** of 25 milliseconds before the first retry and with the delay increasing
-** by an additional 25 milliseconds with each subsequent retry. This
-** opcode allows these two values (10 retries and 25 milliseconds of delay)
-** to be adjusted. The values are changed for all database connections
-** within the same process. The argument is a pointer to an array of two
-** integers where the first integer is the new retry count and the second
-** integer is the delay. If either integer is negative, then the setting
-** is not changed but instead the prior value of that setting is written
-** into the array entry, allowing the current retry settings to be
-** interrogated. The zDbName parameter is ignored.
-**
-**   • [[SQLITE_FCNTL_PERSIST_WAL]]
-** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the
-** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary
-** write ahead log and shared memory files used for transaction control
-** are automatically deleted when the latest connection to the database
-** closes. Setting persistent WAL mode causes those files to persist after
-** close. Persisting the files is useful when other processes that do not
-** have write permission on the directory containing the database file want
-** to read the database file, as the WAL and shared memory files must exist
-** in order for the database to be readable. The fourth parameter to
-** [sqlite3_file_control()] for this opcode should be a pointer to an integer.
-** That integer is 0 to disable persistent WAL mode or 1 to enable persistent
-** WAL mode. If the integer is -1, then it is overwritten with the current
-** WAL persistence setting.
-**
-**   • [[SQLITE_FCNTL_POWERSAFE_OVERWRITE]]
-** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the
-** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting
-** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the
-** xDeviceCharacteristics methods. The fourth parameter to
-** [sqlite3_file_control()] for this opcode should be a pointer to an integer.
-** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage
-** mode. If the integer is -1, then it is overwritten with the current
-** zero-damage mode setting.
-**
-**   • [[SQLITE_FCNTL_OVERWRITE]]
-** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening
-** a write transaction to indicate that, unless it is rolled back for some
-** reason, the entire database file will be overwritten by the current
-** transaction. This is used by VACUUM operations.
-**
-**   • [[SQLITE_FCNTL_VFSNAME]]
-** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of
-** all [VFSes] in the VFS stack. The names of all VFS shims and the
-** final bottom-level VFS are written into memory obtained from
-** [sqlite3_malloc()] and the result is stored in the char* variable
-** that the fourth parameter of [sqlite3_file_control()] points to.
-** The caller is responsible for freeing the memory when done. As with
-** all file-control actions, there is no guarantee that this will actually
-** do anything. Callers should initialize the char* variable to a NULL
-** pointer in case this file-control is not implemented. This file-control
-** is intended for diagnostic use only.
-**
-**   • [[SQLITE_FCNTL_PRAGMA]]
-** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA]
-** file control is sent to the open [sqlite3_file] object corresponding
-** to the database file to which the pragma statement refers. ^The argument
-** to the [SQLITE_FCNTL_PRAGMA] file control is an array of
-** pointers to strings (char**) in which the second element of the array
-** is the name of the pragma and the third element is the argument to the
-** pragma or NULL if the pragma has no argument. ^The handler for an
-** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element
-** of the char** argument point to a string obtained from [sqlite3_mprintf()]
-** or the equivalent and that string will become the result of the pragma or
-** the error message if the pragma fails. ^If the
-** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal
-** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA]
-** file control returns [SQLITE_OK], then the parser assumes that the
-** VFS has handled the PRAGMA itself and the parser generates a no-op
-** prepared statement. ^If the [SQLITE_FCNTL_PRAGMA] file control returns
-** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means
-** that the VFS encountered an error while handling the [PRAGMA] and the
-** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA]
-** file control occurs at the beginning of pragma statement analysis and so
-** it is able to override built-in [PRAGMA] statements.
-**
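A short sketch of how an application might invoke two of the opcodes listed above through sqlite3_file_control(); the helper name tune_main_db() and the 1MB chunk size are illustrative only:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Sketch: exercise two of the opcodes above on the "main" database.
    ** Both calls are advisory; a VFS that does not implement an opcode
    ** returns SQLITE_NOTFOUND and the database is unaffected. */
    static void tune_main_db(sqlite3 *db){
      int chunk = 1024*1024;   /* grow/truncate the file 1MB at a time */
      sqlite3_file_control(db, "main", SQLITE_FCNTL_CHUNK_SIZE, &chunk);

      char *zVfsName = 0;      /* must start as NULL per the note above */
      if( sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME,
                               &zVfsName)==SQLITE_OK && zVfsName ){
        printf("VFS stack: %s\n", zVfsName);
        sqlite3_free(zVfsName);  /* caller frees the sqlite3_malloc()ed name */
      }
    }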
-*/ -#define SQLITE_FCNTL_LOCKSTATE 1 -#define SQLITE_GET_LOCKPROXYFILE 2 -#define SQLITE_SET_LOCKPROXYFILE 3 -#define SQLITE_LAST_ERRNO 4 -#define SQLITE_FCNTL_SIZE_HINT 5 -#define SQLITE_FCNTL_CHUNK_SIZE 6 -#define SQLITE_FCNTL_FILE_POINTER 7 -#define SQLITE_FCNTL_SYNC_OMITTED 8 -#define SQLITE_FCNTL_WIN32_AV_RETRY 9 -#define SQLITE_FCNTL_PERSIST_WAL 10 -#define SQLITE_FCNTL_OVERWRITE 11 -#define SQLITE_FCNTL_VFSNAME 12 -#define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13 -#define SQLITE_FCNTL_PRAGMA 14 - -/* -** CAPI3REF: Mutex Handle -** -** The mutex module within SQLite defines [sqlite3_mutex] to be an -** abstract type for a mutex object. The SQLite core never looks -** at the internal representation of an [sqlite3_mutex]. It only -** deals with pointers to the [sqlite3_mutex] object. -** -** Mutexes are created using [sqlite3_mutex_alloc()]. -*/ -typedef struct sqlite3_mutex sqlite3_mutex; - -/* -** CAPI3REF: OS Interface Object -** -** An instance of the sqlite3_vfs object defines the interface between -** the SQLite core and the underlying operating system. The "vfs" -** in the name of the object stands for "virtual file system". See -** the [VFS | VFS documentation] for further information. -** -** The value of the iVersion field is initially 1 but may be larger in -** future versions of SQLite. Additional fields may be appended to this -** object when the iVersion value is increased. Note that the structure -** of the sqlite3_vfs object changes in the transaction between -** SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not -** modified. -** -** The szOsFile field is the size of the subclassed [sqlite3_file] -** structure used by this VFS. mxPathname is the maximum length of -** a pathname in this VFS. -** -** Registered sqlite3_vfs objects are kept on a linked list formed by -** the pNext pointer. The [sqlite3_vfs_register()] -** and [sqlite3_vfs_unregister()] interfaces manage this list -** in a thread-safe way. The [sqlite3_vfs_find()] interface -** searches the list. Neither the application code nor the VFS -** implementation should use the pNext pointer. -** -** The pNext field is the only field in the sqlite3_vfs -** structure that SQLite will ever modify. SQLite will only access -** or modify this field while holding a particular static mutex. -** The application should never modify anything within the sqlite3_vfs -** object once the object has been registered. -** -** The zName field holds the name of the VFS module. The name must -** be unique across all VFS modules. -** -** [[sqlite3_vfs.xOpen]] -** ^SQLite guarantees that the zFilename parameter to xOpen -** is either a NULL pointer or string obtained -** from xFullPathname() with an optional suffix added. -** ^If a suffix is added to the zFilename parameter, it will -** consist of a single "-" character followed by no more than -** 11 alphanumeric and/or "-" characters. -** ^SQLite further guarantees that -** the string will be valid and unchanged until xClose() is -** called. Because of the previous sentence, -** the [sqlite3_file] can safely store a pointer to the -** filename if it needs to remember the filename for some reason. -** If the zFilename parameter to xOpen is a NULL pointer then xOpen -** must invent its own temporary name for the file. ^Whenever the -** xFilename parameter is NULL it will also be the case that the -** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE]. -** -** The flags argument to xOpen() includes all bits set in -** the flags argument to [sqlite3_open_v2()]. 
Or if [sqlite3_open()] -** or [sqlite3_open16()] is used, then flags includes at least -** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. -** If xOpen() opens a file read-only then it sets *pOutFlags to -** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set. -** -** ^(SQLite will also add one of the following flags to the xOpen() -** call, depending on the object being opened: -** -**
-**   • [SQLITE_OPEN_MAIN_DB]
-**   • [SQLITE_OPEN_MAIN_JOURNAL]
-**   • [SQLITE_OPEN_TEMP_DB]
-**   • [SQLITE_OPEN_TEMP_JOURNAL]
-**   • [SQLITE_OPEN_TRANSIENT_DB]
-**   • [SQLITE_OPEN_SUBJOURNAL]
-**   • [SQLITE_OPEN_MASTER_JOURNAL]
-**   • [SQLITE_OPEN_WAL])^
-**
-** The file I/O implementation can use the object type flags to
-** change the way it deals with files. For example, an application
-** that does not care about crash recovery or rollback might make
-** the open of a journal file a no-op. Writes to this journal would
-** also be no-ops, and any attempt to read the journal would return
-** SQLITE_IOERR. Or the implementation might recognize that a database
-** file will be doing page-aligned sector reads and writes in a random
-** order and set up its I/O subsystem accordingly.
-**
-** SQLite might also add one of the following flags to the xOpen method:
-**
-**   • [SQLITE_OPEN_DELETEONCLOSE]
-**   • [SQLITE_OPEN_EXCLUSIVE]
-** -** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be -** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE] -** will be set for TEMP databases and their journals, transient -** databases, and subjournals. -** -** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction -** with the [SQLITE_OPEN_CREATE] flag, which are both directly -** analogous to the O_EXCL and O_CREAT flags of the POSIX open() -** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the -** SQLITE_OPEN_CREATE, is used to indicate that file should always -** be created, and that it is an error if it already exists. -** It is not used to indicate the file should be opened -** for exclusive access. -** -** ^At least szOsFile bytes of memory are allocated by SQLite -** to hold the [sqlite3_file] structure passed as the third -** argument to xOpen. The xOpen method does not have to -** allocate the structure; it should just fill it in. Note that -** the xOpen method must set the sqlite3_file.pMethods to either -** a valid [sqlite3_io_methods] object or to NULL. xOpen must do -** this even if the open fails. SQLite expects that the sqlite3_file.pMethods -** element will be valid after xOpen returns regardless of the success -** or failure of the xOpen call. -** -** [[sqlite3_vfs.xAccess]] -** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS] -** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to -** test whether a file is readable and writable, or [SQLITE_ACCESS_READ] -** to test whether a file is at least readable. The file can be a -** directory. -** -** ^SQLite will always allocate at least mxPathname+1 bytes for the -** output buffer xFullPathname. The exact size of the output buffer -** is also passed as a parameter to both methods. If the output buffer -** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is -** handled as a fatal error by SQLite, vfs implementations should endeavor -** to prevent this by setting mxPathname to a sufficiently large value. -** -** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64() -** interfaces are not strictly a part of the filesystem, but they are -** included in the VFS structure for completeness. -** The xRandomness() function attempts to return nBytes bytes -** of good-quality randomness into zOut. The return value is -** the actual number of bytes of randomness obtained. -** The xSleep() method causes the calling thread to sleep for at -** least the number of microseconds given. ^The xCurrentTime() -** method returns a Julian Day Number for the current date and time as -** a floating point value. -** ^The xCurrentTimeInt64() method returns, as an integer, the Julian -** Day Number multiplied by 86400000 (the number of milliseconds in -** a 24-hour day). -** ^SQLite will use the xCurrentTimeInt64() method to get the current -** date and time if that method is available (if iVersion is 2 or -** greater and the function pointer is not NULL) and will fall back -** to xCurrentTime() if xCurrentTimeInt64() is unavailable. -** -** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces -** are not used by the SQLite core. These optional interfaces are provided -** by some VFSes to facilitate testing of the VFS code. By overriding -** system calls with functions under its control, a test program can -** simulate faults and error conditions that would otherwise be difficult -** or impossible to induce. 
-** The set of system calls that can be overridden
-** varies from one VFS to another, and from one version of the same VFS to the
-** next. Applications that use these interfaces must be prepared for any
-** or all of these interfaces to be NULL or for their behavior to change
-** from one release to the next. Applications must not attempt to access
-** any of these methods if the iVersion of the VFS is less than 3.
-*/
-typedef struct sqlite3_vfs sqlite3_vfs;
-typedef void (*sqlite3_syscall_ptr)(void);
-struct sqlite3_vfs {
-  int iVersion;            /* Structure version number (currently 3) */
-  int szOsFile;            /* Size of subclassed sqlite3_file */
-  int mxPathname;          /* Maximum file pathname length */
-  sqlite3_vfs *pNext;      /* Next registered VFS */
-  const char *zName;       /* Name of this virtual file system */
-  void *pAppData;          /* Pointer to application-specific data */
-  int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*,
-               int flags, int *pOutFlags);
-  int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir);
-  int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut);
-  int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut);
-  void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename);
-  void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg);
-  void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void);
-  void (*xDlClose)(sqlite3_vfs*, void*);
-  int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut);
-  int (*xSleep)(sqlite3_vfs*, int microseconds);
-  int (*xCurrentTime)(sqlite3_vfs*, double*);
-  int (*xGetLastError)(sqlite3_vfs*, int, char *);
-  /*
-  ** The methods above are in version 1 of the sqlite_vfs object
-  ** definition. Those that follow are added in version 2 or later
-  */
-  int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*);
-  /*
-  ** The methods above are in versions 1 and 2 of the sqlite_vfs object.
-  ** Those below are for version 3 and greater.
-  */
-  int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr);
-  sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName);
-  const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName);
-  /*
-  ** The methods above are in versions 1 through 3 of the sqlite_vfs object.
-  ** New fields may be appended in future versions. The iVersion
-  ** value will increment whenever this happens.
-  */
-};
-
-/*
-** CAPI3REF: Flags for the xAccess VFS method
-**
-** These integer constants can be used as the third parameter to
-** the xAccess method of an [sqlite3_vfs] object. They determine
-** what kind of permissions the xAccess method is looking for.
-** With SQLITE_ACCESS_EXISTS, the xAccess method
-** simply checks whether the file exists.
-** With SQLITE_ACCESS_READWRITE, the xAccess method
-** checks whether the named directory is both readable and writable
-** (in other words, if files can be added, removed, and renamed within
-** the directory).
-** The SQLITE_ACCESS_READWRITE constant is currently used only by the
-** [temp_store_directory pragma], though this could change in a future
-** release of SQLite.
-** With SQLITE_ACCESS_READ, the xAccess method
-** checks whether the file is readable. The SQLITE_ACCESS_READ constant is
-** currently unused, though it might be used in a future release of
-** SQLite.
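A small sketch of inspecting the registered default VFS through sqlite3_vfs_find(); the helper name show_default_vfs() is illustrative:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Sketch: look up a VFS object by name. sqlite3_vfs_find(0) returns
    ** the default VFS; a named lookup returns NULL when no VFS with
    ** that name has been registered. */
    static void show_default_vfs(void){
      sqlite3_vfs *p = sqlite3_vfs_find(0);
      if( p ){
        printf("default vfs: %s (iVersion=%d, mxPathname=%d)\n",
               p->zName, p->iVersion, p->mxPathname);
      }
    }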
-*/ -#define SQLITE_ACCESS_EXISTS 0 -#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */ -#define SQLITE_ACCESS_READ 2 /* Unused */ - -/* -** CAPI3REF: Flags for the xShmLock VFS method -** -** These integer constants define the various locking operations -** allowed by the xShmLock method of [sqlite3_io_methods]. The -** following are the only legal combinations of flags to the -** xShmLock method: -** -**
-**   • SQLITE_SHM_LOCK | SQLITE_SHM_SHARED
-**   • SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE
-**   • SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED
-**   • SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE
-**
-** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as
-** was given on the corresponding lock.
-**
-** The xShmLock method can transition between unlocked and SHARED or
-** between unlocked and EXCLUSIVE. It cannot transition between SHARED
-** and EXCLUSIVE.
-*/
-#define SQLITE_SHM_UNLOCK       1
-#define SQLITE_SHM_LOCK         2
-#define SQLITE_SHM_SHARED       4
-#define SQLITE_SHM_EXCLUSIVE    8
-
-/*
-** CAPI3REF: Maximum xShmLock index
-**
-** The xShmLock method on [sqlite3_io_methods] may use values
-** between 0 and this upper bound as its "offset" argument.
-** The SQLite core will never attempt to acquire or release a
-** lock outside of this range.
-*/
-#define SQLITE_SHM_NLOCK        8
-
-
-/*
-** CAPI3REF: Initialize The SQLite Library
-**
-** ^The sqlite3_initialize() routine initializes the
-** SQLite library. ^The sqlite3_shutdown() routine
-** deallocates any resources that were allocated by sqlite3_initialize().
-** These routines are designed to aid in process initialization and
-** shutdown on embedded systems. Workstation applications using
-** SQLite normally do not need to invoke either of these routines.
-**
-** A call to sqlite3_initialize() is an "effective" call if it is
-** the first time sqlite3_initialize() is invoked during the lifetime of
-** the process, or if it is the first time sqlite3_initialize() is invoked
-** following a call to sqlite3_shutdown(). ^(Only an effective call
-** of sqlite3_initialize() does any initialization. All other calls
-** are harmless no-ops.)^
-**
-** A call to sqlite3_shutdown() is an "effective" call if it is the first
-** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only
-** an effective call to sqlite3_shutdown() does any deinitialization.
-** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^
-**
-** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown()
-** is not. The sqlite3_shutdown() interface must only be called from a
-** single thread. All open [database connections] must be closed and all
-** other SQLite resources must be deallocated prior to invoking
-** sqlite3_shutdown().
-**
-** Among other things, ^sqlite3_initialize() will invoke
-** sqlite3_os_init(). Similarly, ^sqlite3_shutdown()
-** will invoke sqlite3_os_end().
-**
-** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success.
-** ^If for some reason, sqlite3_initialize() is unable to initialize
-** the library (perhaps it is unable to allocate a needed resource such
-** as a mutex) it returns an [error code] other than [SQLITE_OK].
-**
-** ^The sqlite3_initialize() routine is called internally by many other
-** SQLite interfaces so that an application usually does not need to
-** invoke sqlite3_initialize() directly. For example, [sqlite3_open()]
-** calls sqlite3_initialize() so the SQLite library will be automatically
-** initialized when [sqlite3_open()] is called if it has not been initialized
-** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT]
-** compile-time option, then the automatic calls to sqlite3_initialize()
-** are omitted and the application must call sqlite3_initialize() directly
-** prior to using any other SQLite interface. For maximum portability,
-** it is recommended that applications always invoke sqlite3_initialize()
-** directly prior to using any other SQLite interface. Future releases
-** of SQLite may require this.
In other words, the behavior exhibited -** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the -** default behavior in some future release of SQLite. -** -** The sqlite3_os_init() routine does operating-system specific -** initialization of the SQLite library. The sqlite3_os_end() -** routine undoes the effect of sqlite3_os_init(). Typical tasks -** performed by these routines include allocation or deallocation -** of static resources, initialization of global variables, -** setting up a default [sqlite3_vfs] module, or setting up -** a default configuration using [sqlite3_config()]. -** -** The application should never invoke either sqlite3_os_init() -** or sqlite3_os_end() directly. The application should only invoke -** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() -** interface is called automatically by sqlite3_initialize() and -** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate -** implementations for sqlite3_os_init() and sqlite3_os_end() -** are built into SQLite when it is compiled for Unix, Windows, or OS/2. -** When [custom builds | built for other platforms] -** (using the [SQLITE_OS_OTHER=1] compile-time -** option) the application must supply a suitable implementation for -** sqlite3_os_init() and sqlite3_os_end(). An application-supplied -** implementation of sqlite3_os_init() or sqlite3_os_end() -** must return [SQLITE_OK] on success and some other [error code] upon -** failure. -*/ -SQLITE_API int sqlite3_initialize(void); -SQLITE_API int sqlite3_shutdown(void); -SQLITE_API int sqlite3_os_init(void); -SQLITE_API int sqlite3_os_end(void); - -/* -** CAPI3REF: Configuring The SQLite Library -** -** The sqlite3_config() interface is used to make global configuration -** changes to SQLite in order to tune SQLite to the specific needs of -** the application. The default configuration is recommended for most -** applications and so this routine is usually not necessary. It is -** provided to support rare applications with unusual needs. -** -** The sqlite3_config() interface is not threadsafe. The application -** must insure that no other SQLite interfaces are invoked by other -** threads while sqlite3_config() is running. Furthermore, sqlite3_config() -** may only be invoked prior to library initialization using -** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. -** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. -** Note, however, that ^sqlite3_config() can be called as part of the -** implementation of an application-defined [sqlite3_os_init()]. -** -** The first argument to sqlite3_config() is an integer -** [configuration option] that determines -** what property of SQLite is to be configured. Subsequent arguments -** vary depending on the [configuration option] -** in the first argument. -** -** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. -** ^If the option is unknown or SQLite is unable to set the option -** then this routine returns a non-zero [error code]. -*/ -SQLITE_API int sqlite3_config(int, ...); - -/* -** CAPI3REF: Configure database connections -** -** The sqlite3_db_config() interface is used to make configuration -** changes to a [database connection]. The interface is similar to -** [sqlite3_config()] except that the changes apply to a single -** [database connection] (specified in the first argument). -** -** The second argument to sqlite3_db_config(D,V,...) 
is the -** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code -** that indicates what aspect of the [database connection] is being configured. -** Subsequent arguments vary depending on the configuration verb. -** -** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if -** the call is considered successful. -*/ -SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...); - -/* -** CAPI3REF: Memory Allocation Routines -** -** An instance of this object defines the interface between SQLite -** and low-level memory allocation routines. -** -** This object is used in only one place in the SQLite interface. -** A pointer to an instance of this object is the argument to -** [sqlite3_config()] when the configuration option is -** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC]. -** By creating an instance of this object -** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC]) -** during configuration, an application can specify an alternative -** memory allocation subsystem for SQLite to use for all of its -** dynamic memory needs. -** -** Note that SQLite comes with several [built-in memory allocators] -** that are perfectly adequate for the overwhelming majority of applications -** and that this object is only useful to a tiny minority of applications -** with specialized memory allocation requirements. This object is -** also used during testing of SQLite in order to specify an alternative -** memory allocator that simulates memory out-of-memory conditions in -** order to verify that SQLite recovers gracefully from such -** conditions. -** -** The xMalloc, xRealloc, and xFree methods must work like the -** malloc(), realloc() and free() functions from the standard C library. -** ^SQLite guarantees that the second argument to -** xRealloc is always a value returned by a prior call to xRoundup. -** -** xSize should return the allocated size of a memory allocation -** previously obtained from xMalloc or xRealloc. The allocated size -** is always at least as big as the requested size but may be larger. -** -** The xRoundup method returns what would be the allocated size of -** a memory allocation given a particular requested size. Most memory -** allocators round up memory allocations at least to the next multiple -** of 8. Some allocators round up to a larger multiple or to a power of 2. -** Every memory allocation request coming in through [sqlite3_malloc()] -** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0, -** that causes the corresponding memory allocation to fail. -** -** The xInit method initializes the memory allocator. (For example, -** it might allocate any require mutexes or initialize internal data -** structures. The xShutdown method is invoked (indirectly) by -** [sqlite3_shutdown()] and should deallocate any resources acquired -** by xInit. The pAppData pointer is used as the only parameter to -** xInit and xShutdown. -** -** SQLite holds the [SQLITE_MUTEX_STATIC_MASTER] mutex when it invokes -** the xInit method, so the xInit method need not be threadsafe. The -** xShutdown method is only called from [sqlite3_shutdown()] so it does -** not need to be threadsafe either. For all other methods, SQLite -** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the -** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which -** it is by default) and so the methods are automatically serialized. 
-** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other -** methods must be threadsafe or else make their own arrangements for -** serialization. -** -** SQLite will never invoke xInit() more than once without an intervening -** call to xShutdown(). -*/ -typedef struct sqlite3_mem_methods sqlite3_mem_methods; -struct sqlite3_mem_methods { - void *(*xMalloc)(int); /* Memory allocation function */ - void (*xFree)(void*); /* Free a prior allocation */ - void *(*xRealloc)(void*,int); /* Resize an allocation */ - int (*xSize)(void*); /* Return the size of an allocation */ - int (*xRoundup)(int); /* Round up request size to allocation size */ - int (*xInit)(void*); /* Initialize the memory allocator */ - void (*xShutdown)(void*); /* Deinitialize the memory allocator */ - void *pAppData; /* Argument to xInit() and xShutdown() */ -}; - -/* -** CAPI3REF: Configuration Options -** KEYWORDS: {configuration option} -** -** These constants are the available integer configuration options that -** can be passed as the first argument to the [sqlite3_config()] interface. -** -** New configuration options may be added in future releases of SQLite. -** Existing configuration options might be discontinued. Applications -** should check the return code from [sqlite3_config()] to make sure that -** the call worked. The [sqlite3_config()] interface will return a -** non-zero [error code] if a discontinued or unsupported configuration option -** is invoked. -** -**
-** [[SQLITE_CONFIG_SINGLETHREAD]] SQLITE_CONFIG_SINGLETHREAD
-** There are no arguments to this option. ^This option sets the
-** [threading mode] to Single-thread. In other words, it disables
-** all mutexing and puts SQLite into a mode where it can only be used
-** by a single thread. ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** it is not possible to change the [threading mode] from its default
-** value of Single-thread and so [sqlite3_config()] will return
-** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD
-** configuration option.
-**
-** [[SQLITE_CONFIG_MULTITHREAD]] SQLITE_CONFIG_MULTITHREAD
-** There are no arguments to this option. ^This option sets the
-** [threading mode] to Multi-thread. In other words, it disables
-** mutexing on [database connection] and [prepared statement] objects.
-** The application is responsible for serializing access to
-** [database connections] and [prepared statements]. But other mutexes
-** are enabled so that SQLite will be safe to use in a multi-threaded
-** environment as long as no two threads attempt to use the same
-** [database connection] at the same time. ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** it is not possible to set the Multi-thread [threading mode] and
-** [sqlite3_config()] will return [SQLITE_ERROR] if called with the
-** SQLITE_CONFIG_MULTITHREAD configuration option.
-**
-** [[SQLITE_CONFIG_SERIALIZED]] SQLITE_CONFIG_SERIALIZED
-** There are no arguments to this option. ^This option sets the
-** [threading mode] to Serialized. In other words, this option enables
-** all mutexes including the recursive
-** mutexes on [database connection] and [prepared statement] objects.
-** In this mode (which is the default when SQLite is compiled with
-** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access
-** to [database connections] and [prepared statements] so that the
-** application is free to use the same [database connection] or the
-** same [prepared statement] in different threads at the same time.
-** ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** it is not possible to set the Serialized [threading mode] and
-** [sqlite3_config()] will return [SQLITE_ERROR] if called with the
-** SQLITE_CONFIG_SERIALIZED configuration option.
-**
-** [[SQLITE_CONFIG_MALLOC]] SQLITE_CONFIG_MALLOC
-** ^(This option takes a single argument which is a pointer to an
-** instance of the [sqlite3_mem_methods] structure. The argument specifies
-** alternative low-level memory allocation routines to be used in place of
-** the memory allocation routines built into SQLite.)^ ^SQLite makes
-** its own private copy of the content of the [sqlite3_mem_methods] structure
-** before the [sqlite3_config()] call returns.
-**
-** [[SQLITE_CONFIG_GETMALLOC]] SQLITE_CONFIG_GETMALLOC
-** ^(This option takes a single argument which is a pointer to an
-** instance of the [sqlite3_mem_methods] structure. The [sqlite3_mem_methods]
-** structure is filled with the currently defined memory allocation routines.)^
-** This option can be used to overload the default memory allocation
-** routines with a wrapper that simulates memory allocation failure or
-** tracks memory usage, for example.
-**
-** [[SQLITE_CONFIG_MEMSTATUS]] SQLITE_CONFIG_MEMSTATUS
-** ^This option takes a single argument of type int, interpreted as a
-** boolean, which enables or disables the collection of memory allocation
-** statistics. ^(When memory allocation statistics are disabled, the
-** following SQLite interfaces become non-operational:
-**
-**   • [sqlite3_memory_used()]
-**   • [sqlite3_memory_highwater()]
-**   • [sqlite3_soft_heap_limit64()]
-**   • [sqlite3_status()]
-** )^
-** ^Memory allocation statistics are enabled by default unless SQLite is
-** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory
-** allocation statistics are disabled by default.
-**
-** [[SQLITE_CONFIG_SCRATCH]] SQLITE_CONFIG_SCRATCH
-** ^This option specifies a static memory buffer that SQLite can use for
-** scratch memory. There are three arguments: A pointer to an 8-byte
-** aligned memory buffer from which the scratch allocations will be
-** drawn, the size of each scratch allocation (sz),
-** and the maximum number of scratch allocations (N). The sz
-** argument must be a multiple of 16.
-** The first argument must be a pointer to an 8-byte aligned buffer
-** of at least sz*N bytes of memory.
-** ^SQLite will use no more than two scratch buffers per thread. So
-** N should be set to twice the expected maximum number of threads.
-** ^SQLite will never require a scratch buffer that is more than 6
-** times the database page size. ^If SQLite needs additional
-** scratch memory beyond what is provided by this configuration option, then
-** [sqlite3_malloc()] will be used to obtain the memory needed.
-**
-** [[SQLITE_CONFIG_PAGECACHE]] SQLITE_CONFIG_PAGECACHE
-** ^This option specifies a static memory buffer that SQLite can use for
-** the database page cache with the default page cache implementation.
-** This configuration should not be used if an application-defined page
-** cache implementation is loaded using the SQLITE_CONFIG_PCACHE2 option.
-** There are three arguments to this option: A pointer to 8-byte aligned
-** memory, the size of each page buffer (sz), and the number of pages (N).
-** The sz argument should be the size of the largest database page
-** (a power of two between 512 and 32768) plus a little extra for each
-** page header. ^The page header size is 20 to 40 bytes depending on
-** the host architecture. ^It is harmless, apart from the wasted memory,
-** to make sz a little too large. The first
-** argument should point to an allocation of at least sz*N bytes of memory.
-** ^SQLite will use the memory provided by the first argument to satisfy its
-** memory needs for the first N pages that it adds to cache. ^If additional
-** page cache memory is needed beyond what is provided by this option, then
-** SQLite goes to [sqlite3_malloc()] for the additional storage space.
-** The pointer in the first argument must
-** be aligned to an 8-byte boundary or subsequent behavior of SQLite
-** will be undefined.
-**
-** [[SQLITE_CONFIG_HEAP]] SQLITE_CONFIG_HEAP
-** ^This option specifies a static memory buffer that SQLite will use
-** for all of its dynamic memory allocation needs beyond those provided
-** for by [SQLITE_CONFIG_SCRATCH] and [SQLITE_CONFIG_PAGECACHE].
-** There are three arguments: An 8-byte aligned pointer to the memory,
-** the number of bytes in the memory buffer, and the minimum allocation size.
-** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts
-** to using its default memory allocator (the system malloc() implementation),
-** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the
-** memory pointer is not NULL and either [SQLITE_ENABLE_MEMSYS3] or
-** [SQLITE_ENABLE_MEMSYS5] are defined, then the alternative memory
-** allocator is engaged to handle all of SQLite's memory allocation needs.
-** The first pointer (the memory pointer) must be aligned to an 8-byte
-** boundary or subsequent behavior of SQLite will be undefined.
-** The minimum allocation size is capped at 2**12. Reasonable values
-** for the minimum allocation size are 2**5 through 2**8.
-**
-** [[SQLITE_CONFIG_MUTEX]] SQLITE_CONFIG_MUTEX
-** ^(This option takes a single argument which is a pointer to an
-** instance of the [sqlite3_mutex_methods] structure. The argument specifies
-** alternative low-level mutex routines to be used in place of
-** the mutex routines built into SQLite.)^ ^SQLite makes a copy of the
-** content of the [sqlite3_mutex_methods] structure before the call to
-** [sqlite3_config()] returns. ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** the entire mutexing subsystem is omitted from the build and hence calls to
-** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will
-** return [SQLITE_ERROR].
-**
-** [[SQLITE_CONFIG_GETMUTEX]] SQLITE_CONFIG_GETMUTEX
-** ^(This option takes a single argument which is a pointer to an
-** instance of the [sqlite3_mutex_methods] structure. The
-** [sqlite3_mutex_methods]
-** structure is filled with the currently defined mutex routines.)^
-** This option can be used to overload the default mutex allocation
-** routines with a wrapper used to track mutex usage for performance
-** profiling or testing, for example. ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** the entire mutexing subsystem is omitted from the build and hence calls to
-** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will
-** return [SQLITE_ERROR].
-**
-** [[SQLITE_CONFIG_LOOKASIDE]] SQLITE_CONFIG_LOOKASIDE
-** ^(This option takes two arguments that determine the default
-** memory allocation for the lookaside memory allocator on each
-** [database connection]. The first argument is the
-** size of each lookaside buffer slot and the second is the number of
-** slots allocated to each database connection.)^ ^(This option sets the
-** default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
-** verb to [sqlite3_db_config()] can be used to change the lookaside
-** configuration on individual connections.)^
-**
-** [[SQLITE_CONFIG_PCACHE2]] SQLITE_CONFIG_PCACHE2
-** ^(This option takes a single argument which is a pointer to
-** an [sqlite3_pcache_methods2] object. This object specifies the interface
-** to a custom page cache implementation.)^ ^SQLite makes a copy of the
-** object and uses it for page cache memory allocations.
-**
-** [[SQLITE_CONFIG_GETPCACHE2]] SQLITE_CONFIG_GETPCACHE2
-** ^(This option takes a single argument which is a pointer to an
-** [sqlite3_pcache_methods2] object. SQLite copies the current
-** page cache implementation into that object.)^
-**
-** [[SQLITE_CONFIG_LOG]] SQLITE_CONFIG_LOG
-** ^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a
-** function with a call signature of void(*)(void*,int,const char*),
-** and a pointer to void. ^If the function pointer is not NULL, it is
-** invoked by [sqlite3_log()] to process each logging event. ^If the
-** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op.
-** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is
-** passed through as the first parameter to the application-defined logger
-** function whenever that function is invoked. ^The second parameter to
-** the logger function is a copy of the first parameter to the corresponding
-** [sqlite3_log()] call and is intended to be a [result code] or an
-** [extended result code]. ^The third parameter passed to the logger is
-** the log message after formatting via [sqlite3_snprintf()].
-** The SQLite logging interface is not reentrant; the logger function
-** supplied by the application must not invoke any SQLite interface.
-** In a multi-threaded application, the application-defined logger
-** function must be threadsafe.
-**
-** [[SQLITE_CONFIG_URI]] SQLITE_CONFIG_URI
-** This option takes a single argument of type int. If non-zero, then
-** URI handling is globally enabled. If the parameter is zero, then URI handling
-** is globally disabled. If URI handling is globally enabled, all filenames
-** passed to [sqlite3_open()], [sqlite3_open_v2()], [sqlite3_open16()] or
-** specified as part of [ATTACH] commands are interpreted as URIs, regardless
-** of whether or not the [SQLITE_OPEN_URI] flag is set when the database
-** connection is opened. If it is globally disabled, filenames are
-** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the
-** database connection is opened. By default, URI handling is globally
-** disabled. The default value may be changed by compiling with the
-** [SQLITE_USE_URI] symbol defined.
-**
-** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]]
-** SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE
-** These options are obsolete and should not be used by new code.
-** They are retained for backwards compatibility but are now no-ops.
-*/
-#define SQLITE_CONFIG_SINGLETHREAD  1  /* nil */
-#define SQLITE_CONFIG_MULTITHREAD   2  /* nil */
-#define SQLITE_CONFIG_SERIALIZED    3  /* nil */
-#define SQLITE_CONFIG_MALLOC        4  /* sqlite3_mem_methods* */
-#define SQLITE_CONFIG_GETMALLOC     5  /* sqlite3_mem_methods* */
-#define SQLITE_CONFIG_SCRATCH       6  /* void*, int sz, int N */
-#define SQLITE_CONFIG_PAGECACHE     7  /* void*, int sz, int N */
-#define SQLITE_CONFIG_HEAP          8  /* void*, int nByte, int min */
-#define SQLITE_CONFIG_MEMSTATUS     9  /* boolean */
-#define SQLITE_CONFIG_MUTEX        10  /* sqlite3_mutex_methods* */
-#define SQLITE_CONFIG_GETMUTEX     11  /* sqlite3_mutex_methods* */
-/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */
-#define SQLITE_CONFIG_LOOKASIDE    13  /* int int */
-#define SQLITE_CONFIG_PCACHE       14  /* no-op */
-#define SQLITE_CONFIG_GETPCACHE    15  /* no-op */
-#define SQLITE_CONFIG_LOG          16  /* xFunc, void* */
-#define SQLITE_CONFIG_URI          17  /* int */
-#define SQLITE_CONFIG_PCACHE2      18  /* sqlite3_pcache_methods2* */
-#define SQLITE_CONFIG_GETPCACHE2   19  /* sqlite3_pcache_methods2* */
-
-/*
-** CAPI3REF: Database Connection Configuration Options
-**
-** These constants are the available integer configuration options that
-** can be passed as the second argument to the [sqlite3_db_config()] interface.
-**
-** New configuration options may be added in future releases of SQLite.
-** Existing configuration options might be discontinued. Applications
-** should check the return code from [sqlite3_db_config()] to make sure that
-** the call worked. ^The [sqlite3_db_config()] interface will return a
-** non-zero [error code] if a discontinued or unsupported configuration option
-** is invoked.
-**
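A minimal sketch of global configuration using two of the SQLITE_CONFIG_* verbs defined above; per the restrictions described earlier, the calls are made before sqlite3_initialize():

    #include <sqlite3.h>
    #include <stdio.h>

    /* Application-defined logger for SQLITE_CONFIG_LOG; the signature
    ** void(*)(void*,int,const char*) matches the documentation above. */
    static void log_cb(void *pArg, int iErrCode, const char *zMsg){
      (void)pArg;
      fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
    }

    /* Sketch: global configuration must happen before the library is
    ** initialized (or after sqlite3_shutdown()); otherwise SQLITE_MISUSE
    ** is returned. */
    static int configure_sqlite(void){
      sqlite3_config(SQLITE_CONFIG_SERIALIZED);        /* threading mode */
      sqlite3_config(SQLITE_CONFIG_LOG, log_cb, (void*)0);
      return sqlite3_initialize();                     /* SQLITE_OK on success */
    }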
-** SQLITE_DBCONFIG_LOOKASIDE
-** ^This option takes three additional arguments that determine the
-** [lookaside memory allocator] configuration for the [database connection].
-** ^The first argument (the third parameter to [sqlite3_db_config()]) is a
-** pointer to a memory buffer to use for lookaside memory.
-** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb
-** may be NULL in which case SQLite will allocate the
-** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the
-** size of each lookaside buffer slot. ^The third argument is the number of
-** slots. The size of the buffer in the first argument must be greater than
-** or equal to the product of the second and third arguments. The buffer
-** must be aligned to an 8-byte boundary. ^If the second argument to
-** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally
-** rounded down to the next smaller multiple of 8. ^(The lookaside memory
-** configuration for a database connection can only be changed when that
-** connection is not currently using lookaside memory, or in other words
-** when the "current value" returned by
-** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero.
-** Any attempt to change the lookaside memory configuration when lookaside
-** memory is in use leaves the configuration unchanged and returns
-** [SQLITE_BUSY].)^
-**
-** SQLITE_DBCONFIG_ENABLE_FKEY
-** ^This option is used to enable or disable the enforcement of
-** [foreign key constraints]. There should be two additional arguments.
-** The first argument is an integer which is 0 to disable FK enforcement,
-** positive to enable FK enforcement or negative to leave FK enforcement
-** unchanged. The second parameter is a pointer to an integer into which
-** is written 0 or 1 to indicate whether FK enforcement is off or on
-** following this call. The second parameter may be a NULL pointer, in
-** which case the FK enforcement setting is not reported back.
-**
-** SQLITE_DBCONFIG_ENABLE_TRIGGER
-** ^This option is used to enable or disable [CREATE TRIGGER | triggers].
-** There should be two additional arguments.
-** The first argument is an integer which is 0 to disable triggers,
-** positive to enable triggers or negative to leave the setting unchanged.
-** The second parameter is a pointer to an integer into which
-** is written 0 or 1 to indicate whether triggers are disabled or enabled
-** following this call. The second parameter may be a NULL pointer, in
-** which case the trigger setting is not reported back.
-**
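A minimal sketch of the per-connection analogue using SQLITE_DBCONFIG_ENABLE_FKEY and its (int, int*) argument convention; the helper name enable_fkeys() is illustrative:

    #include <sqlite3.h>

    /* Sketch: per-connection configuration. Enable foreign-key
    ** enforcement and read back the resulting state, following the
    ** (int, int*) argument convention described above. */
    static int enable_fkeys(sqlite3 *db){
      int isOn = -1;
      int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, 1, &isOn);
      return (rc==SQLITE_OK && isOn==1) ? SQLITE_OK : rc;
    }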
-*/ -#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ -#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ -#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ - - -/* -** CAPI3REF: Enable Or Disable Extended Result Codes -** -** ^The sqlite3_extended_result_codes() routine enables or disables the -** [extended result codes] feature of SQLite. ^The extended result -** codes are disabled by default for historical compatibility. -*/ -SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff); - -/* -** CAPI3REF: Last Insert Rowid -** -** ^Each entry in an SQLite table has a unique 64-bit signed -** integer key called the [ROWID | "rowid"]. ^The rowid is always available -** as an undeclared column named ROWID, OID, or _ROWID_ as long as those -** names are not also used by explicitly declared columns. ^If -** the table has a column of type [INTEGER PRIMARY KEY] then that column -** is another alias for the rowid. -** -** ^This routine returns the [rowid] of the most recent -** successful [INSERT] into the database from the [database connection] -** in the first argument. ^As of SQLite version 3.7.7, this routines -** records the last insert rowid of both ordinary tables and [virtual tables]. -** ^If no successful [INSERT]s -** have ever occurred on that database connection, zero is returned. -** -** ^(If an [INSERT] occurs within a trigger or within a [virtual table] -** method, then this routine will return the [rowid] of the inserted -** row as long as the trigger or virtual table method is running. -** But once the trigger or virtual table method ends, the value returned -** by this routine reverts to what it was before the trigger or virtual -** table method began.)^ -** -** ^An [INSERT] that fails due to a constraint violation is not a -** successful [INSERT] and does not change the value returned by this -** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, -** and INSERT OR ABORT make no changes to the return value of this -** routine when their insertion fails. ^(When INSERT OR REPLACE -** encounters a constraint violation, it does not fail. The -** INSERT continues to completion after deleting rows that caused -** the constraint problem so INSERT OR REPLACE will always change -** the return value of this interface.)^ -** -** ^For the purposes of this routine, an [INSERT] is considered to -** be successful even if it is subsequently rolled back. -** -** This function is accessible to SQL statements via the -** [last_insert_rowid() SQL function]. -** -** If a separate thread performs a new [INSERT] on the same -** database connection while the [sqlite3_last_insert_rowid()] -** function is running and thus changes the last insert [rowid], -** then the value returned by [sqlite3_last_insert_rowid()] is -** unpredictable and might not equal either the old or the new -** last insert [rowid]. -*/ -SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*); - -/* -** CAPI3REF: Count The Number Of Rows Modified -** -** ^This function returns the number of database rows that were changed -** or inserted or deleted by the most recently completed SQL statement -** on the [database connection] specified by the first parameter. -** ^(Only changes that are directly specified by the [INSERT], [UPDATE], -** or [DELETE] statement are counted. 
Auxiliary changes caused by -** triggers or [foreign key actions] are not counted.)^ Use the -** [sqlite3_total_changes()] function to find the total number of changes -** including changes caused by triggers and foreign key actions. -** -** ^Changes to a view that are simulated by an [INSTEAD OF trigger] -** are not counted. Only real table changes are counted. -** -** ^(A "row change" is a change to a single row of a single table -** caused by an INSERT, DELETE, or UPDATE statement. Rows that -** are changed as side effects of [REPLACE] constraint resolution, -** rollback, ABORT processing, [DROP TABLE], or by any other -** mechanisms do not count as direct row changes.)^ -** -** A "trigger context" is a scope of execution that begins and -** ends with the script of a [CREATE TRIGGER | trigger]. -** Most SQL statements are -** evaluated outside of any trigger. This is the "top level" -** trigger context. If a trigger fires from the top level, a -** new trigger context is entered for the duration of that one -** trigger. Subtriggers create subcontexts for their duration. -** -** ^Calling [sqlite3_exec()] or [sqlite3_step()] recursively does -** not create a new trigger context. -** -** ^This function returns the number of direct row changes in the -** most recent INSERT, UPDATE, or DELETE statement within the same -** trigger context. -** -** ^Thus, when called from the top level, this function returns the -** number of changes in the most recent INSERT, UPDATE, or DELETE -** that also occurred at the top level. ^(Within the body of a trigger, -** the sqlite3_changes() interface can be called to find the number of -** changes in the most recently completed INSERT, UPDATE, or DELETE -** statement within the body of the same trigger. -** However, the number returned does not include changes -** caused by subtriggers since those have their own context.)^ -** -** See also the [sqlite3_total_changes()] interface, the -** [count_changes pragma], and the [changes() SQL function]. -** -** If a separate thread makes changes on the same database connection -** while [sqlite3_changes()] is running then the value returned -** is unpredictable and not meaningful. -*/ -SQLITE_API int sqlite3_changes(sqlite3*); - -/* -** CAPI3REF: Total Number Of Rows Modified -** -** ^This function returns the number of row changes caused by [INSERT], -** [UPDATE] or [DELETE] statements since the [database connection] was opened. -** ^(The count returned by sqlite3_total_changes() includes all changes -** from all [CREATE TRIGGER | trigger] contexts and changes made by -** [foreign key actions]. However, -** the count does not include changes used to implement [REPLACE] constraints, -** do rollbacks or ABORT processing, or [DROP TABLE] processing. The -** count does not include rows of views that fire an [INSTEAD OF trigger], -** though if the INSTEAD OF trigger makes changes of its own, those changes -** are counted.)^ -** ^The sqlite3_total_changes() function counts the changes as soon as -** the statement that makes them is completed (when the statement handle -** is passed to [sqlite3_reset()] or [sqlite3_finalize()]). -** -** See also the [sqlite3_changes()] interface, the -** [count_changes pragma], and the [total_changes() SQL function]. -** -** If a separate thread makes changes on the same database connection -** while [sqlite3_total_changes()] is running then the value -** returned is unpredictable and not meaningful. 
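A short sketch of the difference between the two counters, assuming an open [database connection] db and an illustrative table t1 that has an UPDATE trigger (error handling elided):

    sqlite3_exec(db, "UPDATE t1 SET x = x + 1;", 0, 0, 0);
    int nDirect = sqlite3_changes(db);       /* direct row changes of the UPDATE only */
    int nTotal  = sqlite3_total_changes(db); /* cumulative count since the connection */
                                             /* opened, including trigger changes     */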
-*/ -SQLITE_API int sqlite3_total_changes(sqlite3*); - -/* -** CAPI3REF: Interrupt A Long-Running Query -** -** ^This function causes any pending database operation to abort and -** return at its earliest opportunity. This routine is typically -** called in response to a user action such as pressing "Cancel" -** or Ctrl-C where the user wants a long query operation to halt -** immediately. -** -** ^It is safe to call this routine from a thread different from the -** thread that is currently running the database operation. But it -** is not safe to call this routine with a [database connection] that -** is closed or might close before sqlite3_interrupt() returns. -** -** ^If an SQL operation is very nearly finished at the time when -** sqlite3_interrupt() is called, then it might not have an opportunity -** to be interrupted and might continue to completion. -** -** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT]. -** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE -** that is inside an explicit transaction, then the entire transaction -** will be rolled back automatically. -** -** ^The sqlite3_interrupt(D) call is in effect until all currently running -** SQL statements on [database connection] D complete. ^Any new SQL statements -** that are started after the sqlite3_interrupt() call and before the -** count of running statements reaches zero are interrupted as if they had been -** running prior to the sqlite3_interrupt() call. ^New SQL statements -** that are started after the running statement count reaches zero are -** not affected by the sqlite3_interrupt(). -** ^A call to sqlite3_interrupt(D) that occurs when there are no running -** SQL statements is a no-op and has no effect on SQL statements -** that are started after the sqlite3_interrupt() call returns. -** -** If the database connection closes while [sqlite3_interrupt()] -** is running then bad things will likely happen. -*/ -SQLITE_API void sqlite3_interrupt(sqlite3*); - -/* -** CAPI3REF: Determine If An SQL Statement Is Complete -** -** These routines are useful during command-line input to determine if the -** currently entered text seems to form a complete SQL statement or -** if additional input is needed before sending the text into -** SQLite for parsing. ^These routines return 1 if the input string -** appears to be a complete SQL statement. ^A statement is judged to be -** complete if it ends with a semicolon token and is not a prefix of a -** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within -** string literals or quoted identifier names or comments are not -** independent tokens (they are part of the token in which they are -** embedded) and thus do not count as a statement terminator. ^Whitespace -** and comments that follow the final semicolon are ignored. -** -** ^These routines return 0 if the statement is incomplete. ^If a -** memory allocation fails, then SQLITE_NOMEM is returned. -** -** ^These routines do not parse the SQL statements and thus -** will not detect syntactically incorrect SQL. -** -** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior -** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked -** automatically by sqlite3_complete16(). If that initialization fails, -** then the return value from sqlite3_complete16() will be non-zero -** regardless of whether or not the input SQL is complete.)^ -** -** The input to [sqlite3_complete()] must be a zero-terminated -** UTF-8 string. 
-** -** The input to [sqlite3_complete16()] must be a zero-terminated -** UTF-16 string in native byte order. -*/ -SQLITE_API int sqlite3_complete(const char *sql); -SQLITE_API int sqlite3_complete16(const void *sql); - -/* -** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors -** -** ^This routine sets a callback function that might be invoked whenever -** an attempt is made to open a database table that another thread -** or process has locked. -** -** ^If the busy callback is NULL, then [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] -** is returned immediately upon encountering the lock. ^If the busy callback -** is not NULL, then the callback might be invoked with two arguments. -** -** ^The first argument to the busy handler is a copy of the void* pointer which -** is the third argument to sqlite3_busy_handler(). ^The second argument to -** the busy handler callback is the number of times that the busy handler has -** been invoked for this locking event. ^If the -** busy callback returns 0, then no additional attempts are made to -** access the database and [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED] is returned. -** ^If the callback returns non-zero, then another attempt -** is made to open the database for reading and the cycle repeats. -** -** The presence of a busy handler does not guarantee that it will be invoked -** when there is lock contention. ^If SQLite determines that invoking the busy -** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY] -** or [SQLITE_IOERR_BLOCKED] instead of invoking the busy handler. -** Consider a scenario where one process is holding a read lock that -** it is trying to promote to a reserved lock and -** a second process is holding a reserved lock that it is trying -** to promote to an exclusive lock. The first process cannot proceed -** because it is blocked by the second and the second process cannot -** proceed because it is blocked by the first. If both processes -** invoke the busy handlers, neither will make any progress. Therefore, -** SQLite returns [SQLITE_BUSY] for the first process, hoping that this -** will induce the first process to release its read lock and allow -** the second process to proceed. -** -** ^The default busy callback is NULL. -** -** ^The [SQLITE_BUSY] error is converted to [SQLITE_IOERR_BLOCKED] -** when SQLite is in the middle of a large transaction where all the -** changes will not fit into the in-memory cache. SQLite will -** already hold a RESERVED lock on the database file, but it needs -** to promote this lock to EXCLUSIVE so that it can spill cache -** pages into the database file without harm to concurrent -** readers. ^If it is unable to promote the lock, then the in-memory -** cache will be left in an inconsistent state and so the error -** code is promoted from the relatively benign [SQLITE_BUSY] to -** the more severe [SQLITE_IOERR_BLOCKED]. ^This error code promotion -** forces an automatic rollback of the changes. See the -** -** CorruptionFollowingBusyError wiki page for a discussion of why -** this is important. -** -** ^(There can only be a single busy handler defined for each -** [database connection]. Setting a new busy handler clears any -** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()] -** will also set or clear the busy handler. -** -** The busy callback should not take any actions which modify the -** database connection that invoked the busy handler. Any such actions -** result in undefined behavior. 
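A sketch of a simple bounded busy handler consistent with the rules above (the names are illustrative and error handling is elided):

    static int xBusy(void *pArg, int nPrior){
      /* nPrior counts prior invocations for this locking event.
      ** Return non-zero to retry the lock, zero to give up with SQLITE_BUSY. */
      return nPrior < 10;
    }
    /* ... */
    sqlite3_busy_handler(db, xBusy, 0);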
-** -** A busy handler must not close the database connection -** or [prepared statement] that invoked the busy handler. -*/ -SQLITE_API int sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); - -/* -** CAPI3REF: Set A Busy Timeout -** -** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps -** for a specified amount of time when a table is locked. ^The handler -** will sleep multiple times until at least "ms" milliseconds of sleeping -** have accumulated. ^After at least "ms" milliseconds of sleeping, -** the handler returns 0 which causes [sqlite3_step()] to return -** [SQLITE_BUSY] or [SQLITE_IOERR_BLOCKED]. -** -** ^Calling this routine with an argument less than or equal to zero -** turns off all busy handlers. -** -** ^(There can only be a single busy handler for a particular -** [database connection] at any given moment. If another busy handler -** was defined (using [sqlite3_busy_handler()]) prior to calling -** this routine, that other busy handler is cleared.)^ -*/ -SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); - -/* -** CAPI3REF: Convenience Routines For Running Queries -** -** This is a legacy interface that is preserved for backwards compatibility. -** Use of this interface is not recommended. -** -** Definition: A result table is a memory data structure created by the -** [sqlite3_get_table()] interface. A result table records the -** complete query results from one or more queries. -** -** The table conceptually has a number of rows and columns. But -** these numbers are not part of the result table itself. These -** numbers are obtained separately. Let N be the number of rows -** and M be the number of columns. -** -** A result table is an array of pointers to zero-terminated UTF-8 strings. -** There are (N+1)*M elements in the array. The first M pointers point -** to zero-terminated strings that contain the names of the columns. -** The remaining entries all point to query results. NULL values result -** in NULL pointers. All other values are in their UTF-8 zero-terminated -** string representation as returned by [sqlite3_column_text()]. -** -** A result table might consist of one or more memory allocations. -** It is not safe to pass a result table directly to [sqlite3_free()]. -** A result table should be deallocated using [sqlite3_free_table()]. -** -** ^(As an example of the result table format, suppose a query result -** is as follows: -** -**
-**        Name        | Age
-**        -----------------------
-**        Alice       | 43
-**        Bob         | 28
-**        Cindy       | 21
-** 
)^ -** -** There are two columns (M==2) and three rows (N==3). Thus the -** result table has 8 entries. Suppose the result table is stored -** in an array named azResult. Then azResult holds this content: -** -**
-**        azResult[0] = "Name";
-**        azResult[1] = "Age";
-**        azResult[2] = "Alice";
-**        azResult[3] = "43";
-**        azResult[4] = "Bob";
-**        azResult[5] = "28";
-**        azResult[6] = "Cindy";
-**        azResult[7] = "21";
-** 
)^ -** -** ^The sqlite3_get_table() function evaluates one or more -** semicolon-separated SQL statements in the zero-terminated UTF-8 -** string of its 2nd parameter and returns a result table to the -** pointer given in its 3rd parameter. -** -** After the application has finished with the result from sqlite3_get_table(), -** it must pass the result table pointer to sqlite3_free_table() in order to -** release the memory that was malloced. Because of the way the -** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling -** function must not try to call [sqlite3_free()] directly. Only -** [sqlite3_free_table()] is able to release the memory properly and safely. -** -** The sqlite3_get_table() interface is implemented as a wrapper around -** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access -** to any internal data structures of SQLite. It uses only the public -** interface defined here. As a consequence, errors that occur in the -** wrapper layer outside of the internal [sqlite3_exec()] call are not -** reflected in subsequent calls to [sqlite3_errcode()] or -** [sqlite3_errmsg()]. -*/ -SQLITE_API int sqlite3_get_table( - sqlite3 *db, /* An open database */ - const char *zSql, /* SQL to be evaluated */ - char ***pazResult, /* Results of the query */ - int *pnRow, /* Number of result rows written here */ - int *pnColumn, /* Number of result columns written here */ - char **pzErrmsg /* Error msg written here */ -); -SQLITE_API void sqlite3_free_table(char **result); - -/* -** CAPI3REF: Formatted String Printing Functions -** -** These routines are work-alikes of the "printf()" family of functions -** from the standard C library. -** -** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their -** results into memory obtained from [sqlite3_malloc()]. -** The strings returned by these two routines should be -** released by [sqlite3_free()]. ^Both routines return a -** NULL pointer if [sqlite3_malloc()] is unable to allocate enough -** memory to hold the resulting string. -** -** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from -** the standard C library. The result is written into the -** buffer supplied as the second parameter whose size is given by -** the first parameter. Note that the order of the -** first two parameters is reversed from snprintf().)^ This is an -** historical accident that cannot be fixed without breaking -** backwards compatibility. ^(Note also that sqlite3_snprintf() -** returns a pointer to its buffer instead of the number of -** characters actually written into the buffer.)^ We admit that -** the number of characters written would be a more useful return -** value but we cannot change the implementation of sqlite3_snprintf() -** now without breaking compatibility. -** -** ^As long as the buffer size is greater than zero, sqlite3_snprintf() -** guarantees that the buffer is always zero-terminated. ^The first -** parameter "n" is the total size of the buffer, including space for -** the zero terminator. So the longest string that can be completely -** written will be n-1 characters. -** -** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). -** -** These routines all implement some additional formatting -** options that are useful for constructing SQL statements. -** All of the usual printf() formatting options apply. In addition, there -** are "%q", "%Q", and "%z" options. 
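Before turning to those formatting options, here is a minimal sketch of the sqlite3_get_table()/sqlite3_free_table() round trip described above, assuming an open [database connection] db, an illustrative table t1, and <stdio.h> (error handling elided):

    char **azResult = 0;
    char *zErrMsg = 0;
    int nRow = 0, nCol = 0;
    int rc = sqlite3_get_table(db, "SELECT name, age FROM t1;",
                               &azResult, &nRow, &nCol, &zErrMsg);
    if( rc==SQLITE_OK ){
      int i;
      for(i=0; i<(nRow+1)*nCol; i++){          /* the first nCol entries hold column names */
        printf("%s\n", azResult[i] ? azResult[i] : "NULL");
      }
    }
    sqlite3_free_table(azResult);   /* never plain sqlite3_free() */
    sqlite3_free(zErrMsg);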
-** -** ^(The %q option works like %s in that it substitutes a nul-terminated -** string from the argument list. But %q also doubles every '\'' character. -** %q is designed for use inside a string literal.)^ By doubling each '\'' -** character it escapes that character and allows it to be inserted into -** the string. -** -** For example, assume the string variable zText contains text as follows: -** -**
-**  char *zText = "It's a happy day!";
-** 
-** -** One can use this text in an SQL statement as follows: -** -**
-**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES('%q')", zText);
-**  sqlite3_exec(db, zSQL, 0, 0, 0);
-**  sqlite3_free(zSQL);
-** 
-** -** Because the %q format string is used, the '\'' character in zText -** is escaped and the SQL generated is as follows: -** -**
-**  INSERT INTO table1 VALUES('It''s a happy day!')
-** 
-** -** This is correct. Had we used %s instead of %q, the generated SQL -** would have looked like this: -** -**
-**  INSERT INTO table1 VALUES('It's a happy day!');
-** 
-** -** This second example is an SQL syntax error. As a general rule you should -** always use %q instead of %s when inserting text into a string literal. -** -** ^(The %Q option works like %q except it also adds single quotes around -** the outside of the total string. Additionally, if the parameter in the -** argument list is a NULL pointer, %Q substitutes the text "NULL" (without -** single quotes).)^ So, for example, one could say: -** -**
-**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES(%Q)", zText);
-**  sqlite3_exec(db, zSQL, 0, 0, 0);
-**  sqlite3_free(zSQL);
-** 
-** -** The code above will render a correct SQL statement in the zSQL -** variable even if the zText variable is a NULL pointer. -** -** ^(The "%z" formatting option works like "%s" but with the -** addition that after the string has been read and copied into -** the result, [sqlite3_free()] is called on the input string.)^ -*/ -SQLITE_API char *sqlite3_mprintf(const char*,...); -SQLITE_API char *sqlite3_vmprintf(const char*, va_list); -SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...); -SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); - -/* -** CAPI3REF: Memory Allocation Subsystem -** -** The SQLite core uses these three routines for all of its own -** internal memory allocation needs. "Core" in the previous sentence -** does not include operating-system specific VFS implementation. The -** Windows VFS uses native malloc() and free() for some operations. -** -** ^The sqlite3_malloc() routine returns a pointer to a block -** of memory at least N bytes in length, where N is the parameter. -** ^If sqlite3_malloc() is unable to obtain sufficient free -** memory, it returns a NULL pointer. ^If the parameter N to -** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns -** a NULL pointer. -** -** ^Calling sqlite3_free() with a pointer previously returned -** by sqlite3_malloc() or sqlite3_realloc() releases that memory so -** that it might be reused. ^The sqlite3_free() routine is -** a no-op if it is called with a NULL pointer. Passing a NULL pointer -** to sqlite3_free() is harmless. After being freed, memory -** should neither be read nor written. Even reading previously freed -** memory might result in a segmentation fault or other severe error. -** Memory corruption, a segmentation fault, or other severe error -** might result if sqlite3_free() is called with a non-NULL pointer that -** was not obtained from sqlite3_malloc() or sqlite3_realloc(). -** -** ^(The sqlite3_realloc() interface attempts to resize a -** prior memory allocation to be at least N bytes, where N is the -** second parameter. The memory allocation to be resized is the first -** parameter.)^ ^If the first parameter to sqlite3_realloc() -** is a NULL pointer then its behavior is identical to calling -** sqlite3_malloc(N) where N is the second parameter to sqlite3_realloc(). -** ^If the second parameter to sqlite3_realloc() is zero or -** negative then the behavior is exactly the same as calling -** sqlite3_free(P) where P is the first parameter to sqlite3_realloc(). -** ^sqlite3_realloc() returns a pointer to a memory allocation -** of at least N bytes in size or NULL if sufficient memory is unavailable. -** ^If M is the size of the prior allocation, then min(N,M) bytes -** of the prior allocation are copied into the beginning of the buffer returned -** by sqlite3_realloc() and the prior allocation is freed. -** ^If sqlite3_realloc() returns NULL, then the prior allocation -** is not freed. -** -** ^The memory returned by sqlite3_malloc() and sqlite3_realloc() -** is always aligned to at least an 8 byte boundary, or to a -** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time -** option is used. -** -** In SQLite version 3.5.0 and 3.5.1, it was possible to define -** the SQLITE_OMIT_MEMORY_ALLOCATION which would cause the built-in -** implementation of these routines to be omitted. That capability -** is no longer provided. Only built-in memory allocators can be used. 
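A sketch of the allocator round trip implied by the rules above (error handling elided):

    char *p = sqlite3_malloc(64);         /* at least 64 bytes, 8-byte aligned */
    if( p ){
      char *pNew = sqlite3_realloc(p, 128);
      if( pNew ) p = pNew;                /* on failure the old block is NOT freed */
    }
    sqlite3_free(p);                      /* sqlite3_free(NULL) is a harmless no-op */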
-** -** Prior to SQLite version 3.7.10, the Windows OS interface layer called -** the system malloc() and free() directly when converting -** filenames between the UTF-8 encoding used by SQLite -** and whatever filename encoding is used by the particular Windows -** installation. Memory allocation errors were detected, but -** they were reported back as [SQLITE_CANTOPEN] or -** [SQLITE_IOERR] rather than [SQLITE_NOMEM]. -** -** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()] -** must be either NULL or else pointers obtained from a prior -** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have -** not yet been released. -** -** The application must not read or write any part of -** a block of memory after it has been released using -** [sqlite3_free()] or [sqlite3_realloc()]. -*/ -SQLITE_API void *sqlite3_malloc(int); -SQLITE_API void *sqlite3_realloc(void*, int); -SQLITE_API void sqlite3_free(void*); - -/* -** CAPI3REF: Memory Allocator Statistics -** -** SQLite provides these two interfaces for reporting on the status -** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()] -** routines, which form the built-in memory allocation subsystem. -** -** ^The [sqlite3_memory_used()] routine returns the number of bytes -** of memory currently outstanding (malloced but not freed). -** ^The [sqlite3_memory_highwater()] routine returns the maximum -** value of [sqlite3_memory_used()] since the high-water mark -** was last reset. ^The values returned by [sqlite3_memory_used()] and -** [sqlite3_memory_highwater()] include any overhead -** added by SQLite in its implementation of [sqlite3_malloc()], -** but not overhead added by any underlying system library -** routines that [sqlite3_malloc()] may call. -** -** ^The memory high-water mark is reset to the current value of -** [sqlite3_memory_used()] if and only if the parameter to -** [sqlite3_memory_highwater()] is true. ^The value returned -** by [sqlite3_memory_highwater(1)] is the high-water mark -** prior to the reset. -*/ -SQLITE_API sqlite3_int64 sqlite3_memory_used(void); -SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag); - -/* -** CAPI3REF: Pseudo-Random Number Generator -** -** SQLite contains a high-quality pseudo-random number generator (PRNG) used to -** select random [ROWID | ROWIDs] when inserting new records into a table that -** already uses the largest possible [ROWID]. The PRNG is also used for -** the built-in random() and randomblob() SQL functions. This interface allows -** applications to access the same PRNG for other purposes. -** -** ^A call to this routine stores N bytes of randomness into buffer P. -** -** ^The first time this routine is invoked (either internally or by -** the application) the PRNG is seeded using randomness obtained -** from the xRandomness method of the default [sqlite3_vfs] object. -** ^On all subsequent invocations, the pseudo-randomness is generated -** internally and without recourse to the [sqlite3_vfs] xRandomness -** method. -*/ -SQLITE_API void sqlite3_randomness(int N, void *P); - -/* -** CAPI3REF: Compile-Time Authorization Callbacks -** -** ^This routine registers an authorizer callback with a particular -** [database connection], supplied in the first argument. -** ^The authorizer callback is invoked as SQL statements are being compiled -** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()], -** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()]. 
^At various -** points during the compilation process, as logic is being created -** to perform various actions, the authorizer callback is invoked to -** see if those actions are allowed. ^The authorizer callback should -** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the -** specific action but allow the SQL statement to continue to be -** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be -** rejected with an error. ^If the authorizer callback returns -** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY] -** then the [sqlite3_prepare_v2()] or equivalent call that triggered -** the authorizer will fail with an error message. -** -** When the callback returns [SQLITE_OK], that means the operation -** requested is ok. ^When the callback returns [SQLITE_DENY], the -** [sqlite3_prepare_v2()] or equivalent call that triggered the -** authorizer will fail with an error message explaining that -** access is denied. -** -** ^The first parameter to the authorizer callback is a copy of the third -** parameter to the sqlite3_set_authorizer() interface. ^The second parameter -** to the callback is an integer [SQLITE_COPY | action code] that specifies -** the particular action to be authorized. ^The third through sixth parameters -** to the callback are zero-terminated strings that contain additional -** details about the action to be authorized. -** -** ^If the action code is [SQLITE_READ] -** and the callback returns [SQLITE_IGNORE] then the -** [prepared statement] is constructed to substitute -** a NULL value in place of the table column that would have -** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE] -** return can be used to deny an untrusted user access to individual -** columns of a table. -** ^If the action code is [SQLITE_DELETE] and the callback returns -** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the -** [truncate optimization] is disabled and all rows are deleted individually. -** -** An authorizer is used when [sqlite3_prepare | preparing] -** SQL statements from an untrusted source, to ensure that the SQL statements -** do not try to access data they are not allowed to see, or that they do not -** try to execute malicious statements that damage the database. For -** example, an application may allow a user to enter arbitrary -** SQL queries for evaluation by a database. But the application does -** not want the user to be able to make arbitrary changes to the -** database. An authorizer could then be put in place while the -** user-entered SQL is being [sqlite3_prepare | prepared] that -** disallows everything except [SELECT] statements. -** -** Applications that need to process SQL from untrusted sources -** might also consider lowering resource limits using [sqlite3_limit()] -** and limiting database size using the [max_page_count] [PRAGMA] -** in addition to using an authorizer. -** -** ^(Only a single authorizer can be in place on a database connection -** at a time. Each call to sqlite3_set_authorizer overrides the -** previous call.)^ ^Disable the authorizer by installing a NULL callback. -** The authorizer is disabled by default. -** -** The authorizer callback must not do anything that will modify -** the database connection that invoked the authorizer callback. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. 
-** -** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the -** statement might be re-prepared during [sqlite3_step()] due to a -** schema change. Hence, the application should ensure that the -** correct authorizer callback remains in place during the [sqlite3_step()]. -** -** ^Note that the authorizer callback is invoked only during -** [sqlite3_prepare()] or its variants. Authorization is not -** performed during statement evaluation in [sqlite3_step()], unless, -** as stated in the previous paragraph, sqlite3_step() invokes -** sqlite3_prepare_v2() to reprepare a statement after a schema change. -*/ -SQLITE_API int sqlite3_set_authorizer( - sqlite3*, - int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), - void *pUserData -); - -/* -** CAPI3REF: Authorizer Return Codes -** -** The [sqlite3_set_authorizer | authorizer callback function] must -** return either [SQLITE_OK] or one of these two constants in order -** to signal SQLite whether or not the action is permitted. See the -** [sqlite3_set_authorizer | authorizer documentation] for additional -** information. -** -** Note that SQLITE_IGNORE is also used as a [SQLITE_ROLLBACK | return code] -** from the [sqlite3_vtab_on_conflict()] interface. -*/ -#define SQLITE_DENY 1 /* Abort the SQL statement with an error */ -#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ - -/* -** CAPI3REF: Authorizer Action Codes -** -** The [sqlite3_set_authorizer()] interface registers a callback function -** that is invoked to authorize certain SQL statement actions. The -** second parameter to the callback is an integer code that specifies -** what action is being authorized. These are the integer action codes that -** the authorizer callback may be passed. -** -** These action code values signify what kind of operation is to be -** authorized. The 3rd and 4th parameters to the authorization -** callback function will be parameters or NULL depending on which of these -** codes is used as the second parameter. ^(The 5th parameter to the -** authorizer callback is the name of the database ("main", "temp", -** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback -** is the name of the inner-most trigger or view that is responsible for -** the access attempt or NULL if this access attempt is directly from -** top-level SQL code. 
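A sketch of an authorizer in the spirit of the untrusted-SQL scenario above, permitting only SELECT statements and the column reads they imply (the names here are illustrative):

    static int xAuth(void *pUserData, int code, const char *z1,
                     const char *z2, const char *z3, const char *z4){
      if( code==SQLITE_SELECT || code==SQLITE_READ ) return SQLITE_OK;
      return SQLITE_DENY;               /* reject everything else outright */
    }
    /* ... */
    sqlite3_set_authorizer(db, xAuth, 0);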
-*/ -/******************************************* 3rd ************ 4th ***********/ -#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ -#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ -#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ -#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ -#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ -#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ -#define SQLITE_CREATE_VIEW 8 /* View Name NULL */ -#define SQLITE_DELETE 9 /* Table Name NULL */ -#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ -#define SQLITE_DROP_TABLE 11 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ -#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ -#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ -#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ -#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ -#define SQLITE_DROP_VIEW 17 /* View Name NULL */ -#define SQLITE_INSERT 18 /* Table Name NULL */ -#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ -#define SQLITE_READ 20 /* Table Name Column Name */ -#define SQLITE_SELECT 21 /* NULL NULL */ -#define SQLITE_TRANSACTION 22 /* Operation NULL */ -#define SQLITE_UPDATE 23 /* Table Name Column Name */ -#define SQLITE_ATTACH 24 /* Filename NULL */ -#define SQLITE_DETACH 25 /* Database Name NULL */ -#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ -#define SQLITE_REINDEX 27 /* Index Name NULL */ -#define SQLITE_ANALYZE 28 /* Table Name NULL */ -#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ -#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ -#define SQLITE_FUNCTION 31 /* NULL Function Name */ -#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */ -#define SQLITE_COPY 0 /* No longer used */ - -/* -** CAPI3REF: Tracing And Profiling Functions -** -** These routines register callback functions that can be used for -** tracing and profiling the execution of SQL statements. -** -** ^The callback function registered by sqlite3_trace() is invoked at -** various times when an SQL statement is being run by [sqlite3_step()]. -** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the -** SQL statement text as the statement first begins executing. -** ^(Additional sqlite3_trace() callbacks might occur -** as each triggered subprogram is entered. The callbacks for triggers -** contain a UTF-8 SQL comment that identifies the trigger.)^ -** -** ^The callback function registered by sqlite3_profile() is invoked -** as each SQL statement finishes. ^The profile callback contains -** the original statement text and an estimate of wall-clock time -** of how long that statement took to run. ^The profile callback -** time is in units of nanoseconds; however, the current implementation -** is only capable of millisecond resolution, so the six least significant -** digits in the time are meaningless. Future versions of SQLite -** might provide greater resolution on the profiler callback. The -** sqlite3_profile() function is considered experimental and is -** subject to change in future versions of SQLite. 
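For example, a minimal trace callback that logs each statement as it begins executing (a sketch assuming <stdio.h>; the names are illustrative):

    static void xTrace(void *pArg, const char *zSql){
      fprintf(stderr, "SQL: %s\n", zSql);   /* zSql is the UTF-8 statement text */
    }
    /* ... */
    sqlite3_trace(db, xTrace, 0);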
-*/ -SQLITE_API void *sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); -SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_profile(sqlite3*, - void(*xProfile)(void*,const char*,sqlite3_uint64), void*); - -/* -** CAPI3REF: Query Progress Callbacks -** -** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback -** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for -** database connection D. An example use for this -** interface is to keep a GUI updated during a large query. -** -** ^The parameter P is passed through as the only parameter to the -** callback function X. ^The parameter N is the number of -** [virtual machine instructions] that are evaluated between successive -** invocations of the callback X. -** -** ^Only a single progress handler may be defined at one time per -** [database connection]; setting a new progress handler cancels the -** old one. ^Setting parameter X to NULL disables the progress handler. -** ^The progress handler is also disabled by setting N to a value less -** than 1. -** -** ^If the progress callback returns non-zero, the operation is -** interrupted. This feature can be used to implement a -** "Cancel" button on a GUI progress dialog box. -** -** The progress handler callback must not do anything that will modify -** the database connection that invoked the progress handler. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -*/ -SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); - -/* -** CAPI3REF: Opening A New Database Connection -** -** ^These routines open an SQLite database file as specified by the -** filename argument. ^The filename argument is interpreted as UTF-8 for -** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte -** order for sqlite3_open16(). ^(A [database connection] handle is usually -** returned in *ppDb, even if an error occurs. The only exception is that -** if SQLite is unable to allocate memory to hold the [sqlite3] object, -** a NULL will be written into *ppDb instead of a pointer to the [sqlite3] -** object.)^ ^(If the database is opened (and/or created) successfully, then -** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The -** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain -** an English language description of the error following a failure of any -** of the sqlite3_open() routines. -** -** ^The default encoding for the database will be UTF-8 if -** sqlite3_open() or sqlite3_open_v2() is called and -** UTF-16 in the native byte order if sqlite3_open16() is used. -** -** Whether or not an error occurs when it is opened, resources -** associated with the [database connection] handle should be released by -** passing it to [sqlite3_close()] when it is no longer required. -** -** The sqlite3_open_v2() interface works like sqlite3_open() -** except that it accepts two additional parameters for additional control -** over the new database connection. ^(The flags parameter to -** sqlite3_open_v2() can take one of -** the following three values, optionally combined with the -** [SQLITE_OPEN_NOMUTEX], [SQLITE_OPEN_FULLMUTEX], [SQLITE_OPEN_SHAREDCACHE], -** [SQLITE_OPEN_PRIVATECACHE], and/or [SQLITE_OPEN_URI] flags:)^ -** -**
-** ^(
[SQLITE_OPEN_READONLY]
-**
The database is opened in read-only mode. If the database does not -** already exist, an error is returned.
)^ -** -** ^(
[SQLITE_OPEN_READWRITE]
-**
The database is opened for reading and writing if possible, or reading -** only if the file is write protected by the operating system. In either -** case the database must already exist, otherwise an error is returned.
)^ -** -** ^(
[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
-**
The database is opened for reading and writing, and is created if -** it does not already exist. This is the behavior that is always used for -** sqlite3_open() and sqlite3_open16().
)^ -**
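A minimal sketch of the common create-or-open case using the third combination above (assuming <stdio.h>; error handling abbreviated):

    sqlite3 *db = 0;
    int rc = sqlite3_open_v2("data.db", &db,
                             SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
    }
    sqlite3_close(db);   /* release the handle even after a failed open */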
-** -** If the 3rd parameter to sqlite3_open_v2() is not one of the -** combinations shown above optionally combined with other -** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] -** then the behavior is undefined. -** -** ^If the [SQLITE_OPEN_NOMUTEX] flag is set, then the database connection -** opens in the multi-thread [threading mode] as long as the single-thread -** mode has not been set at compile-time or start-time. ^If the -** [SQLITE_OPEN_FULLMUTEX] flag is set then the database connection opens -** in the serialized [threading mode] unless single-thread was -** previously selected at compile-time or start-time. -** ^The [SQLITE_OPEN_SHAREDCACHE] flag causes the database connection to be -** eligible to use [shared cache mode], regardless of whether or not shared -** cache is enabled using [sqlite3_enable_shared_cache()]. ^The -** [SQLITE_OPEN_PRIVATECACHE] flag causes the database connection to not -** participate in [shared cache mode] even if it is enabled. -** -** ^The fourth parameter to sqlite3_open_v2() is the name of the -** [sqlite3_vfs] object that defines the operating system interface that -** the new database connection should use. ^If the fourth parameter is -** a NULL pointer then the default [sqlite3_vfs] object is used. -** -** ^If the filename is ":memory:", then a private, temporary in-memory database -** is created for the connection. ^This in-memory database will vanish when -** the database connection is closed. Future versions of SQLite might -** make use of additional special filenames that begin with the ":" character. -** It is recommended that when a database filename actually does begin with -** a ":" character you should prefix the filename with a pathname such as -** "./" to avoid ambiguity. -** -** ^If the filename is an empty string, then a private, temporary -** on-disk database will be created. ^This private database will be -** automatically deleted as soon as the database connection is closed. -** -** [[URI filenames in sqlite3_open()]]

URI Filenames

-** -** ^If [URI filename] interpretation is enabled, and the filename argument -** begins with "file:", then the filename is interpreted as a URI. ^URI -** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is -** set in the third argument to sqlite3_open_v2(), or if it has -** been enabled globally using the [SQLITE_CONFIG_URI] option with the -** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option. -** As of SQLite version 3.7.7, URI filename interpretation is turned off -** by default, but future releases of SQLite might enable URI filename -** interpretation by default. See "[URI filenames]" for additional -** information. -** -** URI filenames are parsed according to RFC 3986. ^If the URI contains an -** authority, then it must be either an empty string or the string -** "localhost". ^If the authority is not an empty string or "localhost", an -** error is returned to the caller. ^The fragment component of a URI, if -** present, is ignored. -** -** ^SQLite uses the path component of the URI as the name of the disk file -** which contains the database. ^If the path begins with a '/' character, -** then it is interpreted as an absolute path. ^If the path does not begin -** with a '/' (meaning that the authority section is omitted from the URI) -** then the path is interpreted as a relative path. -** ^On Windows, the first component of an absolute path -** is a drive specification (e.g. "C:"). -** -** [[core URI query parameters]] -** The query component of a URI may contain parameters that are interpreted -** either by SQLite itself, or by a [VFS | custom VFS implementation]. -** SQLite interprets the following three query parameters: -** -**
    -**
  • vfs: ^The "vfs" parameter may be used to specify the name of -** a VFS object that provides the operating system interface that should -** be used to access the database file on disk. ^If this option is set to -** an empty string the default VFS object is used. ^Specifying an unknown -** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is -** present, then the VFS specified by the option takes precedence over -** the value passed as the fourth parameter to sqlite3_open_v2(). -** -**
  • mode: ^(The mode parameter may be set to either "ro", "rw", -** "rwc", or "memory". Attempting to set it to any other value is -** an error)^. -** ^If "ro" is specified, then the database is opened for read-only -** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the -** third argument to sqlite3_open_v2(). ^If the mode option is set to -** "rw", then the database is opened for read-write (but not create) -** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had -** been set. ^Value "rwc" is equivalent to setting both -** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is -** set to "memory" then a pure [in-memory database] that never reads -** or writes from disk is used. ^It is an error to specify a value for -** the mode parameter that is less restrictive than that specified by -** the flags passed in the third parameter to sqlite3_open_v2(). -** -**
  • cache: ^The cache parameter may be set to either "shared" or -** "private". ^Setting it to "shared" is equivalent to setting the -** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to -** sqlite3_open_v2(). ^Setting the cache parameter to "private" is -** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit. -** ^If sqlite3_open_v2() is used and the "cache" parameter is present in -** a URI filename, its value overrides any behavior requested by setting -** the SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag. -**
-** -** ^Specifying an unknown parameter in the query component of a URI is not an -** error. Future versions of SQLite might understand additional query -** parameters. See "[query parameters with special meaning to SQLite]" for -** additional information. -** -** [[URI filename examples]]

URI filename examples

-** -**
-**
URI filenames                                   Results
-**
-** file:data.db
-**     Open the file "data.db" in the current directory.
-**
-** file:/home/fred/data.db
-** file:///home/fred/data.db
-** file://localhost/home/fred/data.db
-**     Open the database file "/home/fred/data.db".
-**
-** file://darkstar/home/fred/data.db
-**     An error. "darkstar" is not a recognized authority.
-**
-** file:///C:/Documents%20and%20Settings/fred/Desktop/data.db
-**     Windows only: Open the file "data.db" on fred's desktop on drive
-**     C:. Note that the %20 escaping in this example is not strictly
-**     necessary - space characters can be used literally in URI filenames.
-**
-** file:data.db?mode=ro&cache=private
-**     Open file "data.db" in the current directory for read-only access.
-**     Regardless of whether or not shared-cache mode is enabled by
-**     default, use a private cache.
-**
-** file:/home/fred/data.db?vfs=unix-nolock
-**     Open file "/home/fred/data.db". Use the special VFS "unix-nolock".
-**
-** file:data.db?mode=readonly
-**     An error. "readonly" is not a valid option for the "mode" parameter.
-**
-** -** ^URI hexadecimal escape sequences (%HH) are supported within the path and -** query components of a URI. A hexadecimal escape sequence consists of a -** percent sign - "%" - followed by exactly two hexadecimal digits -** specifying an octet value. ^Before the path or query components of a -** URI filename are interpreted, they are encoded using UTF-8 and all -** hexadecimal escape sequences replaced by a single byte containing the -** corresponding octet. If this process generates an invalid UTF-8 encoding, -** the results are undefined. -** -** Note to Windows users: The encoding used for the filename argument -** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever -** codepage is currently defined. Filenames containing international -** characters must be converted to UTF-8 prior to passing them into -** sqlite3_open() or sqlite3_open_v2(). -*/ -SQLITE_API int sqlite3_open( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -SQLITE_API int sqlite3_open16( - const void *filename, /* Database filename (UTF-16) */ - sqlite3 **ppDb /* OUT: SQLite db handle */ -); -SQLITE_API int sqlite3_open_v2( - const char *filename, /* Database filename (UTF-8) */ - sqlite3 **ppDb, /* OUT: SQLite db handle */ - int flags, /* Flags */ - const char *zVfs /* Name of VFS module to use */ -); - -/* -** CAPI3REF: Obtain Values For URI Parameters -** -** These are utility routines, useful to VFS implementations, that check -** to see if a database file was a URI that contained a specific query -** parameter, and if so obtains the value of that query parameter. -** -** If F is the database filename pointer passed into the xOpen() method of -** a VFS implementation when the flags parameter to xOpen() has one or -** more of the [SQLITE_OPEN_URI] or [SQLITE_OPEN_MAIN_DB] bits set and -** P is the name of the query parameter, then -** sqlite3_uri_parameter(F,P) returns the value of the P -** parameter if it exists or a NULL pointer if P does not appear as a -** query parameter on F. If P is a query parameter of F that -** has no explicit value, then sqlite3_uri_parameter(F,P) returns -** a pointer to an empty string. -** -** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean -** parameter and returns true (1) or false (0) according to the value -** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the -** value of query parameter P is one of "yes", "true", or "on" in any -** case or if the value begins with a non-zero number. The -** sqlite3_uri_boolean(F,P,B) routine returns false (0) if the value of -** query parameter P is one of "no", "false", or "off" in any case or -** if the value begins with a numeric zero. If P is not a query -** parameter on F or if the value of P does not match any of the -** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0). -** -** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a -** 64-bit signed integer and returns that integer, or D if P does not -** exist. If the value of P is something other than an integer, then -** zero is returned. -** -** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and -** sqlite3_uri_boolean(F,P,B) returns B. If F is not a NULL pointer and -** is not a database file pathname pointer that SQLite passed into the xOpen -** VFS method, then the behavior of this routine is undefined and probably -** undesirable. 
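A sketch of how a custom VFS xOpen() implementation might consult these routines; zName stands for the filename pointer passed to xOpen(), and the parameter names are illustrative:

    const char *zMode = sqlite3_uri_parameter(zName, "mode");    /* NULL if absent */
    int bNolock = sqlite3_uri_boolean(zName, "nolock", 0);       /* default false  */
    sqlite3_int64 szHint = sqlite3_uri_int64(zName, "size", -1); /* default -1     */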
-*/ -SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam); -SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); -SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64); - - -/* -** CAPI3REF: Error Codes And Messages -** -** ^The sqlite3_errcode() interface returns the numeric [result code] or -** [extended result code] for the most recent failed sqlite3_* API call -** associated with a [database connection]. If a prior API call failed -** but the most recent API call succeeded, the return value from -** sqlite3_errcode() is undefined. ^The sqlite3_extended_errcode() -** interface is the same except that it always returns the -** [extended result code] even when extended result codes are -** disabled. -** -** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. -** ^(Memory to hold the error message string is managed internally. -** The application does not need to worry about freeing the result. -** However, the error string might be overwritten or deallocated by -** subsequent calls to other SQLite interface functions.)^ -** -** When the serialized [threading mode] is in use, it might be the -** case that a second error occurs on a separate thread in between -** the time of the first error and the call to these interfaces. -** When that happens, the second error will be reported since these -** interfaces always report the most recent result. To avoid -** this, each thread can obtain exclusive use of the [database connection] D -** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning -** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after -** all calls to the interfaces listed here are completed. -** -** If an interface fails with SQLITE_MISUSE, that means the interface -** was invoked incorrectly by the application. In that case, the -** error code and message may or may not be set. -*/ -SQLITE_API int sqlite3_errcode(sqlite3 *db); -SQLITE_API int sqlite3_extended_errcode(sqlite3 *db); -SQLITE_API const char *sqlite3_errmsg(sqlite3*); -SQLITE_API const void *sqlite3_errmsg16(sqlite3*); - -/* -** CAPI3REF: SQL Statement Object -** KEYWORDS: {prepared statement} {prepared statements} -** -** An instance of this object represents a single SQL statement. -** This object is variously known as a "prepared statement" or a -** "compiled SQL statement" or simply as a "statement". -** -** The life of a statement object goes something like this: -** -**
    -**
  1. Create the object using [sqlite3_prepare_v2()] or a related -** function. -**
  2. Bind values to [host parameters] using the sqlite3_bind_*() -** interfaces. -**
  3. Run the SQL by calling [sqlite3_step()] one or more times. -**
  4. Reset the statement using [sqlite3_reset()] then go back -** to step 2. Do this zero or more times. -**
  5. Destroy the object using [sqlite3_finalize()]. -**
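The same life cycle as a compact sketch, assuming an open [database connection] db and an illustrative table t1 (error handling elided):

    sqlite3_stmt *pStmt = 0;
    sqlite3_prepare_v2(db, "SELECT age FROM t1 WHERE name=?1;", -1, &pStmt, 0);
    sqlite3_bind_text(pStmt, 1, "Alice", -1, SQLITE_STATIC);
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      int age = sqlite3_column_int(pStmt, 0);   /* read the current row */
    }
    sqlite3_finalize(pStmt);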
-** -** Refer to documentation on individual methods above for additional -** information. -*/ -typedef struct sqlite3_stmt sqlite3_stmt; - -/* -** CAPI3REF: Run-time Limits -** -** ^(This interface allows the size of various constructs to be limited -** on a connection by connection basis. The first parameter is the -** [database connection] whose limit is to be set or queried. The -** second parameter is one of the [limit categories] that define a -** class of constructs to be size limited. The third parameter is the -** new limit for that construct.)^ -** -** ^If the new limit is a negative number, the limit is unchanged. -** ^(For each limit category SQLITE_LIMIT_NAME there is a -** [limits | hard upper bound] -** set at compile-time by a C preprocessor macro called -** [limits | SQLITE_MAX_NAME]. -** (The "_LIMIT_" in the name is changed to "_MAX_".))^ -** ^Attempts to increase a limit above its hard upper bound are -** silently truncated to the hard upper bound. -** -** ^Regardless of whether or not the limit was changed, the -** [sqlite3_limit()] interface returns the prior value of the limit. -** ^Hence, to find the current value of a limit without changing it, -** simply invoke this interface with the third parameter set to -1. -** -** Run-time limits are intended for use in applications that manage -** both their own internal database and also databases that are controlled -** by untrusted external sources. An example application might be a -** web browser that has its own databases for storing history and -** separate databases controlled by JavaScript applications downloaded -** off the Internet. The internal databases can be given the -** large, default limits. Databases managed by external sources can -** be given much smaller limits designed to prevent a denial of service -** attack. Developers might also want to use the [sqlite3_set_authorizer()] -** interface to further control untrusted SQL. The size of the database -** created by an untrusted script can be contained using the -** [max_page_count] [PRAGMA]. -** -** New run-time limit categories may be added in future releases. -*/ -SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); - -/* -** CAPI3REF: Run-Time Limit Categories -** KEYWORDS: {limit category} {*limit categories} -** -** These constants define various performance limits -** that can be lowered at run-time using [sqlite3_limit()]. -** The synopsis of the meanings of the various limits is shown below. -** Additional information is available at [limits | Limits in SQLite]. -** -**
-** [[SQLITE_LIMIT_LENGTH]] ^(
SQLITE_LIMIT_LENGTH
-**
The maximum size of any string or BLOB or table row, in bytes.
)^ -** -** [[SQLITE_LIMIT_SQL_LENGTH]] ^(
SQLITE_LIMIT_SQL_LENGTH
-**
The maximum length of an SQL statement, in bytes.
)^ -** -** [[SQLITE_LIMIT_COLUMN]] ^(
SQLITE_LIMIT_COLUMN
-**
The maximum number of columns in a table definition or in the -** result set of a [SELECT] or the maximum number of columns in an index -** or in an ORDER BY or GROUP BY clause.
)^ -** -** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(
SQLITE_LIMIT_EXPR_DEPTH
-**
The maximum depth of the parse tree on any expression.
)^ -** -** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(
SQLITE_LIMIT_COMPOUND_SELECT
-**
The maximum number of terms in a compound SELECT statement.
)^ -** -** [[SQLITE_LIMIT_VDBE_OP]] ^(
SQLITE_LIMIT_VDBE_OP
-**
The maximum number of instructions in a virtual machine program -** used to implement an SQL statement. This limit is not currently -** enforced, though that might be added in some future release of -** SQLite.
)^ -** -** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(
SQLITE_LIMIT_FUNCTION_ARG
-**
The maximum number of arguments on a function.
)^ -** -** [[SQLITE_LIMIT_ATTACHED]] ^(
SQLITE_LIMIT_ATTACHED
-**
The maximum number of [ATTACH | attached databases].)^
-** -** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]] -** ^(
SQLITE_LIMIT_LIKE_PATTERN_LENGTH
-**
The maximum length of the pattern argument to the [LIKE] or -** [GLOB] operators.
)^ -** -** [[SQLITE_LIMIT_VARIABLE_NUMBER]] -** ^(
SQLITE_LIMIT_VARIABLE_NUMBER
-**
The maximum index number of any [parameter] in an SQL statement.)^ -** -** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(
SQLITE_LIMIT_TRIGGER_DEPTH
-**
The maximum depth of recursion for triggers.
)^ -**
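For example, one of the categories above can be queried without changing it and then lowered for untrusted input, assuming an open [database connection] db:

    int cur = sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, -1);   /* -1 = query only */
    sqlite3_limit(db, SQLITE_LIMIT_SQL_LENGTH, 10000);          /* impose a new cap */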
-*/ -#define SQLITE_LIMIT_LENGTH 0 -#define SQLITE_LIMIT_SQL_LENGTH 1 -#define SQLITE_LIMIT_COLUMN 2 -#define SQLITE_LIMIT_EXPR_DEPTH 3 -#define SQLITE_LIMIT_COMPOUND_SELECT 4 -#define SQLITE_LIMIT_VDBE_OP 5 -#define SQLITE_LIMIT_FUNCTION_ARG 6 -#define SQLITE_LIMIT_ATTACHED 7 -#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 -#define SQLITE_LIMIT_VARIABLE_NUMBER 9 -#define SQLITE_LIMIT_TRIGGER_DEPTH 10 - -/* -** CAPI3REF: Compiling An SQL Statement -** KEYWORDS: {SQL statement compiler} -** -** To execute an SQL query, it must first be compiled into a byte-code -** program using one of these routines. -** -** The first argument, "db", is a [database connection] obtained from a -** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or -** [sqlite3_open16()]. The database connection must not have been closed. -** -** The second argument, "zSql", is the statement to be compiled, encoded -** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() -** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() -** use UTF-16. -** -** ^If the nByte argument is less than zero, then zSql is read up to the -** first zero terminator. ^If nByte is non-negative, then it is the maximum -** number of bytes read from zSql. ^When nByte is non-negative, the -** zSql string ends at either the first '\000' or '\u0000' character or -** the nByte-th byte, whichever comes first. If the caller knows -** that the supplied string is nul-terminated, then there is a small -** performance advantage to be gained by passing an nByte parameter that -** is equal to the number of bytes in the input string including -** the nul-terminator bytes as this saves SQLite from having to -** make a copy of the input string. -** -** ^If pzTail is not NULL then *pzTail is made to point to the first byte -** past the end of the first SQL statement in zSql. These routines only -** compile the first statement in zSql, so *pzTail is left pointing to -** what remains uncompiled. -** -** ^*ppStmt is left pointing to a compiled [prepared statement] that can be -** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set -** to NULL. ^If the input text contains no SQL (if the input is an empty -** string or a comment) then *ppStmt is set to NULL. -** The calling procedure is responsible for deleting the compiled -** SQL statement using [sqlite3_finalize()] after it has finished with it. -** ppStmt may not be NULL. -** -** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK]; -** otherwise an [error code] is returned. -** -** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are -** recommended for all new programs. The two older interfaces are retained -** for backwards compatibility, but their use is discouraged. -** ^In the "v2" interfaces, the prepared statement -** that is returned (the [sqlite3_stmt] object) contains a copy of the -** original SQL text. This causes the [sqlite3_step()] interface to -** behave differently in three ways: -** -**
-** 1. ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
-**    always used to do, [sqlite3_step()] will automatically recompile the SQL
-**    statement and try to run it again.
-**
-** 2. ^When an error occurs, [sqlite3_step()] will return one of the detailed
-**    [error codes] or [extended error codes]. ^The legacy behavior was that
-**    [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code
-**    and the application would have to make a second call to [sqlite3_reset()]
-**    in order to find the underlying cause of the problem. With the "v2" prepare
-**    interfaces, the underlying reason for the error is returned immediately.
-**
-** 3. ^If the specific value bound to a [parameter | host parameter] in the
-**    WHERE clause might influence the choice of query plan for a statement,
-**    then the statement will be automatically recompiled, as if there had been
-**    a schema change, on the first [sqlite3_step()] call following any change
-**    to the [sqlite3_bind_text | bindings] of that [parameter].
-**    ^The specific value of a WHERE-clause [parameter] might influence the
-**    choice of query plan if the parameter is the left-hand side of a [LIKE]
-**    or [GLOB] operator or if the parameter is compared to an indexed column
-**    and the [SQLITE_ENABLE_STAT3] compile-time option is enabled.
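-**
-** For illustration, a minimal sketch of the recommended "v2" usage
-** (assuming an open [database connection] db; the table name "t1" is
-** hypothetical):
-**
-**     sqlite3_stmt *pStmt = 0;
-**     int rc = sqlite3_prepare_v2(db, "SELECT x FROM t1;", -1, &pStmt, 0);
-**     if( rc==SQLITE_OK ){
-**       while( sqlite3_step(pStmt)==SQLITE_ROW ){
-**         printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
-**       }
-**     }
-**     sqlite3_finalize(pStmt);  /* finalizing a NULL pointer is a no-op */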
-*/ -SQLITE_API int sqlite3_prepare( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare_v2( - sqlite3 *db, /* Database handle */ - const char *zSql, /* SQL statement, UTF-8 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const char **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare16( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); -SQLITE_API int sqlite3_prepare16_v2( - sqlite3 *db, /* Database handle */ - const void *zSql, /* SQL statement, UTF-16 encoded */ - int nByte, /* Maximum length of zSql in bytes. */ - sqlite3_stmt **ppStmt, /* OUT: Statement handle */ - const void **pzTail /* OUT: Pointer to unused portion of zSql */ -); - -/* -** CAPI3REF: Retrieving Statement SQL -** -** ^This interface can be used to retrieve a saved copy of the original -** SQL text used to create a [prepared statement] if that statement was -** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. -*/ -SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Determine If An SQL Statement Writes The Database -** -** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if -** and only if the [prepared statement] X makes no direct changes to -** the content of the database file. -** -** Note that [application-defined SQL functions] or -** [virtual tables] might change the database indirectly as a side effect. -** ^(For example, if an application defines a function "eval()" that -** calls [sqlite3_exec()], then the following SQL statement would -** change the database file through side-effects: -** -**
-**    SELECT eval('DELETE FROM t1') FROM t2;
-** 
-** -** But because the [SELECT] statement does not change the database file -** directly, sqlite3_stmt_readonly() would still return true.)^ -** -** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK], -** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true, -** since the statements themselves do not actually modify the database but -** rather they control the timing of when other statements modify the -** database. ^The [ATTACH] and [DETACH] statements also cause -** sqlite3_stmt_readonly() to return true since, while those statements -** change the configuration of a database connection, they do not make -** changes to the content of the database files on disk. -*/ -SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Determine If A Prepared Statement Has Been Reset -** -** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the -** [prepared statement] S has been stepped at least once using -** [sqlite3_step(S)] but has not run to completion and/or has not -** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S) -** interface returns false if S is a NULL pointer. If S is not a -** NULL pointer and is not a pointer to a valid [prepared statement] -** object, then the behavior is undefined and probably undesirable. -** -** This interface can be used in combination [sqlite3_next_stmt()] -** to locate all prepared statements associated with a database -** connection that are in need of being reset. This can be used, -** for example, in diagnostic routines to search for prepared -** statements that are holding a transaction open. -*/ -SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*); - -/* -** CAPI3REF: Dynamically Typed Value Object -** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} -** -** SQLite uses the sqlite3_value object to represent all values -** that can be stored in a database table. SQLite uses dynamic typing -** for the values it stores. ^Values stored in sqlite3_value objects -** can be integers, floating point values, strings, BLOBs, or NULL. -** -** An sqlite3_value object may be either "protected" or "unprotected". -** Some interfaces require a protected sqlite3_value. Other interfaces -** will accept either a protected or an unprotected sqlite3_value. -** Every interface that accepts sqlite3_value arguments specifies -** whether or not it requires a protected sqlite3_value. -** -** The terms "protected" and "unprotected" refer to whether or not -** a mutex is held. An internal mutex is held for a protected -** sqlite3_value object but no mutex is held for an unprotected -** sqlite3_value object. If SQLite is compiled to be single-threaded -** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0) -** or if SQLite is run in one of reduced mutex modes -** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD] -** then there is no distinction between protected and unprotected -** sqlite3_value objects and they can be used interchangeably. However, -** for maximum code portability it is recommended that applications -** still make the distinction between protected and unprotected -** sqlite3_value objects even when not strictly required. -** -** ^The sqlite3_value objects that are passed as parameters into the -** implementation of [application-defined SQL functions] are protected. -** ^The sqlite3_value object returned by -** [sqlite3_column_value()] is unprotected. 
-** Unprotected sqlite3_value objects may only be used with -** [sqlite3_result_value()] and [sqlite3_bind_value()]. -** The [sqlite3_value_blob | sqlite3_value_type()] family of -** interfaces require protected sqlite3_value objects. -*/ -typedef struct Mem sqlite3_value; - -/* -** CAPI3REF: SQL Function Context Object -** -** The context in which an SQL function executes is stored in an -** sqlite3_context object. ^A pointer to an sqlite3_context object -** is always first parameter to [application-defined SQL functions]. -** The application-defined SQL function implementation will pass this -** pointer through into calls to [sqlite3_result_int | sqlite3_result()], -** [sqlite3_aggregate_context()], [sqlite3_user_data()], -** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()], -** and/or [sqlite3_set_auxdata()]. -*/ -typedef struct sqlite3_context sqlite3_context; - -/* -** CAPI3REF: Binding Values To Prepared Statements -** KEYWORDS: {host parameter} {host parameters} {host parameter name} -** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding} -** -** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, -** literals may be replaced by a [parameter] that matches one of following -** templates: -** -**
-**   • ?
-**   • ?NNN
-**   • :VVV
-**   • @VVV
-**   • $VVV
-** -** In the templates above, NNN represents an integer literal, -** and VVV represents an alphanumeric identifier.)^ ^The values of these -** parameters (also called "host parameter names" or "SQL parameters") -** can be set using the sqlite3_bind_*() routines defined here. -** -** ^The first argument to the sqlite3_bind_*() routines is always -** a pointer to the [sqlite3_stmt] object returned from -** [sqlite3_prepare_v2()] or its variants. -** -** ^The second argument is the index of the SQL parameter to be set. -** ^The leftmost SQL parameter has an index of 1. ^When the same named -** SQL parameter is used more than once, second and subsequent -** occurrences have the same index as the first occurrence. -** ^The index for named parameters can be looked up using the -** [sqlite3_bind_parameter_index()] API if desired. ^The index -** for "?NNN" parameters is the value of NNN. -** ^The NNN value must be between 1 and the [sqlite3_limit()] -** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999). -** -** ^The third argument is the value to bind to the parameter. -** -** ^(In those routines that have a fourth argument, its value is the -** number of bytes in the parameter. To be clear: the value is the -** number of bytes in the value, not the number of characters.)^ -** ^If the fourth parameter is negative, the length of the string is -** the number of bytes up to the first zero terminator. -** If a non-negative fourth parameter is provided to sqlite3_bind_text() -** or sqlite3_bind_text16() then that parameter must be the byte offset -** where the NUL terminator would occur assuming the string were NUL -** terminated. If any NUL characters occur at byte offsets less than -** the value of the fourth parameter then the resulting string value will -** contain embedded NULs. The result of expressions involving strings -** with embedded NULs is undefined. -** -** ^The fifth argument to sqlite3_bind_blob(), sqlite3_bind_text(), and -** sqlite3_bind_text16() is a destructor used to dispose of the BLOB or -** string after SQLite has finished with it. ^The destructor is called -** to dispose of the BLOB or string even if the call to sqlite3_bind_blob(), -** sqlite3_bind_text(), or sqlite3_bind_text16() fails. -** ^If the fifth argument is -** the special value [SQLITE_STATIC], then SQLite assumes that the -** information is in static, unmanaged space and does not need to be freed. -** ^If the fifth argument has the value [SQLITE_TRANSIENT], then -** SQLite makes its own private copy of the data immediately, before -** the sqlite3_bind_*() routine returns. -** -** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that -** is filled with zeroes. ^A zeroblob uses a fixed amount of memory -** (just an integer to hold its size) while it is being processed. -** Zeroblobs are intended to serve as placeholders for BLOBs whose -** content is later written using -** [sqlite3_blob_open | incremental BLOB I/O] routines. -** ^A negative value for the zeroblob results in a zero-length BLOB. -** -** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer -** for the [prepared statement] or with a prepared statement for which -** [sqlite3_step()] has been called more recently than [sqlite3_reset()], -** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_() -** routine is passed a [prepared statement] that has been finalized, the -** result is undefined and probably harmful. -** -** ^Bindings are not cleared by the [sqlite3_reset()] routine. 
-** ^Unbound parameters are interpreted as NULL. -** -** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an -** [error code] if anything goes wrong. -** ^[SQLITE_RANGE] is returned if the parameter -** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails. -** -** See also: [sqlite3_bind_parameter_count()], -** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); -SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double); -SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int); -SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); -SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int); -SQLITE_API int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*)); -SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); -SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); -SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); - -/* -** CAPI3REF: Number Of SQL Parameters -** -** ^This routine can be used to find the number of [SQL parameters] -** in a [prepared statement]. SQL parameters are tokens of the -** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as -** placeholders for values that are [sqlite3_bind_blob | bound] -** to the parameters at a later time. -** -** ^(This routine actually returns the index of the largest (rightmost) -** parameter. For all forms except ?NNN, this will correspond to the -** number of unique parameters. If parameters of the ?NNN form are used, -** there may be gaps in the list.)^ -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_name()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*); - -/* -** CAPI3REF: Name Of A Host Parameter -** -** ^The sqlite3_bind_parameter_name(P,N) interface returns -** the name of the N-th [SQL parameter] in the [prepared statement] P. -** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" -** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" -** respectively. -** In other words, the initial ":" or "$" or "@" or "?" -** is included as part of the name.)^ -** ^Parameters of the form "?" without a following integer have no name -** and are referred to as "nameless" or "anonymous parameters". -** -** ^The first host parameter has an index of 1, not 0. -** -** ^If the value N is out of range or if the N-th parameter is -** nameless, then NULL is returned. ^The returned string is -** always in UTF-8 encoding even if the named parameter was -** originally specified as UTF-16 in [sqlite3_prepare16()] or -** [sqlite3_prepare16_v2()]. -** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_count()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int); - -/* -** CAPI3REF: Index Of A Parameter With A Given Name -** -** ^Return the index of an SQL parameter given its name. ^The -** index value returned is suitable for use as the second -** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero -** is returned if no matching parameter is found. ^The parameter -** name must be given in UTF-8 even if the original statement -** was prepared from UTF-16 text using [sqlite3_prepare16_v2()]. 
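-**
-** For illustration, a minimal binding sketch (the SQL text and values are
-** hypothetical; db is an open [database connection]). The named parameter
-** ":aa" has index 1 and the anonymous "?" has index 2:
-**
-**     sqlite3_stmt *pStmt = 0;
-**     sqlite3_prepare_v2(db, "SELECT * FROM t1 WHERE a=:aa AND b=?;",
-**                        -1, &pStmt, 0);
-**     sqlite3_bind_int(pStmt, sqlite3_bind_parameter_index(pStmt, ":aa"), 42);
-**     sqlite3_bind_text(pStmt, 2, "hello", -1, SQLITE_STATIC);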
-** -** See also: [sqlite3_bind_blob|sqlite3_bind()], -** [sqlite3_bind_parameter_count()], and -** [sqlite3_bind_parameter_index()]. -*/ -SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); - -/* -** CAPI3REF: Reset All Bindings On A Prepared Statement -** -** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset -** the [sqlite3_bind_blob | bindings] on a [prepared statement]. -** ^Use this routine to reset all host parameters to NULL. -*/ -SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*); - -/* -** CAPI3REF: Number Of Columns In A Result Set -** -** ^Return the number of columns in the result set returned by the -** [prepared statement]. ^This routine returns 0 if pStmt is an SQL -** statement that does not return data (for example an [UPDATE]). -** -** See also: [sqlite3_data_count()] -*/ -SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Column Names In A Result Set -** -** ^These routines return the name assigned to a particular column -** in the result set of a [SELECT] statement. ^The sqlite3_column_name() -** interface returns a pointer to a zero-terminated UTF-8 string -** and sqlite3_column_name16() returns a pointer to a zero-terminated -** UTF-16 string. ^The first parameter is the [prepared statement] -** that implements the [SELECT] statement. ^The second parameter is the -** column number. ^The leftmost column is number 0. -** -** ^The returned string pointer is valid until either the [prepared statement] -** is destroyed by [sqlite3_finalize()] or until the statement is automatically -** reprepared by the first call to [sqlite3_step()] for a particular run -** or until the next call to -** sqlite3_column_name() or sqlite3_column_name16() on the same column. -** -** ^If sqlite3_malloc() fails during the processing of either routine -** (for example during a conversion from UTF-8 to UTF-16) then a -** NULL pointer is returned. -** -** ^The name of a result column is the value of the "AS" clause for -** that column, if there is an AS clause. If there is no AS clause -** then the name of the column is unspecified and may change from -** one release of SQLite to the next. -*/ -SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N); -SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); - -/* -** CAPI3REF: Source Of Data In A Query Result -** -** ^These routines provide a means to determine the database, table, and -** table column that is the origin of a particular result column in -** [SELECT] statement. -** ^The name of the database or table or column can be returned as -** either a UTF-8 or UTF-16 string. ^The _database_ routines return -** the database name, the _table_ routines return the table name, and -** the origin_ routines return the column name. -** ^The returned string is valid until the [prepared statement] is destroyed -** using [sqlite3_finalize()] or until the statement is automatically -** reprepared by the first call to [sqlite3_step()] for a particular run -** or until the same information is requested -** again in a different encoding. -** -** ^The names returned are the original un-aliased names of the -** database, table, and column. -** -** ^The first argument to these interfaces is a [prepared statement]. -** ^These functions return information about the Nth result column returned by -** the statement, where N is the second function argument. -** ^The left-most column is column 0 for these routines. 
-**
-** ^If the Nth column returned by the statement is an expression or
-** subquery and is not a column value, then all of these functions return
-** NULL. ^These routines might also return NULL if a memory allocation error
-** occurs. ^Otherwise, they return the name of the attached database, table,
-** or column that the query result column was extracted from.
-**
-** ^As with all other SQLite APIs, those whose names end with "16" return
-** UTF-16 encoded strings and the other functions return UTF-8.
-**
-** ^These APIs are only available if the library was compiled with the
-** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol.
-**
-** If two or more threads call one or more of these
-** [sqlite3_column_database_name | column metadata interfaces]
-** for the same [prepared statement] and result column
-** at the same time then the results are undefined.
-*/
-SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int);
-SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
-SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int);
-SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
-SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
-SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
-
-/*
-** CAPI3REF: Declared Datatype Of A Query Result
-**
-** ^(The first parameter is a [prepared statement].
-** If this statement is a [SELECT] statement and the Nth column of the
-** returned result set of that [SELECT] is a table column (not an
-** expression or subquery) then the declared type of the table
-** column is returned.)^ ^If the Nth column of the result set is an
-** expression or subquery, then a NULL pointer is returned.
-** ^The returned string is always UTF-8 encoded.
-**
-** ^(For example, given the database schema:
-**
-** CREATE TABLE t1(c1 VARIANT);
-**
-** and the following statement to be compiled:
-**
-** SELECT c1 + 1, c1 FROM t1;
-**
-** this routine would return the string "VARIANT" for the second result
-** column (i==1), and a NULL pointer for the first result column (i==0).)^
-**
-** ^SQLite uses dynamic run-time typing. ^So just because a column
-** is declared to contain a particular type does not mean that the
-** data stored in that column is of the declared type. SQLite is
-** strongly typed, but the typing is dynamic not static. ^Type
-** is associated with individual values, not with the containers
-** used to hold those values.
-*/
-SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int);
-SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
-
-/*
-** CAPI3REF: Evaluate An SQL Statement
-**
-** After a [prepared statement] has been prepared using either
-** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy
-** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function
-** must be called one or more times to evaluate the statement.
-**
-** The details of the behavior of the sqlite3_step() interface depend
-** on whether the statement was prepared using the newer "v2" interface
-** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy
-** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the
-** new "v2" interface is recommended for new applications but the legacy
-** interface will continue to be supported.
-**
-** ^In the legacy interface, the return value will be either [SQLITE_BUSY],
-** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE].
-** ^With the "v2" interface, any of the other [result codes] or
-** [extended result codes] might be returned as well.
-**
-** ^[SQLITE_BUSY] means that the database engine was unable to acquire the
-** database locks it needs to do its job. ^If the statement is a [COMMIT]
-** or occurs outside of an explicit transaction, then you can retry the
-** statement. If the statement is not a [COMMIT] and occurs within an
-** explicit transaction then you should rollback the transaction before
-** continuing.
-**
-** ^[SQLITE_DONE] means that the statement has finished executing
-** successfully. sqlite3_step() should not be called again on this virtual
-** machine without first calling [sqlite3_reset()] to reset the virtual
-** machine back to its initial state.
-**
-** ^If the SQL statement being executed returns any data, then [SQLITE_ROW]
-** is returned each time a new row of data is ready for processing by the
-** caller. The values may be accessed using the [column access functions].
-** sqlite3_step() is called again to retrieve the next row of data.
-**
-** ^[SQLITE_ERROR] means that a run-time error (such as a constraint
-** violation) has occurred. sqlite3_step() should not be called again on
-** the VM. More information may be found by calling [sqlite3_errmsg()].
-** ^With the legacy interface, a more specific error code (for example,
-** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth)
-** can be obtained by calling [sqlite3_reset()] on the
-** [prepared statement]. ^In the "v2" interface,
-** the more specific error code is returned directly by sqlite3_step().
-**
-** [SQLITE_MISUSE] means that this routine was called inappropriately.
-** Perhaps it was called on a [prepared statement] that has
-** already been [sqlite3_finalize | finalized] or on one that had
-** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could
-** be the case that the same database connection is being used by two or
-** more threads at the same moment in time.
-**
-** For all versions of SQLite up to and including 3.6.23.1, a call to
-** [sqlite3_reset()] was required after sqlite3_step() returned anything
-** other than [SQLITE_ROW] before any subsequent invocation of
-** sqlite3_step(). Failure to reset the prepared statement using
-** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from
-** sqlite3_step(). But after version 3.6.23.1, sqlite3_step() began
-** calling [sqlite3_reset()] automatically in this circumstance rather
-** than returning [SQLITE_MISUSE]. This is not considered a compatibility
-** break because any application that ever receives an SQLITE_MISUSE error
-** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option
-** can be used to restore the legacy behavior.
-**
-** Goofy Interface Alert: In the legacy interface, the sqlite3_step()
-** API always returns a generic error code, [SQLITE_ERROR], following any
-** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call
-** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the
-** specific [error codes] that better describes the error.
-** We admit that this is a goofy design. The problem has been fixed
-** with the "v2" interface.
If you prepare all of your SQL statements -** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead -** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces, -** then the more specific [error codes] are returned directly -** by sqlite3_step(). The use of the "v2" interface is recommended. -*/ -SQLITE_API int sqlite3_step(sqlite3_stmt*); - -/* -** CAPI3REF: Number of columns in a result set -** -** ^The sqlite3_data_count(P) interface returns the number of columns in the -** current row of the result set of [prepared statement] P. -** ^If prepared statement P does not have results ready to return -** (via calls to the [sqlite3_column_int | sqlite3_column_*()] of -** interfaces) then sqlite3_data_count(P) returns 0. -** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer. -** ^The sqlite3_data_count(P) routine returns 0 if the previous call to -** [sqlite3_step](P) returned [SQLITE_DONE]. ^The sqlite3_data_count(P) -** will return non-zero if previous call to [sqlite3_step](P) returned -** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum] -** where it always returns zero since each step of that multi-step -** pragma returns 0 columns of data. -** -** See also: [sqlite3_column_count()] -*/ -SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Fundamental Datatypes -** KEYWORDS: SQLITE_TEXT -** -** ^(Every value in SQLite has one of five fundamental datatypes: -** -**
-**   • 64-bit signed integer
-**   • 64-bit IEEE floating point number
-**   • string
-**   • BLOB
-**   • NULL
)^ -** -** These constants are codes for each of those types. -** -** Note that the SQLITE_TEXT constant was also used in SQLite version 2 -** for a completely different meaning. Software that links against both -** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not -** SQLITE_TEXT. -*/ -#define SQLITE_INTEGER 1 -#define SQLITE_FLOAT 2 -#define SQLITE_BLOB 4 -#define SQLITE_NULL 5 -#ifdef SQLITE_TEXT -# undef SQLITE_TEXT -#else -# define SQLITE_TEXT 3 -#endif -#define SQLITE3_TEXT 3 - -/* -** CAPI3REF: Result Values From A Query -** KEYWORDS: {column access functions} -** -** These routines form the "result set" interface. -** -** ^These routines return information about a single column of the current -** result row of a query. ^In every case the first argument is a pointer -** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*] -** that was returned from [sqlite3_prepare_v2()] or one of its variants) -** and the second argument is the index of the column for which information -** should be returned. ^The leftmost column of the result set has the index 0. -** ^The number of columns in the result can be determined using -** [sqlite3_column_count()]. -** -** If the SQL statement does not currently point to a valid row, or if the -** column index is out of range, the result is undefined. -** These routines may only be called when the most recent call to -** [sqlite3_step()] has returned [SQLITE_ROW] and neither -** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently. -** If any of these routines are called after [sqlite3_reset()] or -** [sqlite3_finalize()] or after [sqlite3_step()] has returned -** something other than [SQLITE_ROW], the results are undefined. -** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()] -** are called from a different thread while any of these routines -** are pending, then the results are undefined. -** -** ^The sqlite3_column_type() routine returns the -** [SQLITE_INTEGER | datatype code] for the initial data type -** of the result column. ^The returned value is one of [SQLITE_INTEGER], -** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value -** returned by sqlite3_column_type() is only meaningful if no type -** conversions have occurred as described below. After a type conversion, -** the value returned by sqlite3_column_type() is undefined. Future -** versions of SQLite may change the behavior of sqlite3_column_type() -** following a type conversion. -** -** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() -** routine returns the number of bytes in that BLOB or string. -** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts -** the string to UTF-8 and then returns the number of bytes. -** ^If the result is a numeric value then sqlite3_column_bytes() uses -** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns -** the number of bytes in that string. -** ^If the result is NULL, then sqlite3_column_bytes() returns zero. -** -** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16() -** routine returns the number of bytes in that BLOB or string. -** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts -** the string to UTF-16 and then returns the number of bytes. -** ^If the result is a numeric value then sqlite3_column_bytes16() uses -** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns -** the number of bytes in that string. 
-** ^If the result is NULL, then sqlite3_column_bytes16() returns zero. -** -** ^The values returned by [sqlite3_column_bytes()] and -** [sqlite3_column_bytes16()] do not include the zero terminators at the end -** of the string. ^For clarity: the values returned by -** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of -** bytes in the string, not the number of characters. -** -** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(), -** even empty strings, are always zero-terminated. ^The return -** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. -** -** ^The object returned by [sqlite3_column_value()] is an -** [unprotected sqlite3_value] object. An unprotected sqlite3_value object -** may only be used with [sqlite3_bind_value()] and [sqlite3_result_value()]. -** If the [unprotected sqlite3_value] object returned by -** [sqlite3_column_value()] is used in any other way, including calls -** to routines like [sqlite3_value_int()], [sqlite3_value_text()], -** or [sqlite3_value_bytes()], then the behavior is undefined. -** -** These routines attempt to convert the value where appropriate. ^For -** example, if the internal representation is FLOAT and a text result -** is requested, [sqlite3_snprintf()] is used internally to perform the -** conversion automatically. ^(The following table details the conversions -** that are applied: -** -**
-**
-**    Internal Type    Requested Type    Conversion
-**    -------------    --------------    -------------------------------
-**    NULL             INTEGER           Result is 0
-**    NULL             FLOAT             Result is 0.0
-**    NULL             TEXT              Result is NULL pointer
-**    NULL             BLOB              Result is NULL pointer
-**    INTEGER          FLOAT             Convert from integer to float
-**    INTEGER          TEXT              ASCII rendering of the integer
-**    INTEGER          BLOB              Same as INTEGER->TEXT
-**    FLOAT            INTEGER           Convert from float to integer
-**    FLOAT            TEXT              ASCII rendering of the float
-**    FLOAT            BLOB              Same as FLOAT->TEXT
-**    TEXT             INTEGER           Use atoi()
-**    TEXT             FLOAT             Use atof()
-**    TEXT             BLOB              No change
-**    BLOB             INTEGER           Convert to TEXT then use atoi()
-**    BLOB             FLOAT             Convert to TEXT then use atof()
-**    BLOB             TEXT              Add a zero terminator if needed
-**
)^ -** -** The table above makes reference to standard C library functions atoi() -** and atof(). SQLite does not really use these functions. It has its -** own equivalent internal routines. The atoi() and atof() names are -** used in the table for brevity and because they are familiar to most -** C programmers. -** -** Note that when type conversions occur, pointers returned by prior -** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or -** sqlite3_column_text16() may be invalidated. -** Type conversions and pointer invalidations might occur -** in the following cases: -** -**
-**   • The initial content is a BLOB and sqlite3_column_text() or
-**     sqlite3_column_text16() is called. A zero-terminator might
-**     need to be added to the string.
-**
-**   • The initial content is UTF-8 text and sqlite3_column_bytes16() or
-**     sqlite3_column_text16() is called. The content must be converted
-**     to UTF-16.
-**
-**   • The initial content is UTF-16 text and sqlite3_column_bytes() or
-**     sqlite3_column_text() is called. The content must be converted
-**     to UTF-8.
-** -** ^Conversions between UTF-16be and UTF-16le are always done in place and do -** not invalidate a prior pointer, though of course the content of the buffer -** that the prior pointer references will have been modified. Other kinds -** of conversion are done in place when it is possible, but sometimes they -** are not possible and in those cases prior pointers are invalidated. -** -** The safest and easiest to remember policy is to invoke these routines -** in one of the following ways: -** -**
-**   • sqlite3_column_text() followed by sqlite3_column_bytes()
-**   • sqlite3_column_blob() followed by sqlite3_column_bytes()
-**   • sqlite3_column_text16() followed by sqlite3_column_bytes16()
-** -** In other words, you should call sqlite3_column_text(), -** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result -** into the desired format, then invoke sqlite3_column_bytes() or -** sqlite3_column_bytes16() to find the size of the result. Do not mix calls -** to sqlite3_column_text() or sqlite3_column_blob() with calls to -** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() -** with calls to sqlite3_column_bytes(). -** -** ^The pointers returned are valid until a type conversion occurs as -** described above, or until [sqlite3_step()] or [sqlite3_reset()] or -** [sqlite3_finalize()] is called. ^The memory space used to hold strings -** and BLOBs is freed automatically. Do not pass the pointers returned -** [sqlite3_column_blob()], [sqlite3_column_text()], etc. into -** [sqlite3_free()]. -** -** ^(If a memory allocation error occurs during the evaluation of any -** of these routines, a default value is returned. The default value -** is either the integer 0, the floating point number 0.0, or a NULL -** pointer. Subsequent calls to [sqlite3_errcode()] will return -** [SQLITE_NOMEM].)^ -*/ -SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); -SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol); -SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); -SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); -SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); -SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol); -SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol); - -/* -** CAPI3REF: Destroy A Prepared Statement Object -** -** ^The sqlite3_finalize() function is called to delete a [prepared statement]. -** ^If the most recent evaluation of the statement encountered no errors -** or if the statement is never been evaluated, then sqlite3_finalize() returns -** SQLITE_OK. ^If the most recent evaluation of statement S failed, then -** sqlite3_finalize(S) returns the appropriate [error code] or -** [extended error code]. -** -** ^The sqlite3_finalize(S) routine can be called at any point during -** the life cycle of [prepared statement] S: -** before statement S is ever evaluated, after -** one or more calls to [sqlite3_reset()], or after any call -** to [sqlite3_step()] regardless of whether or not the statement has -** completed execution. -** -** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op. -** -** The application must finalize every [prepared statement] in order to avoid -** resource leaks. It is a grievous error for the application to try to use -** a prepared statement after it has been finalized. Any use of a prepared -** statement after it has been finalized can result in undefined and -** undesirable behavior such as segfaults and heap corruption. -*/ -SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Reset A Prepared Statement Object -** -** The sqlite3_reset() function is called to reset a [prepared statement] -** object back to its initial state, ready to be re-executed. -** ^Any SQL statement variables that had values bound to them using -** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. -** Use [sqlite3_clear_bindings()] to reset the bindings. 
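-**
-** For illustration, a minimal reuse sketch (the table name "t1" is
-** hypothetical; db is an open [database connection]): the same statement
-** is re-bound, stepped, and reset on each pass:
-**
-**     sqlite3_stmt *pStmt = 0;
-**     int i;
-**     sqlite3_prepare_v2(db, "INSERT INTO t1(x) VALUES(?);", -1, &pStmt, 0);
-**     for(i=0; i<3; i++){
-**       sqlite3_bind_int(pStmt, 1, i);
-**       sqlite3_step(pStmt);            /* SQLITE_DONE on success */
-**       sqlite3_reset(pStmt);           /* rewind; bindings are retained */
-**       sqlite3_clear_bindings(pStmt);  /* now set all bindings to NULL */
-**     }
-**     sqlite3_finalize(pStmt);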
-** -** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S -** back to the beginning of its program. -** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. -** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S indicated an error, then -** [sqlite3_reset(S)] returns an appropriate [error code]. -** -** ^The [sqlite3_reset(S)] interface does not change the values -** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. -*/ -SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Create Or Redefine SQL Functions -** KEYWORDS: {function creation routines} -** KEYWORDS: {application-defined SQL function} -** KEYWORDS: {application-defined SQL functions} -** -** ^These functions (collectively known as "function creation routines") -** are used to add SQL functions or aggregates or to redefine the behavior -** of existing SQL functions or aggregates. The only differences between -** these routines are the text encoding expected for -** the second parameter (the name of the function being created) -** and the presence or absence of a destructor callback for -** the application data pointer. -** -** ^The first parameter is the [database connection] to which the SQL -** function is to be added. ^If an application uses more than one database -** connection then application-defined SQL functions must be added -** to each database connection separately. -** -** ^The second parameter is the name of the SQL function to be created or -** redefined. ^The length of the name is limited to 255 bytes in a UTF-8 -** representation, exclusive of the zero-terminator. ^Note that the name -** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes. -** ^Any attempt to create a function with a longer name -** will result in [SQLITE_MISUSE] being returned. -** -** ^The third parameter (nArg) -** is the number of arguments that the SQL function or -** aggregate takes. ^If this parameter is -1, then the SQL function or -** aggregate may take any number of arguments between 0 and the limit -** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third -** parameter is less than -1 or greater than 127 then the behavior is -** undefined. -** -** ^The fourth parameter, eTextRep, specifies what -** [SQLITE_UTF8 | text encoding] this SQL function prefers for -** its parameters. Every SQL function implementation must be able to work -** with UTF-8, UTF-16le, or UTF-16be. But some implementations may be -** more efficient with one encoding than another. ^An application may -** invoke sqlite3_create_function() or sqlite3_create_function16() multiple -** times with the same function but with different values of eTextRep. -** ^When multiple implementations of the same function are available, SQLite -** will pick the one that involves the least amount of data conversion. -** If there is only a single implementation which does not care what text -** encoding is used, then the fourth argument should be [SQLITE_ANY]. -** -** ^(The fifth parameter is an arbitrary pointer. The implementation of the -** function can gain access to this pointer using [sqlite3_user_data()].)^ -** -** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are -** pointers to C-language functions that implement the SQL function or -** aggregate. 
^A scalar SQL function requires an implementation of the xFunc
-** callback only; NULL pointers must be passed as the xStep and xFinal
-** parameters. ^An aggregate SQL function requires an implementation of xStep
-** and xFinal and a NULL pointer must be passed for xFunc. ^To delete an existing
-** SQL function or aggregate, pass NULL pointers for all three function
-** callbacks.
-**
-** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL,
-** then it is the destructor for the application data pointer.
-** The destructor is invoked when the function is deleted, either by being
-** overloaded or when the database connection closes.)^
-** ^The destructor is also invoked if the call to
-** sqlite3_create_function_v2() fails.
-** ^When the destructor callback of the ninth parameter is invoked, it
-** is passed a single argument which is a copy of the application data
-** pointer which was the fifth parameter to sqlite3_create_function_v2().
-**
-** ^It is permitted to register multiple implementations of the same
-** functions with the same name but with either differing numbers of
-** arguments or differing preferred text encodings. ^SQLite will use
-** the implementation that most closely matches the way in which the
-** SQL function is used. ^A function implementation with a non-negative
-** nArg parameter is a better match than a function implementation with
-** a negative nArg. ^A function where the preferred text encoding
-** matches the database encoding is a better
-** match than a function where the encoding is different.
-** ^A function where the encoding difference is between UTF16le and UTF16be
-** is a closer match than a function where the encoding difference is
-** between UTF8 and UTF16.
-**
-** ^Built-in functions may be overloaded by new application-defined functions.
-**
-** ^An application-defined function is permitted to call other
-** SQLite interfaces. However, such calls must not
-** close the database connection nor finalize or reset the prepared
-** statement in which the function is running.
-*/
-SQLITE_API int sqlite3_create_function(
-  sqlite3 *db,
-  const char *zFunctionName,
-  int nArg,
-  int eTextRep,
-  void *pApp,
-  void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
-  void (*xStep)(sqlite3_context*,int,sqlite3_value**),
-  void (*xFinal)(sqlite3_context*)
-);
-SQLITE_API int sqlite3_create_function16(
-  sqlite3 *db,
-  const void *zFunctionName,
-  int nArg,
-  int eTextRep,
-  void *pApp,
-  void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
-  void (*xStep)(sqlite3_context*,int,sqlite3_value**),
-  void (*xFinal)(sqlite3_context*)
-);
-SQLITE_API int sqlite3_create_function_v2(
-  sqlite3 *db,
-  const char *zFunctionName,
-  int nArg,
-  int eTextRep,
-  void *pApp,
-  void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
-  void (*xStep)(sqlite3_context*,int,sqlite3_value**),
-  void (*xFinal)(sqlite3_context*),
-  void(*xDestroy)(void*)
-);
-
-/*
-** CAPI3REF: Text Encodings
-**
-** These constants define integer codes that represent the various
-** text encodings supported by SQLite.
-*/
-#define SQLITE_UTF8           1
-#define SQLITE_UTF16LE        2
-#define SQLITE_UTF16BE        3
-#define SQLITE_UTF16          4    /* Use native byte order */
-#define SQLITE_ANY            5    /* sqlite3_create_function only */
-#define SQLITE_UTF16_ALIGNED  8    /* sqlite3_create_collation only */
-
-/*
-** CAPI3REF: Deprecated Functions
-** DEPRECATED
-**
-** These functions are [deprecated]. In order to maintain
-** backwards compatibility with older code, these functions continue
-** to be supported.
However, new applications should avoid -** the use of these functions. To help encourage people to avoid -** using these functions, we are not going to tell you what they do. -*/ -#ifndef SQLITE_OMIT_DEPRECATED -SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); -SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void); -SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void); -SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),void*,sqlite3_int64); -#endif - -/* -** CAPI3REF: Obtaining SQL Function Parameter Values -** -** The C-language implementation of SQL functions and aggregates uses -** this set of interface routines to access the parameter values on -** the function or aggregate. -** -** The xFunc (for scalar functions) or xStep (for aggregates) parameters -** to [sqlite3_create_function()] and [sqlite3_create_function16()] -** define callbacks that implement the SQL functions and aggregates. -** The 3rd parameter to these callbacks is an array of pointers to -** [protected sqlite3_value] objects. There is one [sqlite3_value] object for -** each parameter to the SQL function. These routines are used to -** extract values from the [sqlite3_value] objects. -** -** These routines work only with [protected sqlite3_value] objects. -** Any attempt to use these routines on an [unprotected sqlite3_value] -** object results in undefined behavior. -** -** ^These routines work just like the corresponding [column access functions] -** except that these routines take a single [protected sqlite3_value] object -** pointer instead of a [sqlite3_stmt*] pointer and an integer column number. -** -** ^The sqlite3_value_text16() interface extracts a UTF-16 string -** in the native byte-order of the host machine. ^The -** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces -** extract UTF-16 strings as big-endian and little-endian respectively. -** -** ^(The sqlite3_value_numeric_type() interface attempts to apply -** numeric affinity to the value. This means that an attempt is -** made to convert the value to an integer or floating point. If -** such a conversion is possible without loss of information (in other -** words, if the value is a string that looks like a number) -** then the conversion is performed. Otherwise no conversion occurs. -** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ -** -** Please pay particular attention to the fact that the pointer returned -** from [sqlite3_value_blob()], [sqlite3_value_text()], or -** [sqlite3_value_text16()] can be invalidated by a subsequent call to -** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], -** or [sqlite3_value_text16()]. -** -** These routines must be called from the same thread as -** the SQL function that supplied the [sqlite3_value*] parameters. 
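-**
-** For illustration, a sketch of a scalar function built on these value
-** extraction routines (the function name "half" is hypothetical; db is an
-** open [database connection]):
-**
-**     static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
-**       sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
-**     }
-**     /* registered with: */
-**     sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);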
-*/
-SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
-SQLITE_API int sqlite3_value_bytes(sqlite3_value*);
-SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
-SQLITE_API double sqlite3_value_double(sqlite3_value*);
-SQLITE_API int sqlite3_value_int(sqlite3_value*);
-SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
-SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*);
-SQLITE_API const void *sqlite3_value_text16(sqlite3_value*);
-SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*);
-SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*);
-SQLITE_API int sqlite3_value_type(sqlite3_value*);
-SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
-
-/*
-** CAPI3REF: Obtain Aggregate Function Context
-**
-** Implementations of aggregate SQL functions use this
-** routine to allocate memory for storing their state.
-**
-** ^The first time the sqlite3_aggregate_context(C,N) routine is called
-** for a particular aggregate function, SQLite
-** allocates N bytes of memory, zeroes out that memory, and returns a pointer
-** to the new memory. ^On second and subsequent calls to
-** sqlite3_aggregate_context() for the same aggregate function instance,
-** the same buffer is returned. Sqlite3_aggregate_context() is normally
-** called once for each invocation of the xStep callback and then one
-** last time when the xFinal callback is invoked. ^(When no rows match
-** an aggregate query, the xStep() callback of the aggregate function
-** implementation is never called and xFinal() is called exactly once.
-** In those cases, sqlite3_aggregate_context() might be called for the
-** first time from within xFinal().)^
-**
-** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer if N is
-** less than or equal to zero or if a memory allocation error occurs.
-**
-** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is
-** determined by the N parameter on the first successful call. Changing the
-** value of N in subsequent calls to sqlite3_aggregate_context() within
-** the same aggregate function instance will not resize the memory
-** allocation.)^
-**
-** ^SQLite automatically frees the memory allocated by
-** sqlite3_aggregate_context() when the aggregate query concludes.
-**
-** The first parameter must be a copy of the
-** [sqlite3_context | SQL function context] that is the first parameter
-** to the xStep or xFinal callback routine that implements the aggregate
-** function.
-**
-** This routine must be called from the same thread in which
-** the aggregate SQL function is running.
-*/
-SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
-
-/*
-** CAPI3REF: User Data For Functions
-**
-** ^The sqlite3_user_data() interface returns a copy of
-** the pointer that was the pUserData parameter (the 5th parameter)
-** of the [sqlite3_create_function()]
-** and [sqlite3_create_function16()] routines that originally
-** registered the application-defined function.
-**
-** This routine must be called from the same thread in which
-** the application-defined function is running.
-*/
-SQLITE_API void *sqlite3_user_data(sqlite3_context*);
-
-/*
-** CAPI3REF: Database Connection For Functions
-**
-** ^The sqlite3_context_db_handle() interface returns a copy of
-** the pointer to the [database connection] (the 1st parameter)
-** of the [sqlite3_create_function()]
-** and [sqlite3_create_function16()] routines that originally
-** registered the application-defined function.
-*/
-SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
-
-/*
-** CAPI3REF: Function Auxiliary Data
-**
-** The following two functions may be used by scalar SQL functions to
-** associate metadata with argument values. If the same value is passed to
-** multiple invocations of the same SQL function during query execution, under
-** some circumstances the associated metadata may be preserved. This may
-** be used, for example, to add a regular-expression matching scalar
-** function. The compiled version of the regular expression is stored as
-** metadata associated with the SQL value passed as the regular expression
-** pattern. The compiled regular expression can be reused on multiple
-** invocations of the same function so that the original pattern string
-** does not need to be recompiled on each invocation.
-**
-** ^The sqlite3_get_auxdata() interface returns a pointer to the metadata
-** associated by the sqlite3_set_auxdata() function with the Nth argument
-** value to the application-defined function. ^If no metadata has ever
-** been set for the Nth argument of the function, or if the corresponding
-** function parameter has changed since the meta-data was set,
-** then sqlite3_get_auxdata() returns a NULL pointer.
-**
-** ^The sqlite3_set_auxdata() interface saves the metadata
-** pointed to by its 3rd parameter as the metadata for the N-th
-** argument of the application-defined function. Subsequent
-** calls to sqlite3_get_auxdata() might return this data, if it has
-** not been destroyed.
-** ^If it is not NULL, SQLite will invoke the destructor
-** function given by the 4th parameter to sqlite3_set_auxdata() on
-** the metadata when the corresponding function parameter changes
-** or when the SQL statement completes, whichever comes first.
-**
-** SQLite is free to call the destructor and drop metadata on any
-** parameter of any function at any time. ^The only guarantee is that
-** the destructor will be called before the metadata is dropped.
-**
-** ^(In practice, metadata is preserved between function calls for
-** expressions that are constant at compile time. This includes literal
-** values and [parameters].)^
-**
-** These routines must be called from the same thread in which
-** the SQL function is running.
-*/
-SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
-SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
-
-
-/*
-** CAPI3REF: Constants Defining Special Destructor Behavior
-**
-** These are special values for the destructor that is passed in as the
-** final argument to routines like [sqlite3_result_blob()]. ^If the destructor
-** argument is SQLITE_STATIC, it means that the content pointer is constant
-** and will never change. It does not need to be destroyed. ^The
-** SQLITE_TRANSIENT value means that the content will likely change in
-** the near future and that SQLite should make its own private copy of
-** the content before returning.
-**
-** The typedef is necessary to work around problems in certain
-** C++ compilers. See ticket #2191.
-*/
-typedef void (*sqlite3_destructor_type)(void*);
-#define SQLITE_STATIC      ((sqlite3_destructor_type)0)
-#define SQLITE_TRANSIENT   ((sqlite3_destructor_type)-1)
-
-/*
-** CAPI3REF: Setting The Result Of An SQL Function
-**
-** These routines are used by the xFunc or xFinal callbacks that
-** implement SQL functions and aggregates. See
-** [sqlite3_create_function()] and [sqlite3_create_function16()]
-** for additional information.
-** -** These functions work very much like the [parameter binding] family of -** functions used to bind values to host parameters in prepared statements. -** Refer to the [SQL parameter] documentation for additional information. -** -** ^The sqlite3_result_blob() interface sets the result from -** an application-defined function to be the BLOB whose content is pointed -** to by the second parameter and which is N bytes long where N is the -** third parameter. -** -** ^The sqlite3_result_zeroblob() interfaces set the result of -** the application-defined function to be a BLOB containing all zero -** bytes and N bytes in size, where N is the value of the 2nd parameter. -** -** ^The sqlite3_result_double() interface sets the result from -** an application-defined function to be a floating point value specified -** by its 2nd argument. -** -** ^The sqlite3_result_error() and sqlite3_result_error16() functions -** cause the implemented SQL function to throw an exception. -** ^SQLite uses the string pointed to by the -** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16() -** as the text of an error message. ^SQLite interprets the error -** message string from sqlite3_result_error() as UTF-8. ^SQLite -** interprets the string from sqlite3_result_error16() as UTF-16 in native -** byte order. ^If the third parameter to sqlite3_result_error() -** or sqlite3_result_error16() is negative then SQLite takes as the error -** message all text up through the first zero character. -** ^If the third parameter to sqlite3_result_error() or -** sqlite3_result_error16() is non-negative then SQLite takes that many -** bytes (not characters) from the 2nd parameter as the error message. -** ^The sqlite3_result_error() and sqlite3_result_error16() -** routines make a private copy of the error message text before -** they return. Hence, the calling function can deallocate or -** modify the text after they return without harm. -** ^The sqlite3_result_error_code() function changes the error code -** returned by SQLite as a result of an error in a function. ^By default, -** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error() -** or sqlite3_result_error16() resets the error code to SQLITE_ERROR. -** -** ^The sqlite3_result_toobig() interface causes SQLite to throw an error -** indicating that a string or BLOB is too long to represent. -** -** ^The sqlite3_result_nomem() interface causes SQLite to throw an error -** indicating that a memory allocation failed. -** -** ^The sqlite3_result_int() interface sets the return value -** of the application-defined function to be the 32-bit signed integer -** value given in the 2nd argument. -** ^The sqlite3_result_int64() interface sets the return value -** of the application-defined function to be the 64-bit signed integer -** value given in the 2nd argument. -** -** ^The sqlite3_result_null() interface sets the return value -** of the application-defined function to be NULL. -** -** ^The sqlite3_result_text(), sqlite3_result_text16(), -** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces -** set the return value of the application-defined function to be -** a text string which is represented as UTF-8, UTF-16 native byte order, -** UTF-16 little endian, or UTF-16 big endian, respectively. -** ^SQLite takes the text result from the application from -** the 2nd parameter of the sqlite3_result_text* interfaces. 
-** ^If the 3rd parameter to the sqlite3_result_text* interfaces
-** is negative, then SQLite takes result text from the 2nd parameter
-** through the first zero character.
-** ^If the 3rd parameter to the sqlite3_result_text* interfaces
-** is non-negative, then as many bytes (not characters) of the text
-** pointed to by the 2nd parameter are taken as the application-defined
-** function result.  If the 3rd parameter is non-negative, then it
-** must be the byte offset into the string where the NUL terminator would
-** appear if the string were NUL terminated.  If any NUL characters occur
-** in the string at a byte offset that is less than the value of the 3rd
-** parameter, then the resulting string will contain embedded NULs and the
-** result of expressions operating on strings with embedded NULs is undefined.
-** ^If the 4th parameter to the sqlite3_result_text* interfaces
-** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that
-** function as the destructor on the text or BLOB result when it has
-** finished using that result.
-** ^If the 4th parameter to the sqlite3_result_text* interfaces or to
-** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite
-** assumes that the text or BLOB result is in constant space and does not
-** copy the content of the parameter nor call a destructor on the content
-** when it has finished using that result.
-** ^If the 4th parameter to the sqlite3_result_text* interfaces
-** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT
-** then SQLite makes a copy of the result into space obtained
-** from [sqlite3_malloc()] before it returns.
-**
-** ^The sqlite3_result_value() interface sets the result of
-** the application-defined function to be a copy of the
-** [unprotected sqlite3_value] object specified by the 2nd parameter.  ^The
-** sqlite3_result_value() interface makes a copy of the [sqlite3_value]
-** so that the [sqlite3_value] specified in the parameter may change or
-** be deallocated after sqlite3_result_value() returns without harm.
-** ^A [protected sqlite3_value] object may always be used where an
-** [unprotected sqlite3_value] object is required, so either
-** kind of [sqlite3_value] object can be used with this interface.
-**
-** If these routines are called from within a different thread
-** than the one containing the application-defined function that received
-** the [sqlite3_context] pointer, the results are undefined.
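-**
-** As a closing sketch, the auxiliary-data interfaces described under
-** sqlite3_get_auxdata() above combine with these result-setting routines
-** roughly as follows; re_compile(), re_match(), and re_free() are
-** hypothetical regular-expression helpers, not SQLite interfaces:
-**
-**     static void regexpFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
-**       void *pRe = sqlite3_get_auxdata(ctx, 0);
-**       (void)argc;
-**       if( pRe==0 ){
-**         pRe = re_compile((const char*)sqlite3_value_text(argv[0]));
-**         if( pRe==0 ){
-**           sqlite3_result_error(ctx, "invalid pattern", -1);
-**           return;
-**         }
-**         sqlite3_set_auxdata(ctx, 0, pRe, re_free);
-**       }
-**       sqlite3_result_int(ctx,
-**           re_match(pRe, (const char*)sqlite3_value_text(argv[1])));
-**     }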
-*/ -SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_double(sqlite3_context*, double); -SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int); -SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int); -SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*); -SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*); -SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int); -SQLITE_API void sqlite3_result_int(sqlite3_context*, int); -SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64); -SQLITE_API void sqlite3_result_null(sqlite3_context*); -SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*)); -SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*)); -SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*)); -SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*); -SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n); - -/* -** CAPI3REF: Define New Collating Sequences -** -** ^These functions add, remove, or modify a [collation] associated -** with the [database connection] specified as the first argument. -** -** ^The name of the collation is a UTF-8 string -** for sqlite3_create_collation() and sqlite3_create_collation_v2() -** and a UTF-16 string in native byte order for sqlite3_create_collation16(). -** ^Collation names that compare equal according to [sqlite3_strnicmp()] are -** considered to be the same name. -** -** ^(The third argument (eTextRep) must be one of the constants: -**
-**     [SQLITE_UTF8],
-**     [SQLITE_UTF16LE],
-**     [SQLITE_UTF16BE],
-**     [SQLITE_UTF16], or
-**     [SQLITE_UTF16_ALIGNED].
)^ -** ^The eTextRep argument determines the encoding of strings passed -** to the collating function callback, xCallback. -** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep -** force strings to be UTF16 with native byte order. -** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin -** on an even byte address. -** -** ^The fourth argument, pArg, is an application data pointer that is passed -** through as the first argument to the collating function callback. -** -** ^The fifth argument, xCallback, is a pointer to the collating function. -** ^Multiple collating functions can be registered using the same name but -** with different eTextRep parameters and SQLite will use whichever -** function requires the least amount of data transformation. -** ^If the xCallback argument is NULL then the collating function is -** deleted. ^When all collating functions having the same name are deleted, -** that collation is no longer usable. -** -** ^The collating function callback is invoked with a copy of the pArg -** application data pointer and with two strings in the encoding specified -** by the eTextRep argument. The collating function must return an -** integer that is negative, zero, or positive -** if the first string is less than, equal to, or greater than the second, -** respectively. A collating function must always return the same answer -** given the same inputs. If two or more collating functions are registered -** to the same collation name (using different eTextRep values) then all -** must give an equivalent answer when invoked with equivalent strings. -** The collating function must obey the following properties for all -** strings A, B, and C: -** -**
-**     1. If A==B then B==A.
-**     2. If A==B and B==C then A==C.
-**     3. If A<B then B>A.
-**     4. If A<B and B<C then A<C.
-** -** If a collating function fails any of the above constraints and that -** collating function is registered and used, then the behavior of SQLite -** is undefined. -** -** ^The sqlite3_create_collation_v2() works like sqlite3_create_collation() -** with the addition that the xDestroy callback is invoked on pArg when -** the collating function is deleted. -** ^Collating functions are deleted when they are overridden by later -** calls to the collation creation functions or when the -** [database connection] is closed using [sqlite3_close()]. -** -** ^The xDestroy callback is not called if the -** sqlite3_create_collation_v2() function fails. Applications that invoke -** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should -** check the return code and dispose of the application data pointer -** themselves rather than expecting SQLite to deal with it for them. -** This is different from every other SQLite interface. The inconsistency -** is unfortunate but cannot be changed without breaking backwards -** compatibility. -** -** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. -*/ -SQLITE_API int sqlite3_create_collation( - sqlite3*, - const char *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*) -); -SQLITE_API int sqlite3_create_collation_v2( - sqlite3*, - const char *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*), - void(*xDestroy)(void*) -); -SQLITE_API int sqlite3_create_collation16( - sqlite3*, - const void *zName, - int eTextRep, - void *pArg, - int(*xCompare)(void*,int,const void*,int,const void*) -); - -/* -** CAPI3REF: Collation Needed Callbacks -** -** ^To avoid having to register all collation sequences before a database -** can be used, a single callback function may be registered with the -** [database connection] to be invoked whenever an undefined collation -** sequence is required. -** -** ^If the function is registered using the sqlite3_collation_needed() API, -** then it is passed the names of undefined collation sequences as strings -** encoded in UTF-8. ^If sqlite3_collation_needed16() is used, -** the names are passed as UTF-16 in machine native byte order. -** ^A call to either function replaces the existing collation-needed callback. -** -** ^(When the callback is invoked, the first argument passed is a copy -** of the second argument to sqlite3_collation_needed() or -** sqlite3_collation_needed16(). The second argument is the database -** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], -** or [SQLITE_UTF16LE], indicating the most desirable form of the collation -** sequence function required. The fourth parameter is the name of the -** required collation sequence.)^ -** -** The callback function should register the desired collation using -** [sqlite3_create_collation()], [sqlite3_create_collation16()], or -** [sqlite3_create_collation_v2()]. -*/ -SQLITE_API int sqlite3_collation_needed( - sqlite3*, - void*, - void(*)(void*,sqlite3*,int eTextRep,const char*) -); -SQLITE_API int sqlite3_collation_needed16( - sqlite3*, - void*, - void(*)(void*,sqlite3*,int eTextRep,const void*) -); - -#ifdef SQLITE_HAS_CODEC -/* -** Specify the key for an encrypted database. This routine should be -** called right after sqlite3_open(). -** -** The code to implement this API is not available in the public release -** of SQLite. 
-*/ -SQLITE_API int sqlite3_key( - sqlite3 *db, /* Database to be rekeyed */ - const void *pKey, int nKey /* The key */ -); - -/* -** Change the key on an open database. If the current database is not -** encrypted, this routine will encrypt it. If pNew==0 or nNew==0, the -** database is decrypted. -** -** The code to implement this API is not available in the public release -** of SQLite. -*/ -SQLITE_API int sqlite3_rekey( - sqlite3 *db, /* Database to be rekeyed */ - const void *pKey, int nKey /* The new key */ -); - -/* -** Specify the activation key for a SEE database. Unless -** activated, none of the SEE routines will work. -*/ -SQLITE_API void sqlite3_activate_see( - const char *zPassPhrase /* Activation phrase */ -); -#endif - -#ifdef SQLITE_ENABLE_CEROD -/* -** Specify the activation key for a CEROD database. Unless -** activated, none of the CEROD routines will work. -*/ -SQLITE_API void sqlite3_activate_cerod( - const char *zPassPhrase /* Activation phrase */ -); -#endif - -/* -** CAPI3REF: Suspend Execution For A Short Time -** -** The sqlite3_sleep() function causes the current thread to suspend execution -** for at least a number of milliseconds specified in its parameter. -** -** If the operating system does not support sleep requests with -** millisecond time resolution, then the time will be rounded up to -** the nearest second. The number of milliseconds of sleep actually -** requested from the operating system is returned. -** -** ^SQLite implements this interface by calling the xSleep() -** method of the default [sqlite3_vfs] object. If the xSleep() method -** of the default VFS is not implemented correctly, or not implemented at -** all, then the behavior of sqlite3_sleep() may deviate from the description -** in the previous paragraphs. -*/ -SQLITE_API int sqlite3_sleep(int); - -/* -** CAPI3REF: Name Of The Folder Holding Temporary Files -** -** ^(If this global variable is made to point to a string which is -** the name of a folder (a.k.a. directory), then all temporary files -** created by SQLite when using a built-in [sqlite3_vfs | VFS] -** will be placed in that directory.)^ ^If this variable -** is a NULL pointer, then SQLite performs a search for an appropriate -** temporary file directory. -** -** It is not safe to read or modify this variable in more than one -** thread at a time. It is not safe to read or modify this variable -** if a [database connection] is being used at the same time in a separate -** thread. -** It is intended that this variable be set once -** as part of process initialization and before any SQLite interface -** routines have been called and that this variable remain unchanged -** thereafter. -** -** ^The [temp_store_directory pragma] may modify this variable and cause -** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, -** the [temp_store_directory pragma] always assumes that any string -** that this variable points to is held in memory obtained from -** [sqlite3_malloc] and the pragma may attempt to free that memory -** using [sqlite3_free]. -** Hence, if this variable is modified directly, either it should be -** made NULL or made to point to memory obtained from [sqlite3_malloc] -** or else the use of the [temp_store_directory pragma] should be avoided. -*/ -SQLITE_API char *sqlite3_temp_directory; - -/* -** CAPI3REF: Name Of The Folder Holding Database Files -** -** ^(If this global variable is made to point to a string which is -** the name of a folder (a.k.a. 
directory), then all database files -** specified with a relative pathname and created or accessed by -** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed -** to be relative to that directory.)^ ^If this variable is a NULL -** pointer, then SQLite assumes that all database files specified -** with a relative pathname are relative to the current directory -** for the process. Only the windows VFS makes use of this global -** variable; it is ignored by the unix VFS. -** -** Changing the value of this variable while a database connection is -** open can result in a corrupt database. -** -** It is not safe to read or modify this variable in more than one -** thread at a time. It is not safe to read or modify this variable -** if a [database connection] is being used at the same time in a separate -** thread. -** It is intended that this variable be set once -** as part of process initialization and before any SQLite interface -** routines have been called and that this variable remain unchanged -** thereafter. -** -** ^The [data_store_directory pragma] may modify this variable and cause -** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, -** the [data_store_directory pragma] always assumes that any string -** that this variable points to is held in memory obtained from -** [sqlite3_malloc] and the pragma may attempt to free that memory -** using [sqlite3_free]. -** Hence, if this variable is modified directly, either it should be -** made NULL or made to point to memory obtained from [sqlite3_malloc] -** or else the use of the [data_store_directory pragma] should be avoided. -*/ -SQLITE_API char *sqlite3_data_directory; - -/* -** CAPI3REF: Test For Auto-Commit Mode -** KEYWORDS: {autocommit mode} -** -** ^The sqlite3_get_autocommit() interface returns non-zero or -** zero if the given database connection is or is not in autocommit mode, -** respectively. ^Autocommit mode is on by default. -** ^Autocommit mode is disabled by a [BEGIN] statement. -** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK]. -** -** If certain kinds of errors occur on a statement within a multi-statement -** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR], -** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the -** transaction might be rolled back automatically. The only way to -** find out whether SQLite automatically rolled back the transaction after -** an error is to use this function. -** -** If another thread changes the autocommit status of the database -** connection while this routine is running, then the return value -** is undefined. -*/ -SQLITE_API int sqlite3_get_autocommit(sqlite3*); - -/* -** CAPI3REF: Find The Database Handle Of A Prepared Statement -** -** ^The sqlite3_db_handle interface returns the [database connection] handle -** to which a [prepared statement] belongs. ^The [database connection] -** returned by sqlite3_db_handle is the same [database connection] -** that was the first argument -** to the [sqlite3_prepare_v2()] call (or its variants) that was used to -** create the statement in the first place. -*/ -SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*); - -/* -** CAPI3REF: Return The Filename For A Database Connection -** -** ^The sqlite3_db_filename(D,N) interface returns a pointer to a filename -** associated with database N of connection D. ^The main database file -** has the name "main". 
If there is no attached database N on the database -** connection D, or if database N is a temporary or in-memory database, then -** a NULL pointer is returned. -** -** ^The filename returned by this function is the output of the -** xFullPathname method of the [VFS]. ^In other words, the filename -** will be an absolute pathname, even if the filename used -** to open the database originally was a URI or relative pathname. -*/ -SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); - -/* -** CAPI3REF: Determine if a database is read-only -** -** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N -** of connection D is read-only, 0 if it is read/write, or -1 if N is not -** the name of a database on connection D. -*/ -SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); - -/* -** CAPI3REF: Find the next prepared statement -** -** ^This interface returns a pointer to the next [prepared statement] after -** pStmt associated with the [database connection] pDb. ^If pStmt is NULL -** then this interface returns a pointer to the first prepared statement -** associated with the database connection pDb. ^If no prepared statement -** satisfies the conditions of this routine, it returns NULL. -** -** The [database connection] pointer D in a call to -** [sqlite3_next_stmt(D,S)] must refer to an open database -** connection and in particular must not be a NULL pointer. -*/ -SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); - -/* -** CAPI3REF: Commit And Rollback Notification Callbacks -** -** ^The sqlite3_commit_hook() interface registers a callback -** function to be invoked whenever a transaction is [COMMIT | committed]. -** ^Any callback set by a previous call to sqlite3_commit_hook() -** for the same database connection is overridden. -** ^The sqlite3_rollback_hook() interface registers a callback -** function to be invoked whenever a transaction is [ROLLBACK | rolled back]. -** ^Any callback set by a previous call to sqlite3_rollback_hook() -** for the same database connection is overridden. -** ^The pArg argument is passed through to the callback. -** ^If the callback on a commit hook function returns non-zero, -** then the commit is converted into a rollback. -** -** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions -** return the P argument from the previous call of the same function -** on the same [database connection] D, or NULL for -** the first call for each function on D. -** -** The commit and rollback hook callbacks are not reentrant. -** The callback implementation must not do anything that will modify -** the database connection that invoked the callback. Any actions -** to modify the database connection must be deferred until after the -** completion of the [sqlite3_step()] call that triggered the commit -** or rollback hook in the first place. -** Note that running any other SQL statements, including SELECT statements, -** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()] will modify -** the database connections for the meaning of "modify" in this paragraph. -** -** ^Registering a NULL function disables the callback. -** -** ^When the commit hook callback routine returns zero, the [COMMIT] -** operation is allowed to continue normally. ^If the commit hook -** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK]. 
-** ^The rollback hook is invoked on a rollback that results from a commit -** hook returning non-zero, just as it would be with any other rollback. -** -** ^For the purposes of this API, a transaction is said to have been -** rolled back if an explicit "ROLLBACK" statement is executed, or -** an error or constraint causes an implicit rollback to occur. -** ^The rollback callback is not invoked if a transaction is -** automatically rolled back because the database connection is closed. -** -** See also the [sqlite3_update_hook()] interface. -*/ -SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); -SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); - -/* -** CAPI3REF: Data Change Notification Callbacks -** -** ^The sqlite3_update_hook() interface registers a callback function -** with the [database connection] identified by the first argument -** to be invoked whenever a row is updated, inserted or deleted. -** ^Any callback set by a previous call to this function -** for the same database connection is overridden. -** -** ^The second argument is a pointer to the function to invoke when a -** row is updated, inserted or deleted. -** ^The first argument to the callback is a copy of the third argument -** to sqlite3_update_hook(). -** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], -** or [SQLITE_UPDATE], depending on the operation that caused the callback -** to be invoked. -** ^The third and fourth arguments to the callback contain pointers to the -** database and table name containing the affected row. -** ^The final callback parameter is the [rowid] of the row. -** ^In the case of an update, this is the [rowid] after the update takes place. -** -** ^(The update hook is not invoked when internal system tables are -** modified (i.e. sqlite_master and sqlite_sequence).)^ -** -** ^In the current implementation, the update hook -** is not invoked when duplication rows are deleted because of an -** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook -** invoked when rows are deleted using the [truncate optimization]. -** The exceptions defined in this paragraph might change in a future -** release of SQLite. -** -** The update hook implementation must not do anything that will modify -** the database connection that invoked the update hook. Any actions -** to modify the database connection must be deferred until after the -** completion of the [sqlite3_step()] call that triggered the update hook. -** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their -** database connections for the meaning of "modify" in this paragraph. -** -** ^The sqlite3_update_hook(D,C,P) function -** returns the P argument from the previous call -** on the same [database connection] D, or NULL for -** the first call on D. -** -** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()] -** interfaces. -*/ -SQLITE_API void *sqlite3_update_hook( - sqlite3*, - void(*)(void *,int ,char const *,char const *,sqlite3_int64), - void* -); - -/* -** CAPI3REF: Enable Or Disable Shared Pager Cache -** -** ^(This routine enables or disables the sharing of the database cache -** and schema data structures between [database connection | connections] -** to the same database. Sharing is enabled if the argument is true -** and disabled if the argument is false.)^ -** -** ^Cache sharing is enabled and disabled for an entire process. -** This is a change as of SQLite version 3.5.0. 
-** In prior versions of SQLite,
-** sharing was enabled or disabled for each thread separately.
-**
-** ^(The cache sharing mode set by this interface affects all subsequent
-** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()].
-** Existing database connections continue to use the sharing mode
-** that was in effect at the time they were opened.)^
-**
-** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled
-** successfully.  An [error code] is returned otherwise.)^
-**
-** ^Shared cache is disabled by default.  But this might change in
-** future releases of SQLite.  Applications that care about shared
-** cache setting should set it explicitly.
-**
-** See Also:  [SQLite Shared-Cache Mode]
-*/
-SQLITE_API int sqlite3_enable_shared_cache(int);
-
-/*
-** CAPI3REF: Attempt To Free Heap Memory
-**
-** ^The sqlite3_release_memory() interface attempts to free N bytes
-** of heap memory by deallocating non-essential memory allocations
-** held by the database library.  Memory used to cache database
-** pages to improve performance is an example of non-essential memory.
-** ^sqlite3_release_memory() returns the number of bytes actually freed,
-** which might be more or less than the amount requested.
-** ^The sqlite3_release_memory() routine is a no-op returning zero
-** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT].
-**
-** See also: [sqlite3_db_release_memory()]
-*/
-SQLITE_API int sqlite3_release_memory(int);
-
-/*
-** CAPI3REF: Free Memory Used By A Database Connection
-**
-** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap
-** memory as possible from database connection D.  Unlike the
-** [sqlite3_release_memory()] interface, this interface is in effect even
-** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is
-** omitted.
-**
-** See also: [sqlite3_release_memory()]
-*/
-SQLITE_API int sqlite3_db_release_memory(sqlite3*);
-
-/*
-** CAPI3REF: Impose A Limit On Heap Size
-**
-** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the
-** soft limit on the amount of heap memory that may be allocated by SQLite.
-** ^SQLite strives to keep heap memory utilization below the soft heap
-** limit by reducing the number of pages held in the page cache
-** as heap memory usage approaches the limit.
-** ^The soft heap limit is "soft" because even though SQLite strives to stay
-** below the limit, it will exceed the limit rather than generate
-** an [SQLITE_NOMEM] error.  In other words, the soft heap limit
-** is advisory only.
-**
-** ^The return value from sqlite3_soft_heap_limit64() is the size of
-** the soft heap limit prior to the call, or negative in the case of an
-** error.  ^If the argument N is negative
-** then no change is made to the soft heap limit.  Hence, the current
-** size of the soft heap limit can be determined by invoking
-** sqlite3_soft_heap_limit64() with a negative argument.
-**
-** ^If the argument N is zero then the soft heap limit is disabled.
-**
-** ^(The soft heap limit is not enforced in the current implementation
-** if one or more of the following conditions are true:
-**
-**     - The soft heap limit is set to zero.
-**     - Memory accounting is disabled using a combination of the
-**       [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and
-**       the [SQLITE_DEFAULT_MEMSTATUS] compile-time option.
-**     - An alternative page cache implementation is specified using
-**       [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...).
-**     - The page cache allocates from its own memory pool supplied
-**       by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than
-**       from the heap.
-** )^
-**
-** Beginning with SQLite version 3.7.3, the soft heap limit is enforced
-** regardless of whether or not the [SQLITE_ENABLE_MEMORY_MANAGEMENT]
-** compile-time option is invoked.  With [SQLITE_ENABLE_MEMORY_MANAGEMENT],
-** the soft heap limit is enforced on every memory allocation.  Without
-** [SQLITE_ENABLE_MEMORY_MANAGEMENT], the soft heap limit is only enforced
-** when memory is allocated by the page cache.  Testing suggests that because
-** the page cache is the predominant memory user in SQLite, most
-** applications will achieve adequate soft heap limit enforcement without
-** the use of [SQLITE_ENABLE_MEMORY_MANAGEMENT].
-**
-** The circumstances under which SQLite will enforce the soft heap limit may
-** change in future releases of SQLite.
-*/
-SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N);
-
-/*
-** CAPI3REF: Deprecated Soft Heap Limit Interface
-** DEPRECATED
-**
-** This is a deprecated version of the [sqlite3_soft_heap_limit64()]
-** interface.  This routine is provided for historical compatibility
-** only.  All new applications should use the
-** [sqlite3_soft_heap_limit64()] interface rather than this one.
-*/
-SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N);
-
-
-/*
-** CAPI3REF: Extract Metadata About A Column Of A Table
-**
-** ^This routine returns metadata about a specific column of a specific
-** database table accessible using the [database connection] handle
-** passed as the first function argument.
-**
-** ^The column is identified by the second, third and fourth parameters to
-** this function. ^The second parameter is either the name of the database
-** (i.e. "main", "temp", or an attached database) containing the specified
-** table or NULL. ^If it is NULL, then all attached databases are searched
-** for the table using the same algorithm used by the database engine to
-** resolve unqualified table references.
-**
-** ^The third and fourth parameters to this function are the table and column
-** name of the desired column, respectively.  Neither of these parameters
-** may be NULL.
-**
-** ^Metadata is returned by writing to the memory locations passed as the 5th
-** and subsequent parameters to this function. ^Any of these arguments may be
-** NULL, in which case the corresponding element of metadata is omitted.
-**
-** ^(
-**     Parameter     Output Type       Description
-**     ---------     -----------       -----------
-**     5th           const char*       Data type
-**     6th           const char*       Name of default collation sequence
-**     7th           int               True if column has a NOT NULL constraint
-**     8th           int               True if column is part of the PRIMARY KEY
-**     9th           int               True if column is [AUTOINCREMENT]
)^ -** -** ^The memory pointed to by the character pointers returned for the -** declaration type and collation sequence is valid only until the next -** call to any SQLite API function. -** -** ^If the specified table is actually a view, an [error code] is returned. -** -** ^If the specified column is "rowid", "oid" or "_rowid_" and an -** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output -** parameters are set for the explicitly declared column. ^(If there is no -** explicitly declared [INTEGER PRIMARY KEY] column, then the output -** parameters are set as follows: -** -**
-**     data type: "INTEGER"
-**     collation sequence: "BINARY"
-**     not null: 0
-**     primary key: 1
-**     auto increment: 0
)^ -** -** ^(This function may load one or more schemas from database files. If an -** error occurs during this process, or if the requested table or column -** cannot be found, an [error code] is returned and an error message left -** in the [database connection] (to be retrieved using sqlite3_errmsg()).)^ -** -** ^This API is only available if the library was compiled with the -** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol defined. -*/ -SQLITE_API int sqlite3_table_column_metadata( - sqlite3 *db, /* Connection handle */ - const char *zDbName, /* Database name or NULL */ - const char *zTableName, /* Table name */ - const char *zColumnName, /* Column name */ - char const **pzDataType, /* OUTPUT: Declared data type */ - char const **pzCollSeq, /* OUTPUT: Collation sequence name */ - int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ - int *pPrimaryKey, /* OUTPUT: True if column part of PK */ - int *pAutoinc /* OUTPUT: True if column is auto-increment */ -); - -/* -** CAPI3REF: Load An Extension -** -** ^This interface loads an SQLite extension library from the named file. -** -** ^The sqlite3_load_extension() interface attempts to load an -** SQLite extension library contained in the file zFile. -** -** ^The entry point is zProc. -** ^zProc may be 0, in which case the name of the entry point -** defaults to "sqlite3_extension_init". -** ^The sqlite3_load_extension() interface returns -** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong. -** ^If an error occurs and pzErrMsg is not 0, then the -** [sqlite3_load_extension()] interface shall attempt to -** fill *pzErrMsg with error message text stored in memory -** obtained from [sqlite3_malloc()]. The calling function -** should free this memory by calling [sqlite3_free()]. -** -** ^Extension loading must be enabled using -** [sqlite3_enable_load_extension()] prior to calling this API, -** otherwise an error will be returned. -** -** See also the [load_extension() SQL function]. -*/ -SQLITE_API int sqlite3_load_extension( - sqlite3 *db, /* Load the extension into this database connection */ - const char *zFile, /* Name of the shared library containing extension */ - const char *zProc, /* Entry point. Derived from zFile if 0 */ - char **pzErrMsg /* Put error message here if not 0 */ -); - -/* -** CAPI3REF: Enable Or Disable Extension Loading -** -** ^So as not to open security holes in older applications that are -** unprepared to deal with extension loading, and as a means of disabling -** extension loading while evaluating user-entered SQL, the following API -** is provided to turn the [sqlite3_load_extension()] mechanism on and off. -** -** ^Extension loading is off by default. See ticket #1863. -** ^Call the sqlite3_enable_load_extension() routine with onoff==1 -** to turn extension loading on and call it with onoff==0 to turn -** it back off again. -*/ -SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff); - -/* -** CAPI3REF: Automatically Load Statically Linked Extensions -** -** ^This interface causes the xEntryPoint() function to be invoked for -** each new [database connection] that is created. The idea here is that -** xEntryPoint() is the entry point for a statically linked SQLite extension -** that is to be automatically loaded into all new database connections. 
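-**
-** Returning briefly to sqlite3_load_extension() above, a typical loading
-** sequence looks roughly like the following sketch; the shared-library
-** name "myext.so" is hypothetical:
-**
-**     char *zErrMsg = 0;
-**     sqlite3_enable_load_extension(db, 1);
-**     if( sqlite3_load_extension(db, "myext.so", 0, &zErrMsg)!=SQLITE_OK ){
-**       fprintf(stderr, "cannot load extension: %s\n", zErrMsg);
-**       sqlite3_free(zErrMsg);
-**     }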
-**
-** ^(Even though the function prototype shows that xEntryPoint() takes
-** no arguments and returns void, SQLite invokes xEntryPoint() with three
-** arguments and expects an integer result as if the signature of the
-** entry point were as follows:
-**
-**     int xEntryPoint(
-**       sqlite3 *db,
-**       const char **pzErrMsg,
-**       const struct sqlite3_api_routines *pThunk
-**     );
)^ -** -** If the xEntryPoint routine encounters an error, it should make *pzErrMsg -** point to an appropriate error message (obtained from [sqlite3_mprintf()]) -** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg -** is NULL before calling the xEntryPoint(). ^SQLite will invoke -** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any -** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()], -** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail. -** -** ^Calling sqlite3_auto_extension(X) with an entry point X that is already -** on the list of automatic extensions is a harmless no-op. ^No entry point -** will be called more than once for each database connection that is opened. -** -** See also: [sqlite3_reset_auto_extension()]. -*/ -SQLITE_API int sqlite3_auto_extension(void (*xEntryPoint)(void)); - -/* -** CAPI3REF: Reset Automatic Extension Loading -** -** ^This interface disables all automatic extensions previously -** registered using [sqlite3_auto_extension()]. -*/ -SQLITE_API void sqlite3_reset_auto_extension(void); - -/* -** The interface to the virtual-table mechanism is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* -** Structures used by the virtual table interface -*/ -typedef struct sqlite3_vtab sqlite3_vtab; -typedef struct sqlite3_index_info sqlite3_index_info; -typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor; -typedef struct sqlite3_module sqlite3_module; - -/* -** CAPI3REF: Virtual Table Object -** KEYWORDS: sqlite3_module {virtual table module} -** -** This structure, sometimes called a "virtual table module", -** defines the implementation of a [virtual tables]. -** This structure consists mostly of methods for the module. -** -** ^A virtual table module is created by filling in a persistent -** instance of this structure and passing a pointer to that instance -** to [sqlite3_create_module()] or [sqlite3_create_module_v2()]. -** ^The registration remains valid until it is replaced by a different -** module or until the [database connection] closes. The content -** of this structure must not change while it is registered with -** any database connection. 
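-**
-** A minimal registration sketch; the module name and object are
-** hypothetical and the method implementations are assumed to be
-** defined elsewhere:
-**
-**     extern sqlite3_module templatevtabModule;
-**     int rc = sqlite3_create_module(db, "templatevtab",
-**                                    &templatevtabModule, 0);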
-*/ -struct sqlite3_module { - int iVersion; - int (*xCreate)(sqlite3*, void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVTab, char**); - int (*xConnect)(sqlite3*, void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVTab, char**); - int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*); - int (*xDisconnect)(sqlite3_vtab *pVTab); - int (*xDestroy)(sqlite3_vtab *pVTab); - int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor); - int (*xClose)(sqlite3_vtab_cursor*); - int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr, - int argc, sqlite3_value **argv); - int (*xNext)(sqlite3_vtab_cursor*); - int (*xEof)(sqlite3_vtab_cursor*); - int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int); - int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid); - int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *); - int (*xBegin)(sqlite3_vtab *pVTab); - int (*xSync)(sqlite3_vtab *pVTab); - int (*xCommit)(sqlite3_vtab *pVTab); - int (*xRollback)(sqlite3_vtab *pVTab); - int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName, - void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), - void **ppArg); - int (*xRename)(sqlite3_vtab *pVtab, const char *zNew); - /* The methods above are in version 1 of the sqlite_module object. Those - ** below are for version 2 and greater. */ - int (*xSavepoint)(sqlite3_vtab *pVTab, int); - int (*xRelease)(sqlite3_vtab *pVTab, int); - int (*xRollbackTo)(sqlite3_vtab *pVTab, int); -}; - -/* -** CAPI3REF: Virtual Table Indexing Information -** KEYWORDS: sqlite3_index_info -** -** The sqlite3_index_info structure and its substructures is used as part -** of the [virtual table] interface to -** pass information into and receive the reply from the [xBestIndex] -** method of a [virtual table module]. The fields under **Inputs** are the -** inputs to xBestIndex and are read-only. xBestIndex inserts its -** results into the **Outputs** fields. -** -** ^(The aConstraint[] array records WHERE clause constraints of the form: -** -**
-**     column OP expr
-** -** where OP is =, <, <=, >, or >=.)^ ^(The particular operator is -** stored in aConstraint[].op using one of the -** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^ -** ^(The index of the column is stored in -** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the -** expr on the right-hand side can be evaluated (and thus the constraint -** is usable) and false if it cannot.)^ -** -** ^The optimizer automatically inverts terms of the form "expr OP column" -** and makes other simplifications to the WHERE clause in an attempt to -** get as many WHERE clause terms into the form shown above as possible. -** ^The aConstraint[] array only reports WHERE clause terms that are -** relevant to the particular virtual table being queried. -** -** ^Information about the ORDER BY clause is stored in aOrderBy[]. -** ^Each term of aOrderBy records a column of the ORDER BY clause. -** -** The [xBestIndex] method must fill aConstraintUsage[] with information -** about what parameters to pass to xFilter. ^If argvIndex>0 then -** the right-hand side of the corresponding aConstraint[] is evaluated -** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit -** is true, then the constraint is assumed to be fully handled by the -** virtual table and is not checked again by SQLite.)^ -** -** ^The idxNum and idxPtr values are recorded and passed into the -** [xFilter] method. -** ^[sqlite3_free()] is used to free idxPtr if and only if -** needToFreeIdxPtr is true. -** -** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in -** the correct order to satisfy the ORDER BY clause so that no separate -** sorting step is required. -** -** ^The estimatedCost value is an estimate of the cost of doing the -** particular lookup. A full scan of a table with N entries should have -** a cost of N. A binary search of a table of N entries should have a -** cost of approximately log(N). -*/ -struct sqlite3_index_info { - /* Inputs */ - int nConstraint; /* Number of entries in aConstraint */ - struct sqlite3_index_constraint { - int iColumn; /* Column on left-hand side of constraint */ - unsigned char op; /* Constraint operator */ - unsigned char usable; /* True if this constraint is usable */ - int iTermOffset; /* Used internally - xBestIndex should ignore */ - } *aConstraint; /* Table of WHERE clause constraints */ - int nOrderBy; /* Number of terms in the ORDER BY clause */ - struct sqlite3_index_orderby { - int iColumn; /* Column number */ - unsigned char desc; /* True for DESC. False for ASC. */ - } *aOrderBy; /* The ORDER BY clause */ - /* Outputs */ - struct sqlite3_index_constraint_usage { - int argvIndex; /* if >0, constraint is part of argv to xFilter */ - unsigned char omit; /* Do not code a test for this constraint */ - } *aConstraintUsage; - int idxNum; /* Number used to identify the index */ - char *idxStr; /* String, possibly obtained from sqlite3_malloc */ - int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */ - int orderByConsumed; /* True if output is already ordered */ - double estimatedCost; /* Estimated cost of using this index */ -}; - -/* -** CAPI3REF: Virtual Table Constraint Operator Codes -** -** These macros defined the allowed values for the -** [sqlite3_index_info].aConstraint[].op field. Each value represents -** an operator that is part of a constraint term in the wHERE clause of -** a query that uses a [virtual table]. 
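-**
-** As a worked illustration of these codes (the table and query are
-** hypothetical), for
-**
-**     SELECT * FROM vt WHERE a=5 AND b<10;
-**
-** the xBestIndex method would typically see nConstraint==2, with
-** aConstraint[0] describing column "a" with op==SQLITE_INDEX_CONSTRAINT_EQ
-** and aConstraint[1] describing column "b" with
-** op==SQLITE_INDEX_CONSTRAINT_LT.  Setting aConstraintUsage[0].argvIndex
-** to 1 asks SQLite to evaluate the constant 5 and pass it as argv[0]
-** of xFilter.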
-*/ -#define SQLITE_INDEX_CONSTRAINT_EQ 2 -#define SQLITE_INDEX_CONSTRAINT_GT 4 -#define SQLITE_INDEX_CONSTRAINT_LE 8 -#define SQLITE_INDEX_CONSTRAINT_LT 16 -#define SQLITE_INDEX_CONSTRAINT_GE 32 -#define SQLITE_INDEX_CONSTRAINT_MATCH 64 - -/* -** CAPI3REF: Register A Virtual Table Implementation -** -** ^These routines are used to register a new [virtual table module] name. -** ^Module names must be registered before -** creating a new [virtual table] using the module and before using a -** preexisting [virtual table] for the module. -** -** ^The module name is registered on the [database connection] specified -** by the first parameter. ^The name of the module is given by the -** second parameter. ^The third parameter is a pointer to -** the implementation of the [virtual table module]. ^The fourth -** parameter is an arbitrary client data pointer that is passed through -** into the [xCreate] and [xConnect] methods of the virtual table module -** when a new virtual table is be being created or reinitialized. -** -** ^The sqlite3_create_module_v2() interface has a fifth parameter which -** is a pointer to a destructor for the pClientData. ^SQLite will -** invoke the destructor function (if it is not NULL) when SQLite -** no longer needs the pClientData pointer. ^The destructor will also -** be invoked if the call to sqlite3_create_module_v2() fails. -** ^The sqlite3_create_module() -** interface is equivalent to sqlite3_create_module_v2() with a NULL -** destructor. -*/ -SQLITE_API int sqlite3_create_module( - sqlite3 *db, /* SQLite connection to register module with */ - const char *zName, /* Name of the module */ - const sqlite3_module *p, /* Methods for the module */ - void *pClientData /* Client data for xCreate/xConnect */ -); -SQLITE_API int sqlite3_create_module_v2( - sqlite3 *db, /* SQLite connection to register module with */ - const char *zName, /* Name of the module */ - const sqlite3_module *p, /* Methods for the module */ - void *pClientData, /* Client data for xCreate/xConnect */ - void(*xDestroy)(void*) /* Module destructor function */ -); - -/* -** CAPI3REF: Virtual Table Instance Object -** KEYWORDS: sqlite3_vtab -** -** Every [virtual table module] implementation uses a subclass -** of this object to describe a particular instance -** of the [virtual table]. Each subclass will -** be tailored to the specific needs of the module implementation. -** The purpose of this superclass is to define certain fields that are -** common to all module implementations. -** -** ^Virtual tables methods can set an error message by assigning a -** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should -** take care that any prior string is freed by a call to [sqlite3_free()] -** prior to assigning a new string to zErrMsg. ^After the error message -** is delivered up to the client application, the string will be automatically -** freed by sqlite3_free() and the zErrMsg field will be zeroed. -*/ -struct sqlite3_vtab { - const sqlite3_module *pModule; /* The module for this virtual table */ - int nRef; /* NO LONGER USED */ - char *zErrMsg; /* Error message from sqlite3_mprintf() */ - /* Virtual table implementations will typically add additional fields */ -}; - -/* -** CAPI3REF: Virtual Table Cursor Object -** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor} -** -** Every [virtual table module] implementation uses a subclass of the -** following structure to describe cursors that point into the -** [virtual table] and are used -** to loop through the virtual table. 
Cursors are created using the -** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed -** by the [sqlite3_module.xClose | xClose] method. Cursors are used -** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods -** of the module. Each module implementation will define -** the content of a cursor structure to suit its own needs. -** -** This superclass exists in order to define fields of the cursor that -** are common to all implementations. -*/ -struct sqlite3_vtab_cursor { - sqlite3_vtab *pVtab; /* Virtual table of this cursor */ - /* Virtual table implementations will typically add additional fields */ -}; - -/* -** CAPI3REF: Declare The Schema Of A Virtual Table -** -** ^The [xCreate] and [xConnect] methods of a -** [virtual table module] call this interface -** to declare the format (the names and datatypes of the columns) of -** the virtual tables they implement. -*/ -SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL); - -/* -** CAPI3REF: Overload A Function For A Virtual Table -** -** ^(Virtual tables can provide alternative implementations of functions -** using the [xFindFunction] method of the [virtual table module]. -** But global versions of those functions -** must exist in order to be overloaded.)^ -** -** ^(This API makes sure a global version of a function with a particular -** name and number of parameters exists. If no such function exists -** before this API is called, a new function is created.)^ ^The implementation -** of the new function always causes an exception to be thrown. So -** the new function is not good for anything by itself. Its only -** purpose is to be a placeholder function that can be overloaded -** by a [virtual table]. -*/ -SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); - -/* -** The interface to the virtual-table mechanism defined above (back up -** to a comment remarkably similar to this one) is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* -** CAPI3REF: A Handle To An Open BLOB -** KEYWORDS: {BLOB handle} {BLOB handles} -** -** An instance of this object represents an open BLOB on which -** [sqlite3_blob_open | incremental BLOB I/O] can be performed. -** ^Objects of this type are created by [sqlite3_blob_open()] -** and destroyed by [sqlite3_blob_close()]. -** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces -** can be used to read or write small subsections of the BLOB. -** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes. -*/ -typedef struct sqlite3_blob sqlite3_blob; - -/* -** CAPI3REF: Open A BLOB For Incremental I/O -** -** ^(This interfaces opens a [BLOB handle | handle] to the BLOB located -** in row iRow, column zColumn, table zTable in database zDb; -** in other words, the same BLOB that would be selected by: -** -**
-**     SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
)^ -** -** ^If the flags parameter is non-zero, then the BLOB is opened for read -** and write access. ^If it is zero, the BLOB is opened for read access. -** ^It is not possible to open a column that is part of an index or primary -** key for writing. ^If [foreign key constraints] are enabled, it is -** not possible to open a column that is part of a [child key] for writing. -** -** ^Note that the database name is not the filename that contains -** the database but rather the symbolic name of the database that -** appears after the AS keyword when the database is connected using [ATTACH]. -** ^For the main database file, the database name is "main". -** ^For TEMP tables, the database name is "temp". -** -** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is written -** to *ppBlob. Otherwise an [error code] is returned and *ppBlob is set -** to be a null pointer.)^ -** ^This function sets the [database connection] error code and message -** accessible via [sqlite3_errcode()] and [sqlite3_errmsg()] and related -** functions. ^Note that the *ppBlob variable is always initialized in a -** way that makes it safe to invoke [sqlite3_blob_close()] on *ppBlob -** regardless of the success or failure of this routine. -** -** ^(If the row that a BLOB handle points to is modified by an -** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects -** then the BLOB handle is marked as "expired". -** This is true if any column of the row is changed, even a column -** other than the one the BLOB handle is open on.)^ -** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for -** an expired BLOB handle fail with a return code of [SQLITE_ABORT]. -** ^(Changes written into a BLOB prior to the BLOB expiring are not -** rolled back by the expiration of the BLOB. Such changes will eventually -** commit if the transaction continues to completion.)^ -** -** ^Use the [sqlite3_blob_bytes()] interface to determine the size of -** the opened blob. ^The size of a blob may not be changed by this -** interface. Use the [UPDATE] SQL command to change the size of a -** blob. -** -** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces -** and the built-in [zeroblob] SQL function can be used, if desired, -** to create an empty, zero-filled blob in which to read or write using -** this interface. -** -** To avoid a resource leak, every open [BLOB handle] should eventually -** be released by a call to [sqlite3_blob_close()]. -*/ -SQLITE_API int sqlite3_blob_open( - sqlite3*, - const char *zDb, - const char *zTable, - const char *zColumn, - sqlite3_int64 iRow, - int flags, - sqlite3_blob **ppBlob -); - -/* -** CAPI3REF: Move a BLOB Handle to a New Row -** -** ^This function is used to move an existing blob handle so that it points -** to a different row of the same database table. ^The new row is identified -** by the rowid value passed as the second argument. Only the row can be -** changed. ^The database, table and column on which the blob handle is open -** remain the same. Moving an existing blob handle to a new row can be -** faster than closing the existing handle and opening a new one. -** -** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - -** it must exist and there must be either a blob or text value stored in -** the nominated column.)^ ^If the new row is not present in the table, or if -** it does not contain a blob or text value, or if another error occurs, an -** SQLite error code is returned and the blob handle is considered aborted. 
-** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or -** [sqlite3_blob_reopen()] on an aborted blob handle immediately return -** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle -** always returns zero. -** -** ^This function sets the database handle error code and message. -*/ -SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); - -/* -** CAPI3REF: Close A BLOB Handle -** -** ^Closes an open [BLOB handle]. -** -** ^Closing a BLOB shall cause the current transaction to commit -** if there are no other BLOBs, no pending prepared statements, and the -** database connection is in [autocommit mode]. -** ^If any writes were made to the BLOB, they might be held in cache -** until the close operation if they will fit. -** -** ^(Closing the BLOB often forces the changes -** out to disk and so if any I/O errors occur, they will likely occur -** at the time when the BLOB is closed. Any errors that occur during -** closing are reported as a non-zero return value.)^ -** -** ^(The BLOB is closed unconditionally. Even if this routine returns -** an error code, the BLOB is still closed.)^ -** -** ^Calling this routine with a null pointer (such as would be returned -** by a failed call to [sqlite3_blob_open()]) is a harmless no-op. -*/ -SQLITE_API int sqlite3_blob_close(sqlite3_blob *); - -/* -** CAPI3REF: Return The Size Of An Open BLOB -** -** ^Returns the size in bytes of the BLOB accessible via the -** successfully opened [BLOB handle] in its only argument. ^The -** incremental blob I/O routines can only read or overwriting existing -** blob content; they cannot change the size of a blob. -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -*/ -SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *); - -/* -** CAPI3REF: Read Data From A BLOB Incrementally -** -** ^(This function is used to read data from an open [BLOB handle] into a -** caller-supplied buffer. N bytes of data are copied into buffer Z -** from the open BLOB, starting at offset iOffset.)^ -** -** ^If offset iOffset is less than N bytes from the end of the BLOB, -** [SQLITE_ERROR] is returned and no data is read. ^If N or iOffset is -** less than zero, [SQLITE_ERROR] is returned and no data is read. -** ^The size of the blob (and hence the maximum value of N+iOffset) -** can be determined using the [sqlite3_blob_bytes()] interface. -** -** ^An attempt to read from an expired [BLOB handle] fails with an -** error code of [SQLITE_ABORT]. -** -** ^(On success, sqlite3_blob_read() returns SQLITE_OK. -** Otherwise, an [error code] or an [extended error code] is returned.)^ -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -** -** See also: [sqlite3_blob_write()]. -*/ -SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset); - -/* -** CAPI3REF: Write Data Into A BLOB Incrementally -** -** ^This function is used to write data into an open [BLOB handle] from a -** caller-supplied buffer. 
^N bytes of data are copied from the buffer Z -** into the open BLOB, starting at offset iOffset. -** -** ^If the [BLOB handle] passed as the first argument was not opened for -** writing (the flags parameter to [sqlite3_blob_open()] was zero), -** this function returns [SQLITE_READONLY]. -** -** ^This function may only modify the contents of the BLOB; it is -** not possible to increase the size of a BLOB using this API. -** ^If offset iOffset is less than N bytes from the end of the BLOB, -** [SQLITE_ERROR] is returned and no data is written. ^If N is -** less than zero [SQLITE_ERROR] is returned and no data is written. -** The size of the BLOB (and hence the maximum value of N+iOffset) -** can be determined using the [sqlite3_blob_bytes()] interface. -** -** ^An attempt to write to an expired [BLOB handle] fails with an -** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred -** before the [BLOB handle] expired are not rolled back by the -** expiration of the handle, though of course those changes might -** have been overwritten by the statement that expired the BLOB handle -** or by other independent statements. -** -** ^(On success, sqlite3_blob_write() returns SQLITE_OK. -** Otherwise, an [error code] or an [extended error code] is returned.)^ -** -** This routine only works on a [BLOB handle] which has been created -** by a prior successful call to [sqlite3_blob_open()] and which has not -** been closed by [sqlite3_blob_close()]. Passing any other pointer in -** to this routine results in undefined and probably undesirable behavior. -** -** See also: [sqlite3_blob_read()]. -*/ -SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset); - -/* -** CAPI3REF: Virtual File System Objects -** -** A virtual filesystem (VFS) is an [sqlite3_vfs] object -** that SQLite uses to interact -** with the underlying operating system. Most SQLite builds come with a -** single default VFS that is appropriate for the host computer. -** New VFSes can be registered and existing VFSes can be unregistered. -** The following interfaces are provided. -** -** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. -** ^Names are case sensitive. -** ^Names are zero-terminated UTF-8 strings. -** ^If there is no match, a NULL pointer is returned. -** ^If zVfsName is NULL then the default VFS is returned. -** -** ^New VFSes are registered with sqlite3_vfs_register(). -** ^Each new VFS becomes the default VFS if the makeDflt flag is set. -** ^The same VFS can be registered multiple times without injury. -** ^To make an existing VFS into the default VFS, register it again -** with the makeDflt flag set. If two different VFSes with the -** same name are registered, the behavior is undefined. If a -** VFS is registered with a name that is NULL or an empty string, -** then the behavior is undefined. -** -** ^Unregister a VFS with the sqlite3_vfs_unregister() interface. -** ^(If the default VFS is unregistered, another VFS is chosen as -** the default. The choice for the new VFS is arbitrary.)^ -*/ -SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); -SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); -SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); - -/* -** CAPI3REF: Mutexes -** -** The SQLite core uses these routines for thread -** synchronization. Though they are intended for internal -** use by SQLite, code that links against SQLite is -** permitted to use any of these routines. 
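-**
-** A brief aside before the mutex details: the VFS interfaces above are
-** commonly used to install a wrapper around the default VFS.  A hedged
-** sketch, in which myVfs and the name "myvfs" are hypothetical:
-**
-**     static sqlite3_vfs myVfs;
-**     sqlite3_vfs *pDefault = sqlite3_vfs_find(0);
-**     myVfs = *pDefault;
-**     myVfs.zName = "myvfs";
-**     sqlite3_vfs_register(&myVfs, 0);
-**
-** The copy inherits the default implementation; individual methods can
-** then be overridden before registration.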
-** -** The SQLite source code contains multiple implementations -** of these mutex routines. An appropriate implementation -** is selected automatically at compile-time. ^(The following -** implementations are available in the SQLite core: -**
-** <ul>
-** <li>   SQLITE_MUTEX_OS2
-** <li>   SQLITE_MUTEX_PTHREADS
-** <li>   SQLITE_MUTEX_W32
-** <li>   SQLITE_MUTEX_NOOP
-** </ul>)^ -** -** ^The SQLITE_MUTEX_NOOP implementation is a set of routines -** that does no real locking and is appropriate for use in -** a single-threaded application. ^The SQLITE_MUTEX_OS2, -** SQLITE_MUTEX_PTHREADS, and SQLITE_MUTEX_W32 implementations -** are appropriate for use on OS/2, Unix, and Windows. -** -** ^(If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor -** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex -** implementation is included with the library. In this case the -** application must supply a custom mutex implementation using the -** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function -** before calling sqlite3_initialize() or any other public sqlite3_ -** function that calls sqlite3_initialize().)^ -** -** ^The sqlite3_mutex_alloc() routine allocates a new -** mutex and returns a pointer to it. ^If it returns NULL -** that means that a mutex could not be allocated. ^SQLite -** will unwind its stack and return an error. ^(The argument -** to sqlite3_mutex_alloc() is one of these integer constants: -**
-** <ul>
-** <li>  SQLITE_MUTEX_FAST
-** <li>  SQLITE_MUTEX_RECURSIVE
-** <li>  SQLITE_MUTEX_STATIC_MASTER
-** <li>  SQLITE_MUTEX_STATIC_MEM
-** <li>  SQLITE_MUTEX_STATIC_MEM2
-** <li>  SQLITE_MUTEX_STATIC_PRNG
-** <li>  SQLITE_MUTEX_STATIC_LRU
-** <li>  SQLITE_MUTEX_STATIC_LRU2
-** </ul>)^ -** -** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) -** cause sqlite3_mutex_alloc() to create -** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE -** is used but not necessarily so when SQLITE_MUTEX_FAST is used. -** The mutex implementation does not need to make a distinction -** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does -** not want to. ^SQLite will only request a recursive mutex in -** cases where it really needs one. ^If a faster non-recursive mutex -** implementation is available on the host platform, the mutex subsystem -** might return such a mutex in response to SQLITE_MUTEX_FAST. -** -** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other -** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return -** a pointer to a static preexisting mutex. ^Six static mutexes are -** used by the current version of SQLite. Future versions of SQLite -** may add additional static mutexes. Static mutexes are for internal -** use by SQLite only. Applications that use SQLite mutexes should -** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or -** SQLITE_MUTEX_RECURSIVE. -** -** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST -** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() -** returns a different mutex on every call. ^But for the static -** mutex types, the same mutex is returned on every call that has -** the same type number. -** -** ^The sqlite3_mutex_free() routine deallocates a previously -** allocated dynamic mutex. ^SQLite is careful to deallocate every -** dynamic mutex that it allocates. The dynamic mutexes must not be in -** use when they are deallocated. Attempting to deallocate a static -** mutex results in undefined behavior. ^SQLite never deallocates -** a static mutex. -** -** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt -** to enter a mutex. ^If another thread is already within the mutex, -** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return -** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK] -** upon successful entry. ^(Mutexes created using -** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. -** In such cases, the -** mutex must be exited an equal number of times before another thread -** can enter.)^ ^(If the same thread tries to enter any other -** kind of mutex more than once, the behavior is undefined. -** SQLite will never exhibit -** such behavior in its own use of mutexes.)^ -** -** ^(Some systems (for example, Windows 95) do not support the operation -** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable behavior.)^ -** -** ^The sqlite3_mutex_leave() routine exits a mutex that was -** previously entered by the same thread. ^(The behavior -** is undefined if the mutex is not currently entered by the -** calling thread or is not currently allocated. SQLite will -** never do either.)^ -** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. -** -** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()].
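A concrete sketch of the allocation and locking discipline described above; error handling is reduced to the NULL check on the allocation:

    sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
    if( p ){
      sqlite3_mutex_enter(p);    /* blocks until the mutex is available */
      /* ... critical section ... */
      sqlite3_mutex_leave(p);
      sqlite3_mutex_free(p);     /* only dynamic mutexes may be freed */
    }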
-*/ -SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int); -SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*); -SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*); -SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*); -SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*); - -/* -** CAPI3REF: Mutex Methods Object -** -** An instance of this structure defines the low-level routines -** used to allocate and use mutexes. -** -** Usually, the default mutex implementations provided by SQLite are -** sufficient, however the user has the option of substituting a custom -** implementation for specialized deployments or systems for which SQLite -** does not provide a suitable implementation. In this case, the user -** creates and populates an instance of this structure to pass -** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option. -** Additionally, an instance of this structure can be used as an -** output variable when querying the system for the current mutex -** implementation, using the [SQLITE_CONFIG_GETMUTEX] option. -** -** ^The xMutexInit method defined by this structure is invoked as -** part of system initialization by the sqlite3_initialize() function. -** ^The xMutexInit routine is called by SQLite exactly once for each -** effective call to [sqlite3_initialize()]. -** -** ^The xMutexEnd method defined by this structure is invoked as -** part of system shutdown by the sqlite3_shutdown() function. The -** implementation of this method is expected to release all outstanding -** resources obtained by the mutex methods implementation, especially -** those obtained by the xMutexInit method. ^The xMutexEnd() -** interface is invoked exactly once for each call to [sqlite3_shutdown()]. -** -** ^(The remaining seven methods defined by this structure (xMutexAlloc, -** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and -** xMutexNotheld) implement the following interfaces (respectively): -** -**
-** <ul>
-**   <li>  [sqlite3_mutex_alloc()] </li>
-**   <li>  [sqlite3_mutex_free()] </li>
-**   <li>  [sqlite3_mutex_enter()] </li>
-**   <li>  [sqlite3_mutex_try()] </li>
-**   <li>  [sqlite3_mutex_leave()] </li>
-**   <li>  [sqlite3_mutex_held()] </li>
-**   <li>  [sqlite3_mutex_notheld()] </li>
-** </ul>
)^ -** -** The only difference is that the public sqlite3_XXX functions enumerated -** above silently ignore any invocations that pass a NULL pointer instead -** of a valid mutex handle. The implementations of the methods defined -** by this structure are not required to handle this case, the results -** of passing a NULL pointer instead of a valid mutex handle are undefined -** (i.e. it is acceptable to provide an implementation that segfaults if -** it is passed a NULL pointer). -** -** The xMutexInit() method must be threadsafe. ^It must be harmless to -** invoke xMutexInit() multiple times within the same process and without -** intervening calls to xMutexEnd(). Second and subsequent calls to -** xMutexInit() must be no-ops. -** -** ^xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()] -** and its associates). ^Similarly, xMutexAlloc() must not use SQLite memory -** allocation for a static mutex. ^However xMutexAlloc() may use SQLite -** memory allocation for a fast or recursive mutex. -** -** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is -** called, but only if the prior call to xMutexInit returned SQLITE_OK. -** If xMutexInit fails in any way, it is expected to clean up after itself -** prior to returning. -*/ -typedef struct sqlite3_mutex_methods sqlite3_mutex_methods; -struct sqlite3_mutex_methods { - int (*xMutexInit)(void); - int (*xMutexEnd)(void); - sqlite3_mutex *(*xMutexAlloc)(int); - void (*xMutexFree)(sqlite3_mutex *); - void (*xMutexEnter)(sqlite3_mutex *); - int (*xMutexTry)(sqlite3_mutex *); - void (*xMutexLeave)(sqlite3_mutex *); - int (*xMutexHeld)(sqlite3_mutex *); - int (*xMutexNotheld)(sqlite3_mutex *); -}; - -/* -** CAPI3REF: Mutex Verification Routines -** -** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines -** are intended for use inside assert() statements. ^The SQLite core -** never uses these routines except inside an assert() and applications -** are advised to follow the lead of the core. ^The SQLite core only -** provides implementations for these routines when it is compiled -** with the SQLITE_DEBUG flag. ^External mutex implementations -** are only required to provide these routines if SQLITE_DEBUG is -** defined and if NDEBUG is not defined. -** -** ^These routines should return true if the mutex in their argument -** is held or not held, respectively, by the calling thread. -** -** ^The implementation is not required to provide versions of these -** routines that actually work. If the implementation does not provide working -** versions of these routines, it should at least provide stubs that always -** return true so that one does not get spurious assertion failures. -** -** ^If the argument to sqlite3_mutex_held() is a NULL pointer then -** the routine should return 1. This seems counter-intuitive since -** clearly the mutex cannot be held if it does not exist. But -** the reason the mutex does not exist is because the build is not -** using mutexes. And we do not want the assert() containing the -** call to sqlite3_mutex_held() to fail, so a non-zero return is -** the appropriate thing to do. ^The sqlite3_mutex_notheld() -** interface should also return 1 when given a NULL pointer. -*/ -#ifndef NDEBUG -SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*); -SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*); -#endif - -/* -** CAPI3REF: Mutex Types -** -** The [sqlite3_mutex_alloc()] interface takes a single argument -** which is one of these integer constants. 
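A sketch of the intended use of the verification routines, mirroring how the SQLite core wraps them in assert(); require_locked is an illustrative application function, and the assert is meaningful only in builds where these routines are provided (SQLITE_DEBUG defined, NDEBUG not defined):

    #include <assert.h>

    static void require_locked(sqlite3_mutex *p){
      assert( sqlite3_mutex_held(p) );
      /* ... code that relies on the caller holding p ... */
    }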
-** -** The set of static mutexes may change from one SQLite release to the -** next. Applications that override the built-in mutex logic must be -** prepared to accommodate additional static mutexes. -*/ -#define SQLITE_MUTEX_FAST 0 -#define SQLITE_MUTEX_RECURSIVE 1 -#define SQLITE_MUTEX_STATIC_MASTER 2 -#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */ -#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */ -#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */ -#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */ -#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */ -#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */ -#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */ - -/* -** CAPI3REF: Retrieve the mutex for a database connection -** -** ^This interface returns a pointer to the [sqlite3_mutex] object that -** serializes access to the [database connection] given in the argument -** when the [threading mode] is Serialized. -** ^If the [threading mode] is Single-thread or Multi-thread then this -** routine returns a NULL pointer. -*/ -SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*); - -/* -** CAPI3REF: Low-Level Control Of Database Files -** -** ^The [sqlite3_file_control()] interface makes a direct call to the -** xFileControl method for the [sqlite3_io_methods] object associated -** with a particular database identified by the second argument. ^The -** name of the database is "main" for the main database or "temp" for the -** TEMP database, or the name that appears after the AS keyword for -** databases that are added using the [ATTACH] SQL command. -** ^A NULL pointer can be used in place of "main" to refer to the -** main database file. -** ^The third and fourth parameters to this routine -** are passed directly through to the second and third parameters of -** the xFileControl method. ^The return value of the xFileControl -** method becomes the return value of this routine. -** -** ^The SQLITE_FCNTL_FILE_POINTER value for the op parameter causes -** a pointer to the underlying [sqlite3_file] object to be written into -** the space pointed to by the 4th parameter. ^The SQLITE_FCNTL_FILE_POINTER -** case is a short-circuit path which does not actually invoke the -** underlying sqlite3_io_methods.xFileControl method. -** -** ^If the second parameter (zDbName) does not match the name of any -** open database file, then SQLITE_ERROR is returned. ^This error -** code is not remembered and will not be recalled by [sqlite3_errcode()] -** or [sqlite3_errmsg()]. The underlying xFileControl method might -** also return SQLITE_ERROR. There is no way to distinguish between -** an incorrect zDbName and an SQLITE_ERROR return from the underlying -** xFileControl method. -** -** See also: [SQLITE_FCNTL_LOCKSTATE] -*/ -SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*); - -/* -** CAPI3REF: Testing Interface -** -** ^The sqlite3_test_control() interface is used to read out internal -** state of SQLite and to inject faults into SQLite for testing -** purposes. ^The first parameter is an operation code that determines -** the number, meaning, and operation of all subsequent parameters. -** -** This interface is not for use by applications. It exists solely -** for verifying the correct operation of the SQLite library. Depending -** on how the SQLite library is compiled, this interface might not exist. -** -** The details of the operation codes, their meanings, the parameters -** they take, and what they do are all subject to change without notice.
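A short sketch of the SQLITE_FCNTL_FILE_POINTER short-circuit path described above, assuming db is an open database connection:

    sqlite3_file *pFile = 0;
    int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &pFile);
    /* On SQLITE_OK, pFile points at the sqlite3_file for the main database. */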
-** Unlike most of the SQLite API, this function is not guaranteed to -** operate consistently from one release to the next. -*/ -SQLITE_API int sqlite3_test_control(int op, ...); - -/* -** CAPI3REF: Testing Interface Operation Codes -** -** These constants are the valid operation code parameters used -** as the first argument to [sqlite3_test_control()]. -** -** These parameters and their meanings are subject to change -** without notice. These values are for testing purposes only. -** Applications should not use any of these parameters or the -** [sqlite3_test_control()] interface. -*/ -#define SQLITE_TESTCTRL_FIRST 5 -#define SQLITE_TESTCTRL_PRNG_SAVE 5 -#define SQLITE_TESTCTRL_PRNG_RESTORE 6 -#define SQLITE_TESTCTRL_PRNG_RESET 7 -#define SQLITE_TESTCTRL_BITVEC_TEST 8 -#define SQLITE_TESTCTRL_FAULT_INSTALL 9 -#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10 -#define SQLITE_TESTCTRL_PENDING_BYTE 11 -#define SQLITE_TESTCTRL_ASSERT 12 -#define SQLITE_TESTCTRL_ALWAYS 13 -#define SQLITE_TESTCTRL_RESERVE 14 -#define SQLITE_TESTCTRL_OPTIMIZATIONS 15 -#define SQLITE_TESTCTRL_ISKEYWORD 16 -#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 -#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 -#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 -#define SQLITE_TESTCTRL_LAST 19 - -/* -** CAPI3REF: SQLite Runtime Status -** -** ^This interface is used to retrieve runtime status information -** about the performance of SQLite, and optionally to reset various -** highwater marks. ^The first argument is an integer code for -** the specific parameter to measure. ^(Recognized integer codes -** are of the form [status parameters | SQLITE_STATUS_...].)^ -** ^The current value of the parameter is returned into *pCurrent. -** ^The highest recorded value is returned in *pHighwater. ^If the -** resetFlag is true, then the highest recorded value is reset after -** *pHighwater is written. ^(Some parameters do not record the highest -** value. For those parameters -** nothing is written into *pHighwater and the resetFlag is ignored.)^ -** ^(Other parameters record only the highwater mark and not the current -** value. For these latter parameters nothing is written into *pCurrent.)^ -** -** ^The sqlite3_status() routine returns SQLITE_OK on success and a -** non-zero [error code] on failure. -** -** This routine is threadsafe but is not atomic. This routine can be -** called while other threads are running the same or different SQLite -** interfaces. However the values returned in *pCurrent and -** *pHighwater reflect the status of SQLite at different points in time -** and it is possible that another thread might change the parameter -** in between the times when *pCurrent and *pHighwater are written. -** -** See also: [sqlite3_db_status()] -*/ -SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); - - -/* -** CAPI3REF: Status Parameters -** KEYWORDS: {status parameters} -** -** These integer constants designate various run-time status parameters -** that can be returned by [sqlite3_status()]. -**
-** <dl>
-** [[SQLITE_STATUS_MEMORY_USED]] ^(<dt>SQLITE_STATUS_MEMORY_USED</dt>
-** <dd>This parameter is the current amount of memory checked out -** using [sqlite3_malloc()], either directly or indirectly. The -** figure includes calls made to [sqlite3_malloc()] by the application -** and internal memory usage by the SQLite library. Scratch memory -** controlled by [SQLITE_CONFIG_SCRATCH] and auxiliary page-cache -** memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in -** this parameter. The amount returned is the sum of the allocation -** sizes as reported by the xSize method in [sqlite3_mem_methods].</dd>)^
-**
-** [[SQLITE_STATUS_MALLOC_SIZE]] ^(<dt>SQLITE_STATUS_MALLOC_SIZE</dt>
-** <dd>This parameter records the largest memory allocation request -** handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their -** internal equivalents). Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.</dd>)^
-**
-** [[SQLITE_STATUS_MALLOC_COUNT]] ^(<dt>SQLITE_STATUS_MALLOC_COUNT</dt>
-** <dd>This parameter records the number of separate memory allocations -** currently checked out.</dd>)^
-**
-** [[SQLITE_STATUS_PAGECACHE_USED]] ^(<dt>SQLITE_STATUS_PAGECACHE_USED</dt>
-** <dd>This parameter returns the number of pages used out of the -** [pagecache memory allocator] that was configured using -** [SQLITE_CONFIG_PAGECACHE]. The -** value returned is in pages, not in bytes.</dd>)^
-**
-** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]] ^(<dt>SQLITE_STATUS_PAGECACHE_OVERFLOW</dt>
-** <dd>This parameter returns the number of bytes of page cache -** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE] -** buffer and were forced to overflow to [sqlite3_malloc()]. The -** returned value includes allocations that overflowed because they -** were too large (they were larger than the "sz" parameter to -** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because -** no space was left in the page cache.</dd>)^
-**
-** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(<dt>SQLITE_STATUS_PAGECACHE_SIZE</dt>
-** <dd>This parameter records the largest memory allocation request -** handed to [pagecache memory allocator]. Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.</dd>)^
-**
-** [[SQLITE_STATUS_SCRATCH_USED]] ^(<dt>SQLITE_STATUS_SCRATCH_USED</dt>
-** <dd>This parameter returns the number of allocations used out of the -** [scratch memory allocator] configured using -** [SQLITE_CONFIG_SCRATCH]. The value returned is in allocations, not -** in bytes. Since a single thread may only have one scratch allocation -** outstanding at a time, this parameter also reports the number of threads -** using scratch memory at the same time.</dd>)^
-**
-** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(<dt>SQLITE_STATUS_SCRATCH_OVERFLOW</dt>
-** <dd>This parameter returns the number of bytes of scratch memory -** allocation which could not be satisfied by the [SQLITE_CONFIG_SCRATCH] -** buffer and were forced to overflow to [sqlite3_malloc()]. The values -** returned include overflows because the requested allocation was too -** large (that is, because the requested allocation was larger than the -** "sz" parameter to [SQLITE_CONFIG_SCRATCH]) and because no scratch buffer -** slots were available. -** </dd>)^
-**
-** [[SQLITE_STATUS_SCRATCH_SIZE]] ^(<dt>SQLITE_STATUS_SCRATCH_SIZE</dt>
-** <dd>This parameter records the largest memory allocation request -** handed to [scratch memory allocator]. Only the value returned in the -** *pHighwater parameter to [sqlite3_status()] is of interest. -** The value written into the *pCurrent parameter is undefined.</dd>)^
-**
-** [[SQLITE_STATUS_PARSER_STACK]] ^(<dt>SQLITE_STATUS_PARSER_STACK</dt>
-** <dd>This parameter records the deepest parser stack. It is only -** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].</dd>)^
-** </dl>
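A minimal sketch of querying one of these parameters: read the current and peak heap usage and reset the highwater mark in the same call:

    #include <stdio.h>
    #include "sqlite3.h"

    static void report_memory(void){
      int cur = 0, peak = 0;
      if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &peak, 1)==SQLITE_OK ){
        printf("memory in use: %d bytes (peak %d)\n", cur, peak);
      }
    }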
-** -** New status parameters may be added from time to time. -*/ -#define SQLITE_STATUS_MEMORY_USED 0 -#define SQLITE_STATUS_PAGECACHE_USED 1 -#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2 -#define SQLITE_STATUS_SCRATCH_USED 3 -#define SQLITE_STATUS_SCRATCH_OVERFLOW 4 -#define SQLITE_STATUS_MALLOC_SIZE 5 -#define SQLITE_STATUS_PARSER_STACK 6 -#define SQLITE_STATUS_PAGECACHE_SIZE 7 -#define SQLITE_STATUS_SCRATCH_SIZE 8 -#define SQLITE_STATUS_MALLOC_COUNT 9 - -/* -** CAPI3REF: Database Connection Status -** -** ^This interface is used to retrieve runtime status information -** about a single [database connection]. ^The first argument is the -** database connection object to be interrogated. ^The second argument -** is an integer constant, taken from the set of -** [SQLITE_DBSTATUS options], that -** determines the parameter to interrogate. The set of -** [SQLITE_DBSTATUS options] is likely -** to grow in future releases of SQLite. -** -** ^The current value of the requested parameter is written into *pCur -** and the highest instantaneous value is written into *pHiwtr. ^If -** the resetFlg is true, then the highest instantaneous value is -** reset back down to the current value. -** -** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a -** non-zero [error code] on failure. -** -** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. -*/ -SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); - -/* -** CAPI3REF: Status Parameters for database connections -** KEYWORDS: {SQLITE_DBSTATUS options} -** -** These constants are the available integer "verbs" that can be passed as -** the second argument to the [sqlite3_db_status()] interface. -** -** New verbs may be added in future releases of SQLite. Existing verbs -** might be discontinued. Applications should check the return code from -** [sqlite3_db_status()] to make sure that the call worked. -** The [sqlite3_db_status()] interface will return a non-zero error code -** if a discontinued or unsupported verb is invoked. -** -**
-** <dl>
-** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_USED</dt>
-** <dd>This parameter returns the number of lookaside memory slots currently -** checked out.</dd>)^
-**
-** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_HIT</dt>
-** <dd>This parameter returns the number of malloc attempts that were -** satisfied using lookaside memory. Only the high-water value is meaningful; -** the current value is always zero.)^
-**
-** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE</dt>
-** <dd>This parameter returns the number of malloc attempts that might have -** been satisfied using lookaside memory but failed due to the amount of -** memory requested being larger than the lookaside slot size. -** Only the high-water value is meaningful; -** the current value is always zero.)^
-**
-** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]] ^(<dt>SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL</dt>
-** <dd>This parameter returns the number of malloc attempts that might have -** been satisfied using lookaside memory but failed due to all lookaside -** memory already being in use. -** Only the high-water value is meaningful; -** the current value is always zero.)^
-**
-** [[SQLITE_DBSTATUS_CACHE_USED]] ^(<dt>SQLITE_DBSTATUS_CACHE_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap -** memory used by all pager caches associated with the database connection.)^ -** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
-**
-** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(<dt>SQLITE_DBSTATUS_SCHEMA_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap -** memory used to store the schema for all databases associated -** with the connection - main, temp, and any [ATTACH]-ed databases.)^ -** ^The full amount of memory used by the schemas is reported, even if the -** schema memory is shared with other database connections due to -** [shared cache mode] being enabled. -** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0.
-**
-** [[SQLITE_DBSTATUS_STMT_USED]] ^(<dt>SQLITE_DBSTATUS_STMT_USED</dt>
-** <dd>This parameter returns the approximate number of bytes of heap -** and lookaside memory used by all prepared statements associated with -** the database connection.)^ -** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0.
-**
-** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(<dt>SQLITE_DBSTATUS_CACHE_HIT</dt>
-** <dd>This parameter returns the number of pager cache hits that have -** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_HIT -** is always 0.
-**
-** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(<dt>SQLITE_DBSTATUS_CACHE_MISS</dt>
-** <dd>This parameter returns the number of pager cache misses that have -** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS -** is always 0.
-**
-** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(<dt>SQLITE_DBSTATUS_CACHE_WRITE</dt>
-** <dd>This parameter returns the number of dirty cache entries that have -** been written to disk. Specifically, the number of pages written to the -** wal file in wal mode databases, or the number of pages written to the -** database file in rollback mode databases. Any pages written as part of -** transaction rollback or database recovery operations are not included. -** If an IO or other error occurs while writing a page to disk, the effect -** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The -** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0.
-** </dl>
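For illustration, a fragment that approximates the pager cache hit rate of an open connection db using two of the verbs above:

    int hit = 0, miss = 0, unused = 0;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT,  &hit,  &unused, 0);
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &miss, &unused, 0);
    /* hit/(double)(hit+miss) approximates the cache hit rate so far. */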
-*/ -#define SQLITE_DBSTATUS_LOOKASIDE_USED 0 -#define SQLITE_DBSTATUS_CACHE_USED 1 -#define SQLITE_DBSTATUS_SCHEMA_USED 2 -#define SQLITE_DBSTATUS_STMT_USED 3 -#define SQLITE_DBSTATUS_LOOKASIDE_HIT 4 -#define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5 -#define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6 -#define SQLITE_DBSTATUS_CACHE_HIT 7 -#define SQLITE_DBSTATUS_CACHE_MISS 8 -#define SQLITE_DBSTATUS_CACHE_WRITE 9 -#define SQLITE_DBSTATUS_MAX 9 /* Largest defined DBSTATUS */ - - -/* -** CAPI3REF: Prepared Statement Status -** -** ^(Each prepared statement maintains various -** [SQLITE_STMTSTATUS counters] that measure the number -** of times it has performed specific operations.)^ These counters can -** be used to monitor the performance characteristics of the prepared -** statements. For example, if the number of table steps greatly exceeds -** the number of table searches or result rows, that would tend to indicate -** that the prepared statement is using a full table scan rather than -** an index. -** -** ^(This interface is used to retrieve and reset counter values from -** a [prepared statement]. The first argument is the prepared statement -** object to be interrogated. The second argument -** is an integer code for a specific [SQLITE_STMTSTATUS counter] -** to be interrogated.)^ -** ^The current value of the requested counter is returned. -** ^If the resetFlg is true, then the counter is reset to zero after this -** interface call returns. -** -** See also: [sqlite3_status()] and [sqlite3_db_status()]. -*/ -SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); - -/* -** CAPI3REF: Status Parameters for prepared statements -** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters} -** -** These preprocessor macros define integer codes that name counter -** values associated with the [sqlite3_stmt_status()] interface. -** The meanings of the various counters are as follows: -** -**
-** <dl>
-** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]] <dt>SQLITE_STMTSTATUS_FULLSCAN_STEP</dt>
-** <dd>^This is the number of times that SQLite has stepped forward in -** a table as part of a full table scan. Large numbers for this counter -** may indicate opportunities for performance improvement through -** careful use of indices.</dd>
-**
-** [[SQLITE_STMTSTATUS_SORT]] <dt>SQLITE_STMTSTATUS_SORT</dt>
-** <dd>^This is the number of sort operations that have occurred. -** A non-zero value in this counter may indicate an opportunity to -** improve performance through careful use of indices.</dd>
-**
-** [[SQLITE_STMTSTATUS_AUTOINDEX]] <dt>SQLITE_STMTSTATUS_AUTOINDEX</dt>
-** <dd>^This is the number of rows inserted into transient indices that -** were created automatically in order to help joins run faster. -** A non-zero value in this counter may indicate an opportunity to -** improve performance by adding permanent indices that do not -** need to be reinitialized each time the statement is run.</dd>
-** </dl>
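A sketch of reading these counters after running an existing prepared statement pStmt; a large full-scan step count often signals a missing index:

    int nScan = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
    int nSort = sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_SORT, 0);
    /* Pass a non-zero final argument to reset a counter after reading it. */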
-*/ -#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1 -#define SQLITE_STMTSTATUS_SORT 2 -#define SQLITE_STMTSTATUS_AUTOINDEX 3 - -/* -** CAPI3REF: Custom Page Cache Object -** -** The sqlite3_pcache type is opaque. It is implemented by -** the pluggable module. The SQLite core has no knowledge of -** its size or internal structure and never deals with the -** sqlite3_pcache object except by holding and passing pointers -** to the object. -** -** See [sqlite3_pcache_methods2] for additional information. -*/ -typedef struct sqlite3_pcache sqlite3_pcache; - -/* -** CAPI3REF: Custom Page Cache Object -** -** The sqlite3_pcache_page object represents a single page in the -** page cache. The page cache will allocate instances of this -** object. Various methods of the page cache use pointers to instances -** of this object as parameters or as their return value. -** -** See [sqlite3_pcache_methods2] for additional information. -*/ -typedef struct sqlite3_pcache_page sqlite3_pcache_page; -struct sqlite3_pcache_page { - void *pBuf; /* The content of the page */ - void *pExtra; /* Extra information associated with the page */ -}; - -/* -** CAPI3REF: Application Defined Page Cache. -** KEYWORDS: {page cache} -** -** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can -** register an alternative page cache implementation by passing in an -** instance of the sqlite3_pcache_methods2 structure.)^ -** In many applications, most of the heap memory allocated by -** SQLite is used for the page cache. -** By implementing a -** custom page cache using this API, an application can better control -** the amount of memory consumed by SQLite, the way in which -** that memory is allocated and released, and the policies used to -** determine exactly which parts of a database file are cached and for -** how long. -** -** The alternative page cache mechanism is an -** extreme measure that is only needed by the most demanding applications. -** The built-in page cache is recommended for most uses. -** -** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an -** internal buffer by SQLite within the call to [sqlite3_config]. Hence -** the application may discard the parameter after the call to -** [sqlite3_config()] returns.)^ -** -** [[the xInit() page cache method]] -** ^(The xInit() method is called once for each effective -** call to [sqlite3_initialize()])^ -** (usually only once during the lifetime of the process). ^(The xInit() -** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^ -** The intent of the xInit() method is to set up global data structures -** required by the custom page cache implementation. -** ^(If the xInit() method is NULL, then the -** built-in default page cache is used instead of the application defined -** page cache.)^ -** -** [[the xShutdown() page cache method]] -** ^The xShutdown() method is called by [sqlite3_shutdown()]. -** It can be used to clean up -** any outstanding resources before process shutdown, if required. -** ^The xShutdown() method may be NULL. -** -** ^SQLite automatically serializes calls to the xInit method, -** so the xInit method need not be threadsafe. ^The -** xShutdown method is only called from [sqlite3_shutdown()] so it does -** not need to be threadsafe either. All other methods must be threadsafe -** in multithreaded applications. -** -** ^SQLite will never invoke xInit() more than once without an intervening -** call to xShutdown(). 
-** -** [[the xCreate() page cache methods]] -** ^SQLite invokes the xCreate() method to construct a new cache instance. -** SQLite will typically create one cache instance for each open database file, -** though this is not guaranteed. ^The -** first parameter, szPage, is the size in bytes of the pages that must -** be allocated by the cache. ^szPage will always be a power of two. ^The -** second parameter szExtra is a number of bytes of extra storage -** associated with each page cache entry. ^The szExtra parameter will be -** a number less than 250. SQLite will use the -** extra szExtra bytes on each page to store metadata about the underlying -** database page on disk. The value passed into szExtra depends -** on the SQLite version, the target platform, and how SQLite was compiled. -** ^The third argument to xCreate(), bPurgeable, is true if the cache being -** created will be used to cache database pages of a file stored on disk, or -** false if it is used for an in-memory database. The cache implementation -** does not have to do anything special based on the value of bPurgeable; -** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will -** never invoke xUnpin() except to deliberately delete a page. -** ^In other words, calls to xUnpin() on a cache with bPurgeable set to -** false will always have the "discard" flag set to true. -** ^Hence, a cache created with bPurgeable false will -** never contain any unpinned pages. -** -** [[the xCachesize() page cache method]] -** ^(The xCachesize() method may be called at any time by SQLite to set the -** suggested maximum cache-size (the number of pages to store) for the cache -** instance passed as the first argument. This is the value configured using -** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable -** parameter, the implementation is not required to do anything with this -** value; it is advisory only. -** -** [[the xPagecount() page cache methods]] -** The xPagecount() method must return the number of pages currently -** stored in the cache, both pinned and unpinned. -** -** [[the xFetch() page cache methods]] -** The xFetch() method locates a page in the cache and returns a pointer to -** an sqlite3_pcache_page object associated with that page, or a NULL pointer. -** The pBuf element of the returned sqlite3_pcache_page object will be a -** pointer to a buffer of szPage bytes used to store the content of a -** single database page. The pExtra element of sqlite3_pcache_page will be -** a pointer to the szExtra bytes of extra storage that SQLite has requested -** for each entry in the page cache. -** -** The page to be fetched is determined by the key. ^The minimum key value -** is 1. After it has been retrieved using xFetch, the page is considered -** to be "pinned". -** -** If the requested page is already in the page cache, then the page cache -** implementation must return a pointer to the page buffer with its content -** intact. If the requested page is not already in the cache, then the -** cache implementation should use the value of the createFlag -** parameter to help it determine what action to take: -**
-** <table>
-** <tr><th> createFlag <th> Behaviour when page is not already in cache
-** <tr><td> 0 <td> Do not allocate a new page. Return NULL.
-** <tr><td> 1 <td> Allocate a new page if it is easy and convenient to do so.
-**                 Otherwise return NULL.
-** <tr><td> 2 <td> Make every effort to allocate a new page. Only return
-**                 NULL if allocating a new page is effectively impossible.
-** </table>
-** -** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite -** will only use a createFlag of 2 after a prior call with a createFlag of 1 -** failed.)^ In between the two xFetch() calls, SQLite may -** attempt to unpin one or more cache pages by spilling the content of -** pinned pages to disk and synching the operating system disk cache. -** -** [[the xUnpin() page cache method]] -** ^xUnpin() is called by SQLite with a pointer to a currently pinned page -** as its second argument. If the third parameter, discard, is non-zero, -** then the page must be evicted from the cache. -** ^If the discard parameter is -** zero, then the page may be discarded or retained at the discretion of -** the page cache implementation. ^The page cache implementation -** may choose to evict unpinned pages at any time. -** -** The cache must not perform any reference counting. A single -** call to xUnpin() unpins the page regardless of the number of prior calls -** to xFetch(). -** -** [[the xRekey() page cache methods]] -** The xRekey() method is used to change the key value associated with the -** page passed as the second argument. If the cache -** previously contains an entry associated with newKey, it must be -** discarded. ^Any prior cache entry associated with newKey is guaranteed not -** to be pinned. -** -** When SQLite calls the xTruncate() method, the cache must discard all -** existing cache entries with page numbers (keys) greater than or equal -** to the value of the iLimit parameter passed to xTruncate(). If any -** of these pages are pinned, they are implicitly unpinned, meaning that -** they can be safely discarded. -** -** [[the xDestroy() page cache method]] -** ^The xDestroy() method is used to delete a cache allocated by xCreate(). -** All resources associated with the specified cache should be freed. ^After -** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*] -** handle invalid, and will not use it with any other sqlite3_pcache_methods2 -** functions. -** -** [[the xShrink() page cache method]] -** ^SQLite invokes the xShrink() method when it wants the page cache to -** free up as much heap memory as possible. The page cache implementation -** is not obligated to free any memory, but well-behaved implementations should -** do their best. -*/ -typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2; -struct sqlite3_pcache_methods2 { - int iVersion; - void *pArg; - int (*xInit)(void*); - void (*xShutdown)(void*); - sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable); - void (*xCachesize)(sqlite3_pcache*, int nCachesize); - int (*xPagecount)(sqlite3_pcache*); - sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); - void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard); - void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*, - unsigned oldKey, unsigned newKey); - void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); - void (*xDestroy)(sqlite3_pcache*); - void (*xShrink)(sqlite3_pcache*); -}; - -/* -** This is the obsolete pcache_methods object that has now been replaced -** by sqlite3_pcache_methods2. This object is not used by SQLite. It is -** retained in the header file for backwards compatibility only.
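To make the registration step for the sqlite3_pcache_methods2 interface concrete, here is a sketch of wiring an application-defined cache into SQLite before sqlite3_initialize(). Every myPCache* function is an assumed application implementation of the corresponding method documented above; none of these names are provided by SQLite:

    /* All myPCache* functions are hypothetical application code. */
    static sqlite3_pcache_methods2 myPCache = {
      1,                 /* iVersion */
      0,                 /* pArg handed to xInit/xShutdown */
      myPCacheInit, myPCacheShutdown, myPCacheCreate, myPCacheCachesize,
      myPCachePagecount, myPCacheFetch, myPCacheUnpin, myPCacheRekey,
      myPCacheTruncate, myPCacheDestroy, myPCacheShrink
    };
    int rc = sqlite3_config(SQLITE_CONFIG_PCACHE2, &myPCache);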
-*/ -typedef struct sqlite3_pcache_methods sqlite3_pcache_methods; -struct sqlite3_pcache_methods { - void *pArg; - int (*xInit)(void*); - void (*xShutdown)(void*); - sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable); - void (*xCachesize)(sqlite3_pcache*, int nCachesize); - int (*xPagecount)(sqlite3_pcache*); - void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); - void (*xUnpin)(sqlite3_pcache*, void*, int discard); - void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey); - void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); - void (*xDestroy)(sqlite3_pcache*); -}; - - -/* -** CAPI3REF: Online Backup Object -** -** The sqlite3_backup object records state information about an ongoing -** online backup operation. ^The sqlite3_backup object is created by -** a call to [sqlite3_backup_init()] and is destroyed by a call to -** [sqlite3_backup_finish()]. -** -** See Also: [Using the SQLite Online Backup API] -*/ -typedef struct sqlite3_backup sqlite3_backup; - -/* -** CAPI3REF: Online Backup API. -** -** The backup API copies the content of one database into another. -** It is useful either for creating backups of databases or -** for copying in-memory databases to or from persistent files. -** -** See Also: [Using the SQLite Online Backup API] -** -** ^SQLite holds a write transaction open on the destination database file -** for the duration of the backup operation. -** ^The source database is read-locked only while it is being read; -** it is not locked continuously for the entire backup operation. -** ^Thus, the backup may be performed on a live source database without -** preventing other database connections from -** reading or writing to the source database while the backup is underway. -** -** ^(To perform a backup operation: -**
-** <ol>
-** <li> sqlite3_backup_init() is called once to initialize the
-**      backup,
-** <li> sqlite3_backup_step() is called one or more times to transfer
-**      the data between the two databases, and finally
-** <li> sqlite3_backup_finish() is called to release all resources
-**      associated with the backup operation.
-** </ol>
)^ -** There should be exactly one call to sqlite3_backup_finish() for each -** successful call to sqlite3_backup_init(). -** -** [[sqlite3_backup_init()]] sqlite3_backup_init() -** -** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the -** [database connection] associated with the destination database -** and the database name, respectively. -** ^The database name is "main" for the main database, "temp" for the -** temporary database, or the name specified after the AS keyword in -** an [ATTACH] statement for an attached database. -** ^The S and M arguments passed to -** sqlite3_backup_init(D,N,S,M) identify the [database connection] -** and database name of the source database, respectively. -** ^The source and destination [database connections] (parameters S and D) -** must be different or else sqlite3_backup_init(D,N,S,M) will fail with -** an error. -** -** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is -** returned and an error code and error message are stored in the -** destination [database connection] D. -** ^The error code and message for the failed call to sqlite3_backup_init() -** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or -** [sqlite3_errmsg16()] functions. -** ^A successful call to sqlite3_backup_init() returns a pointer to an -** [sqlite3_backup] object. -** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and -** sqlite3_backup_finish() functions to perform the specified backup -** operation. -** -** [[sqlite3_backup_step()]] sqlite3_backup_step() -** -** ^Function sqlite3_backup_step(B,N) will copy up to N pages between -** the source and destination databases specified by [sqlite3_backup] object B. -** ^If N is negative, all remaining source pages are copied. -** ^If sqlite3_backup_step(B,N) successfully copies N pages and there -** are still more pages to be copied, then the function returns [SQLITE_OK]. -** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages -** from source to destination, then it returns [SQLITE_DONE]. -** ^If an error occurs while running sqlite3_backup_step(B,N), -** then an [error code] is returned. ^As well as [SQLITE_OK] and -** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY], -** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an -** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code. -** -** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if -**
-** <ol>
-** <li> the destination database was opened read-only, or
-** <li> the destination database is using write-ahead-log journaling
-**      and the destination and source page sizes differ, or
-** <li> the destination database is an in-memory database and the
-**      destination and source page sizes differ.
-** </ol>)^ -** -** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then -** the [sqlite3_busy_handler | busy-handler function] -** is invoked (if one is specified). ^If the -** busy-handler returns non-zero before the lock is available, then -** [SQLITE_BUSY] is returned to the caller. ^In this case the call to -** sqlite3_backup_step() can be retried later. ^If the source -** [database connection] -** is being used to write to the source database when sqlite3_backup_step() -** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this -** case the call to sqlite3_backup_step() can be retried later on. ^(If -** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or -** [SQLITE_READONLY] is returned, then -** there is no point in retrying the call to sqlite3_backup_step(). These -** errors are considered fatal.)^ The application must accept -** that the backup operation has failed and pass the backup operation handle -** to sqlite3_backup_finish() to release associated resources. -** -** ^The first call to sqlite3_backup_step() obtains an exclusive lock -** on the destination file. ^The exclusive lock is not released until either -** sqlite3_backup_finish() is called or the backup operation is complete -** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to -** sqlite3_backup_step() obtains a [shared lock] on the source database that -** lasts for the duration of the sqlite3_backup_step() call. -** ^Because the source database is not locked between calls to -** sqlite3_backup_step(), the source database may be modified mid-way -** through the backup process. ^If the source database is modified by an -** external process or via a database connection other than the one being -** used by the backup operation, then the backup will be automatically -** restarted by the next call to sqlite3_backup_step(). ^If the source -** database is modified using the same database connection as is used -** by the backup operation, then the backup database is automatically -** updated at the same time. -** -** [[sqlite3_backup_finish()]] sqlite3_backup_finish() -** -** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the -** application wishes to abandon the backup operation, the application -** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish(). -** ^The sqlite3_backup_finish() interface releases all -** resources associated with the [sqlite3_backup] object. -** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any -** active write-transaction on the destination database is rolled back. -** The [sqlite3_backup] object is invalid -** and may not be used following a call to sqlite3_backup_finish(). -** -** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no -** sqlite3_backup_step() errors occurred, regardless of whether or not -** sqlite3_backup_step() completed. -** ^If an out-of-memory condition or IO error occurred during any prior -** sqlite3_backup_step() call on the same [sqlite3_backup] object, then -** sqlite3_backup_finish() returns the corresponding [error code]. -** -** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step() -** is not a permanent error and does not affect the return value of -** sqlite3_backup_finish().
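In practice the three-step pattern above reduces to a short loop. This sketch copies the main database of pSrc into pDst, backing off briefly on SQLITE_BUSY or SQLITE_LOCKED; the 64 pages per step and the 100 ms delay are arbitrary illustrative choices:

    static int do_backup(sqlite3 *pDst, sqlite3 *pSrc){
      int rc;
      sqlite3_backup *p = sqlite3_backup_init(pDst, "main", pSrc, "main");
      if( p==0 ) return sqlite3_errcode(pDst);
      do{
        rc = sqlite3_backup_step(p, 64);                     /* copy up to 64 pages */
        if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ) sqlite3_sleep(100);
      }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
      return sqlite3_backup_finish(p);  /* SQLITE_OK if no step error occurred */
    }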
-** -** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]] -** sqlite3_backup_remaining() and sqlite3_backup_pagecount() -** -** ^Each call to sqlite3_backup_step() sets two values inside -** the [sqlite3_backup] object: the number of pages still to be backed -** up and the total number of pages in the source database file. -** The sqlite3_backup_remaining() and sqlite3_backup_pagecount() interfaces -** retrieve these two values, respectively. -** -** ^The values returned by these functions are only updated by -** sqlite3_backup_step(). ^If the source database is modified during a backup -** operation, then the values are not updated to account for any extra -** pages that need to be updated or the size of the source database file -** changing. -** -** Concurrent Usage of Database Handles -** -** ^The source [database connection] may be used by the application for other -** purposes while a backup operation is underway or being initialized. -** ^If SQLite is compiled and configured to support threadsafe database -** connections, then the source database connection may be used concurrently -** from within other threads. -** -** However, the application must guarantee that the destination -** [database connection] is not passed to any other API (by any thread) after -** sqlite3_backup_init() is called and before the corresponding call to -** sqlite3_backup_finish(). SQLite does not currently check to see -** if the application incorrectly accesses the destination [database connection] -** and so no error code is reported, but the operations may malfunction -** nevertheless. Use of the destination database connection while a -** backup is in progress might also cause a mutex deadlock. -** -** If running in [shared cache mode], the application must -** guarantee that the shared cache used by the destination database -** is not accessed while the backup is running. In practice this means -** that the application must guarantee that the disk file being -** backed up to is not accessed by any connection within the process, -** not just the specific connection that was passed to sqlite3_backup_init(). -** -** The [sqlite3_backup] object itself is partially threadsafe. Multiple -** threads may safely make multiple concurrent calls to sqlite3_backup_step(). -** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount() -** APIs are not strictly speaking threadsafe. If they are invoked at the -** same time as another thread is invoking sqlite3_backup_step() it is -** possible that they return invalid values. -*/ -SQLITE_API sqlite3_backup *sqlite3_backup_init( - sqlite3 *pDest, /* Destination database handle */ - const char *zDestName, /* Destination database name */ - sqlite3 *pSource, /* Source database handle */ - const char *zSourceName /* Source database name */ -); -SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage); -SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p); -SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p); -SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); - -/* -** CAPI3REF: Unlock Notification -** -** ^When running in shared-cache mode, a database operation may fail with -** an [SQLITE_LOCKED] error if the required locks on the shared-cache or -** individual tables within the shared-cache cannot be obtained. See -** [SQLite Shared-Cache Mode] for a description of shared-cache locking.
-** ^This API may be used to register a callback that SQLite will invoke -** when the connection currently holding the required lock relinquishes it. -** ^This API is only available if the library was compiled with the -** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined. -** -** See Also: [Using the SQLite Unlock Notification Feature]. -** -** ^Shared-cache locks are released when a database connection concludes -** its current transaction, either by committing it or rolling it back. -** -** ^When a connection (known as the blocked connection) fails to obtain a -** shared-cache lock and SQLITE_LOCKED is returned to the caller, the -** identity of the database connection (the blocking connection) that -** has locked the required resource is stored internally. ^After an -** application receives an SQLITE_LOCKED error, it may call the -** sqlite3_unlock_notify() method with the blocked connection handle as -** the first argument to register for a callback that will be invoked -** when the blocking connection's current transaction is concluded. ^The -** callback is invoked from within the [sqlite3_step] or [sqlite3_close] -** call that concludes the blocking connection's transaction. -** -** ^(If sqlite3_unlock_notify() is called in a multi-threaded application, -** there is a chance that the blocking connection will have already -** concluded its transaction by the time sqlite3_unlock_notify() is invoked. -** If this happens, then the specified callback is invoked immediately, -** from within the call to sqlite3_unlock_notify().)^ -** -** ^If the blocked connection is attempting to obtain a write-lock on a -** shared-cache table, and more than one other connection currently holds -** a read-lock on the same table, then SQLite arbitrarily selects one of -** the other connections to use as the blocking connection. -** -** ^(There may be at most one unlock-notify callback registered by a -** blocked connection. If sqlite3_unlock_notify() is called when the -** blocked connection already has a registered unlock-notify callback, -** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is -** called with a NULL pointer as its second argument, then any existing -** unlock-notify callback is canceled. ^The blocked connection's -** unlock-notify callback may also be canceled by closing the blocked -** connection using [sqlite3_close()]. -** -** The unlock-notify callback is not reentrant. If an application invokes -** any sqlite3_xxx API functions from within an unlock-notify callback, a -** crash or deadlock may be the result. -** -** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always -** returns SQLITE_OK. -** -** Callback Invocation Details -** -** When an unlock-notify callback is registered, the application provides a -** single void* pointer that is passed to the callback when it is invoked. -** However, the signature of the callback function allows SQLite to pass -** it an array of void* context pointers. The first argument passed to -** an unlock-notify callback is a pointer to an array of void* pointers, -** and the second is the number of entries in the array. -** -** When a blocking connection's transaction is concluded, there may be -** more than one blocked connection that has registered for an unlock-notify -** callback.
^If two or more such blocked connections have specified the -** same callback function, then instead of invoking the callback function -** multiple times, it is invoked once with the set of void* context pointers -** specified by the blocked connections bundled together into an array. -** This gives the application an opportunity to prioritize any actions -** related to the set of unblocked database connections. -** -** Deadlock Detection -** -** Assuming that after registering for an unlock-notify callback a -** database waits for the callback to be issued before taking any further -** action (a reasonable assumption), then using this API may cause the -** application to deadlock. For example, if connection X is waiting for -** connection Y's transaction to be concluded, and similarly connection -** Y is waiting on connection X's transaction, then neither connection -** will proceed and the system may remain deadlocked indefinitely. -** -** To avoid this scenario, the sqlite3_unlock_notify() performs deadlock -** detection. ^If a given call to sqlite3_unlock_notify() would put the -** system in a deadlocked state, then SQLITE_LOCKED is returned and no -** unlock-notify callback is registered. The system is said to be in -** a deadlocked state if connection A has registered for an unlock-notify -** callback on the conclusion of connection B's transaction, and connection -** B has itself registered for an unlock-notify callback when connection -** A's transaction is concluded. ^Indirect deadlock is also detected, so -** the system is also considered to be deadlocked if connection B has -** registered for an unlock-notify callback on the conclusion of connection -** C's transaction, where connection C is waiting on connection A. ^Any -** number of levels of indirection are allowed. -** -** The "DROP TABLE" Exception -** -** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost -** always appropriate to call sqlite3_unlock_notify(). There is however, -** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement, -** SQLite checks if there are any currently executing SELECT statements -** that belong to the same connection. If there are, SQLITE_LOCKED is -** returned. In this case there is no "blocking connection", so invoking -** sqlite3_unlock_notify() results in the unlock-notify callback being -** invoked immediately. If the application then re-attempts the "DROP TABLE" -** or "DROP INDEX" query, an infinite loop might be the result. -** -** One way around this problem is to check the extended error code returned -** by an sqlite3_step() call. ^(If there is a blocking connection, then the -** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in -** the special "DROP TABLE/INDEX" case, the extended error code is just -** SQLITE_LOCKED.)^ -*/ -SQLITE_API int sqlite3_unlock_notify( - sqlite3 *pBlocked, /* Waiting connection */ - void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */ - void *pNotifyArg /* Argument to pass to xNotify */ -); - - -/* -** CAPI3REF: String Comparison -** -** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications -** and extensions to compare the contents of two buffers containing UTF-8 -** strings in a case-independent fashion, using the same definition of "case -** independence" that SQLite uses internally when comparing identifiers. 
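Returning to the unlock-notify interface documented above, here is a sketch of a typical callback that signals waiting threads, following the condition-variable pattern from the referenced "Using the SQLite Unlock Notification Feature" documentation; UnlockNotification is an illustrative application-defined type, not part of the SQLite API:

    #include <pthread.h>

    typedef struct UnlockNotification {
      int fired;               /* set once the callback has run */
      pthread_cond_t cond;
      pthread_mutex_t mutex;
    } UnlockNotification;

    static void unlock_notify_cb(void **apArg, int nArg){
      int i;
      for(i=0; i<nArg; i++){
        UnlockNotification *p = (UnlockNotification *)apArg[i];
        pthread_mutex_lock(&p->mutex);
        p->fired = 1;
        pthread_cond_signal(&p->cond);
        pthread_mutex_unlock(&p->mutex);
      }
    }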
-*/ -SQLITE_API int sqlite3_stricmp(const char *, const char *); -SQLITE_API int sqlite3_strnicmp(const char *, const char *, int); - -/* -** CAPI3REF: Error Logging Interface -** -** ^The [sqlite3_log()] interface writes a message into the error log -** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()]. -** ^If logging is enabled, the zFormat string and subsequent arguments are -** used with [sqlite3_snprintf()] to generate the final output string. -** -** The sqlite3_log() interface is intended for use by extensions such as -** virtual tables, collating functions, and SQL functions. While there is -** nothing to prevent an application from calling sqlite3_log(), doing so -** is considered bad form. -** -** The zFormat string must not be NULL. -** -** To avoid deadlocks and other threading problems, the sqlite3_log() routine -** will not use dynamically allocated memory. The log message is stored in -** a fixed-length buffer on the stack. If the log message is longer than -** a few hundred characters, it will be truncated to the length of the -** buffer. -*/ -SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); - -/* -** CAPI3REF: Write-Ahead Log Commit Hook -** -** ^The [sqlite3_wal_hook()] function is used to register a callback that -** will be invoked each time a database connection commits data to a -** [write-ahead log] (i.e. whenever a transaction is committed in -** [journal_mode | journal_mode=WAL mode]). -** -** ^The callback is invoked by SQLite after the commit has taken place and -** the associated write-lock on the database released, so the implementation -** may read, write or [checkpoint] the database as required. -** -** ^The first parameter passed to the callback function when it is invoked -** is a copy of the third parameter passed to sqlite3_wal_hook() when -** registering the callback. ^The second is a copy of the database handle. -** ^The third parameter is the name of the database that was written to - -** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter -** is the number of pages currently in the write-ahead log file, -** including those that were just committed. -** -** The callback function should normally return [SQLITE_OK]. ^If an error -** code is returned, that error will propagate back up through the -** SQLite code base to cause the statement that provoked the callback -** to report an error, though the commit will have still occurred. If the -** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value -** that does not correspond to any valid SQLite error code, the results -** are undefined. -** -** A single database handle may have at most a single write-ahead log callback -** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any -** previously registered write-ahead log callback. ^Note that the -** [sqlite3_wal_autocheckpoint()] interface and the -** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will -** those overwrite any prior [sqlite3_wal_hook()] settings. -*/ -SQLITE_API void *sqlite3_wal_hook( - sqlite3*, - int(*)(void *,sqlite3*,const char*,int), - void* -); - -/* -** CAPI3REF: Configure an auto-checkpoint -** -** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around -** [sqlite3_wal_hook()] that causes any database on [database connection] D -** to automatically [checkpoint] -** after committing a transaction if there are N or -** more frames in the [write-ahead log] file. 
^Passing zero or -** a negative value as the nFrame parameter disables automatic -** checkpoints entirely. -** -** ^The callback registered by this function replaces any existing callback -** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback -** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism -** configured by this function. -** -** ^The [wal_autocheckpoint pragma] can be used to invoke this interface -** from SQL. -** -** ^Every new [database connection] defaults to having the auto-checkpoint -** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT] -** pages. The use of this interface -** is only necessary if the default setting is found to be suboptimal -** for a particular application. -*/ -SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N); - -/* -** CAPI3REF: Checkpoint a database -** -** ^The [sqlite3_wal_checkpoint(D,X)] interface causes database named X -** on [database connection] D to be [checkpointed]. ^If X is NULL or an -** empty string, then a checkpoint is run on all databases of -** connection D. ^If the database connection D is not in -** [WAL | write-ahead log mode] then this interface is a harmless no-op. -** -** ^The [wal_checkpoint pragma] can be used to invoke this interface -** from SQL. ^The [sqlite3_wal_autocheckpoint()] interface and the -** [wal_autocheckpoint pragma] can be used to cause this interface to be -** run whenever the WAL reaches a certain size threshold. -** -** See also: [sqlite3_wal_checkpoint_v2()] -*/ -SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); - -/* -** CAPI3REF: Checkpoint a database -** -** Run a checkpoint operation on WAL database zDb attached to database -** handle db. The specific operation is determined by the value of the -** eMode parameter: -** -**
-** <dl>
-** <dt>SQLITE_CHECKPOINT_PASSIVE<dd>
-** Checkpoint as many frames as possible without waiting for any database
-** readers or writers to finish. Sync the db file if all frames in the log
-** are checkpointed. This mode is the same as calling
-** sqlite3_wal_checkpoint(). The busy-handler callback is never invoked.
-**
-** <dt>SQLITE_CHECKPOINT_FULL<dd>
-** This mode blocks (calls the busy-handler callback) until there is no
-** database writer and all readers are reading from the most recent database
-** snapshot. It then checkpoints all frames in the log file and syncs the
-** database file. This call blocks database writers while it is running,
-** but not database readers.
-**
-** <dt>SQLITE_CHECKPOINT_RESTART<dd>
-** This mode works the same way as SQLITE_CHECKPOINT_FULL, except after
-** checkpointing the log file it blocks (calls the busy-handler callback)
-** until all readers are reading from the database file only. This ensures
-** that the next client to write to the database file restarts the log file
-** from the beginning. This call blocks database writers while it is running,
-** but not database readers.
-** </dl>
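[Editor's note: a hedged usage sketch of the checkpoint modes listed above;
`db` is assumed to be an open connection in WAL mode.]

    int nLog = 0, nCkpt = 0;
    int rc = sqlite3_wal_checkpoint_v2(db, "main",
                                       SQLITE_CHECKPOINT_FULL, &nLog, &nCkpt);
    /* On SQLITE_OK: nLog = frames now in the WAL,
    ** nCkpt = frames checkpointed, per the description that follows. */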
-** -** If pnLog is not NULL, then *pnLog is set to the total number of frames in -** the log file before returning. If pnCkpt is not NULL, then *pnCkpt is set to -** the total number of checkpointed frames (including any that were already -** checkpointed when this function is called). *pnLog and *pnCkpt may be -** populated even if sqlite3_wal_checkpoint_v2() returns other than SQLITE_OK. -** If no values are available because of an error, they are both set to -1 -** before returning to communicate this to the caller. -** -** All calls obtain an exclusive "checkpoint" lock on the database file. If -** any other process is running a checkpoint operation at the same time, the -** lock cannot be obtained and SQLITE_BUSY is returned. Even if there is a -** busy-handler configured, it will not be invoked in this case. -** -** The SQLITE_CHECKPOINT_FULL and RESTART modes also obtain the exclusive -** "writer" lock on the database file. If the writer lock cannot be obtained -** immediately, and a busy-handler is configured, it is invoked and the writer -** lock retried until either the busy-handler returns 0 or the lock is -** successfully obtained. The busy-handler is also invoked while waiting for -** database readers as described above. If the busy-handler returns 0 before -** the writer lock is obtained or while waiting for database readers, the -** checkpoint operation proceeds from that point in the same way as -** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible -** without blocking any further. SQLITE_BUSY is returned in this case. -** -** If parameter zDb is NULL or points to a zero length string, then the -** specified operation is attempted on all WAL databases. In this case the -** values written to output parameters *pnLog and *pnCkpt are undefined. If -** an SQLITE_BUSY error is encountered when processing one or more of the -** attached WAL databases, the operation is still attempted on any remaining -** attached databases and SQLITE_BUSY is returned to the caller. If any other -** error occurs while processing an attached database, processing is abandoned -** and the error code returned to the caller immediately. If no error -** (SQLITE_BUSY or otherwise) is encountered while processing the attached -** databases, SQLITE_OK is returned. -** -** If database zDb is the name of an attached database that is not in WAL -** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. If -** zDb is not NULL (or a zero length string) and is not the name of any -** attached database, SQLITE_ERROR is returned to the caller. -*/ -SQLITE_API int sqlite3_wal_checkpoint_v2( - sqlite3 *db, /* Database handle */ - const char *zDb, /* Name of attached database (or NULL) */ - int eMode, /* SQLITE_CHECKPOINT_* value */ - int *pnLog, /* OUT: Size of WAL log in frames */ - int *pnCkpt /* OUT: Total number of frames checkpointed */ -); - -/* -** CAPI3REF: Checkpoint operation parameters -** -** These constants can be used as the 3rd parameter to -** [sqlite3_wal_checkpoint_v2()]. See the [sqlite3_wal_checkpoint_v2()] -** documentation for additional information about the meaning and use of -** each of these values. -*/ -#define SQLITE_CHECKPOINT_PASSIVE 0 -#define SQLITE_CHECKPOINT_FULL 1 -#define SQLITE_CHECKPOINT_RESTART 2 - -/* -** CAPI3REF: Virtual Table Interface Configuration -** -** This function may be called by either the [xConnect] or [xCreate] method -** of a [virtual table] implementation to configure -** various facets of the virtual table interface. 
-** -** If this interface is invoked outside the context of an xConnect or -** xCreate virtual table method then the behavior is undefined. -** -** At present, there is only one option that may be configured using -** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options -** may be added in the future. -*/ -SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); - -/* -** CAPI3REF: Virtual Table Configuration Options -** -** These macros define the various options to the -** [sqlite3_vtab_config()] interface that [virtual table] implementations -** can use to customize and optimize their behavior. -** -**
-** <dl>
-** <dt>SQLITE_VTAB_CONSTRAINT_SUPPORT
-** <dd>Calls of the form
-** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported,
-** where X is an integer. If X is zero, then the [virtual table] whose
-** [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not
-** support constraints. In this configuration (which is the default) if
-** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire
-** statement is rolled back as if [ON CONFLICT | OR ABORT] had been
-** specified as part of the user's SQL statement, regardless of the actual
-** ON CONFLICT mode specified.
-**
-** If X is non-zero, then the virtual table implementation guarantees
-** that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before
-** any modifications to internal or persistent data structures have been made.
-** If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite
-** is able to roll back a statement or database transaction, and abandon
-** or continue processing the current SQL statement as appropriate.
-** If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns
-** [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode
-** had been ABORT.
-**
-** Virtual table implementations that are required to handle OR REPLACE
-** must do so within the [xUpdate] method. If a call to the
-** [sqlite3_vtab_on_conflict()] function indicates that the current ON
-** CONFLICT policy is REPLACE, the virtual table implementation should
-** silently replace the appropriate rows within the xUpdate callback and
-** return SQLITE_OK. Or, if this is not possible, it may return
-** SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT
-** constraint handling.
-** </dl>
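[Editor's note: an illustrative call, as it might appear inside a virtual
table's xCreate or xConnect method; `db` is the handle passed to that method.]

    /* Promise that xUpdate reports SQLITE_CONSTRAINT before making changes */
    sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);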
-*/ -#define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 - -/* -** CAPI3REF: Determine The Virtual Table Conflict Policy -** -** This function may only be called from within a call to the [xUpdate] method -** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The -** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL], -** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode -** of the SQL statement that triggered the call to the [xUpdate] method of the -** [virtual table]. -*/ -SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *); - -/* -** CAPI3REF: Conflict resolution modes -** -** These constants are returned by [sqlite3_vtab_on_conflict()] to -** inform a [virtual table] implementation what the [ON CONFLICT] mode -** is for the SQL statement being evaluated. -** -** Note that the [SQLITE_IGNORE] constant is also used as a potential -** return value from the [sqlite3_set_authorizer()] callback and that -** [SQLITE_ABORT] is also a [result code]. -*/ -#define SQLITE_ROLLBACK 1 -/* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */ -#define SQLITE_FAIL 3 -/* #define SQLITE_ABORT 4 // Also an error code */ -#define SQLITE_REPLACE 5 - - - -/* -** Undo the hack that converts floating point types to integer for -** builds on processors without floating point support. -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# undef double -#endif - -#if 0 -} /* End of the 'extern "C"' block */ -#endif -#endif - -/* -** 2010 August 30 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -*/ - -#ifndef _SQLITE3RTREE_H_ -#define _SQLITE3RTREE_H_ - - -#if 0 -extern "C" { -#endif - -typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry; - -/* -** Register a geometry callback named zGeom that can be used as part of an -** R-Tree geometry query as follows: -** -** SELECT ... FROM WHERE MATCH $zGeom(... params ...) -*/ -SQLITE_API int sqlite3_rtree_geometry_callback( - sqlite3 *db, - const char *zGeom, -#ifdef SQLITE_RTREE_INT_ONLY - int (*xGeom)(sqlite3_rtree_geometry*, int n, sqlite3_int64 *a, int *pRes), -#else - int (*xGeom)(sqlite3_rtree_geometry*, int n, double *a, int *pRes), -#endif - void *pContext -); - - -/* -** A pointer to a structure of the following type is passed as the first -** argument to callbacks registered using rtree_geometry_callback(). -*/ -struct sqlite3_rtree_geometry { - void *pContext; /* Copy of pContext passed to s_r_g_c() */ - int nParam; /* Size of array aParam[] */ - double *aParam; /* Parameters passed to SQL geom function */ - void *pUser; /* Callback implementation user data */ - void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */ -}; - - -#if 0 -} /* end of the 'extern "C"' block */ -#endif - -#endif /* ifndef _SQLITE3RTREE_H_ */ - - -/************** End of sqlite3.h *********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include hash.h in the middle of sqliteInt.h ******************/ -/************** Begin file hash.h ********************************************/ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. 
In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the header file for the generic hash-table implemenation -** used in SQLite. -*/ -#ifndef _SQLITE_HASH_H_ -#define _SQLITE_HASH_H_ - -/* Forward declarations of structures. */ -typedef struct Hash Hash; -typedef struct HashElem HashElem; - -/* A complete hash table is an instance of the following structure. -** The internals of this structure are intended to be opaque -- client -** code should not attempt to access or modify the fields of this structure -** directly. Change this structure only by using the routines below. -** However, some of the "procedures" and "functions" for modifying and -** accessing this structure are really macros, so we can't really make -** this structure opaque. -** -** All elements of the hash table are on a single doubly-linked list. -** Hash.first points to the head of this list. -** -** There are Hash.htsize buckets. Each bucket points to a spot in -** the global doubly-linked list. The contents of the bucket are the -** element pointed to plus the next _ht.count-1 elements in the list. -** -** Hash.htsize and Hash.ht may be zero. In that case lookup is done -** by a linear search of the global list. For small tables, the -** Hash.ht table is never allocated because if there are few elements -** in the table, it is faster to do a linear search than to manage -** the hash table. -*/ -struct Hash { - unsigned int htsize; /* Number of buckets in the hash table */ - unsigned int count; /* Number of entries in this table */ - HashElem *first; /* The first element of the array */ - struct _ht { /* the hash table */ - int count; /* Number of entries with this hash */ - HashElem *chain; /* Pointer to first entry with this hash */ - } *ht; -}; - -/* Each element in the hash table is an instance of the following -** structure. All elements are stored on a single doubly-linked list. -** -** Again, this structure is intended to be opaque, but it can't really -** be opaque because it is used by macros. -*/ -struct HashElem { - HashElem *next, *prev; /* Next and previous elements in the table */ - void *data; /* Data associated with this element */ - const char *pKey; int nKey; /* Key associated with this element */ -}; - -/* -** Access routines. To delete, insert a NULL pointer. -*/ -SQLITE_PRIVATE void sqlite3HashInit(Hash*); -SQLITE_PRIVATE void *sqlite3HashInsert(Hash*, const char *pKey, int nKey, void *pData); -SQLITE_PRIVATE void *sqlite3HashFind(const Hash*, const char *pKey, int nKey); -SQLITE_PRIVATE void sqlite3HashClear(Hash*); - -/* -** Macros for looping over all elements of a hash table. The idiom is -** like this: -** -** Hash h; -** HashElem *p; -** ... 
-** for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){ -** SomeStructure *pData = sqliteHashData(p); -** // do something with pData -** } -*/ -#define sqliteHashFirst(H) ((H)->first) -#define sqliteHashNext(E) ((E)->next) -#define sqliteHashData(E) ((E)->data) -/* #define sqliteHashKey(E) ((E)->pKey) // NOT USED */ -/* #define sqliteHashKeysize(E) ((E)->nKey) // NOT USED */ - -/* -** Number of entries in a hash table -*/ -/* #define sqliteHashCount(H) ((H)->count) // NOT USED */ - -#endif /* _SQLITE_HASH_H_ */ - -/************** End of hash.h ************************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include parse.h in the middle of sqliteInt.h *****************/ -/************** Begin file parse.h *******************************************/ -#define TK_SEMI 1 -#define TK_EXPLAIN 2 -#define TK_QUERY 3 -#define TK_PLAN 4 -#define TK_BEGIN 5 -#define TK_TRANSACTION 6 -#define TK_DEFERRED 7 -#define TK_IMMEDIATE 8 -#define TK_EXCLUSIVE 9 -#define TK_COMMIT 10 -#define TK_END 11 -#define TK_ROLLBACK 12 -#define TK_SAVEPOINT 13 -#define TK_RELEASE 14 -#define TK_TO 15 -#define TK_TABLE 16 -#define TK_CREATE 17 -#define TK_IF 18 -#define TK_NOT 19 -#define TK_EXISTS 20 -#define TK_TEMP 21 -#define TK_LP 22 -#define TK_RP 23 -#define TK_AS 24 -#define TK_COMMA 25 -#define TK_ID 26 -#define TK_INDEXED 27 -#define TK_ABORT 28 -#define TK_ACTION 29 -#define TK_AFTER 30 -#define TK_ANALYZE 31 -#define TK_ASC 32 -#define TK_ATTACH 33 -#define TK_BEFORE 34 -#define TK_BY 35 -#define TK_CASCADE 36 -#define TK_CAST 37 -#define TK_COLUMNKW 38 -#define TK_CONFLICT 39 -#define TK_DATABASE 40 -#define TK_DESC 41 -#define TK_DETACH 42 -#define TK_EACH 43 -#define TK_FAIL 44 -#define TK_FOR 45 -#define TK_IGNORE 46 -#define TK_INITIALLY 47 -#define TK_INSTEAD 48 -#define TK_LIKE_KW 49 -#define TK_MATCH 50 -#define TK_NO 51 -#define TK_KEY 52 -#define TK_OF 53 -#define TK_OFFSET 54 -#define TK_PRAGMA 55 -#define TK_RAISE 56 -#define TK_REPLACE 57 -#define TK_RESTRICT 58 -#define TK_ROW 59 -#define TK_TRIGGER 60 -#define TK_VACUUM 61 -#define TK_VIEW 62 -#define TK_VIRTUAL 63 -#define TK_REINDEX 64 -#define TK_RENAME 65 -#define TK_CTIME_KW 66 -#define TK_ANY 67 -#define TK_OR 68 -#define TK_AND 69 -#define TK_IS 70 -#define TK_BETWEEN 71 -#define TK_IN 72 -#define TK_ISNULL 73 -#define TK_NOTNULL 74 -#define TK_NE 75 -#define TK_EQ 76 -#define TK_GT 77 -#define TK_LE 78 -#define TK_LT 79 -#define TK_GE 80 -#define TK_ESCAPE 81 -#define TK_BITAND 82 -#define TK_BITOR 83 -#define TK_LSHIFT 84 -#define TK_RSHIFT 85 -#define TK_PLUS 86 -#define TK_MINUS 87 -#define TK_STAR 88 -#define TK_SLASH 89 -#define TK_REM 90 -#define TK_CONCAT 91 -#define TK_COLLATE 92 -#define TK_BITNOT 93 -#define TK_STRING 94 -#define TK_JOIN_KW 95 -#define TK_CONSTRAINT 96 -#define TK_DEFAULT 97 -#define TK_NULL 98 -#define TK_PRIMARY 99 -#define TK_UNIQUE 100 -#define TK_CHECK 101 -#define TK_REFERENCES 102 -#define TK_AUTOINCR 103 -#define TK_ON 104 -#define TK_INSERT 105 -#define TK_DELETE 106 -#define TK_UPDATE 107 -#define TK_SET 108 -#define TK_DEFERRABLE 109 -#define TK_FOREIGN 110 -#define TK_DROP 111 -#define TK_UNION 112 -#define TK_ALL 113 -#define TK_EXCEPT 114 -#define TK_INTERSECT 115 -#define TK_SELECT 116 -#define TK_DISTINCT 117 -#define TK_DOT 118 -#define TK_FROM 119 -#define TK_JOIN 120 -#define TK_USING 121 -#define TK_ORDER 122 -#define TK_GROUP 123 -#define TK_HAVING 124 -#define TK_LIMIT 125 -#define TK_WHERE 126 -#define 
TK_INTO 127 -#define TK_VALUES 128 -#define TK_INTEGER 129 -#define TK_FLOAT 130 -#define TK_BLOB 131 -#define TK_REGISTER 132 -#define TK_VARIABLE 133 -#define TK_CASE 134 -#define TK_WHEN 135 -#define TK_THEN 136 -#define TK_ELSE 137 -#define TK_INDEX 138 -#define TK_ALTER 139 -#define TK_ADD 140 -#define TK_TO_TEXT 141 -#define TK_TO_BLOB 142 -#define TK_TO_NUMERIC 143 -#define TK_TO_INT 144 -#define TK_TO_REAL 145 -#define TK_ISNOT 146 -#define TK_END_OF_FILE 147 -#define TK_ILLEGAL 148 -#define TK_SPACE 149 -#define TK_UNCLOSED_STRING 150 -#define TK_FUNCTION 151 -#define TK_COLUMN 152 -#define TK_AGG_FUNCTION 153 -#define TK_AGG_COLUMN 154 -#define TK_CONST_FUNC 155 -#define TK_UMINUS 156 -#define TK_UPLUS 157 - -/************** End of parse.h ***********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -#include -#include -#include -#include -#include - -/* -** If compiling for a processor that lacks floating point support, -** substitute integer for floating-point -*/ -#ifdef SQLITE_OMIT_FLOATING_POINT -# define double sqlite_int64 -# define float sqlite_int64 -# define LONGDOUBLE_TYPE sqlite_int64 -# ifndef SQLITE_BIG_DBL -# define SQLITE_BIG_DBL (((sqlite3_int64)1)<<50) -# endif -# define SQLITE_OMIT_DATETIME_FUNCS 1 -# define SQLITE_OMIT_TRACE 1 -# undef SQLITE_MIXED_ENDIAN_64BIT_FLOAT -# undef SQLITE_HAVE_ISNAN -#endif -#ifndef SQLITE_BIG_DBL -# define SQLITE_BIG_DBL (1e99) -#endif - -/* -** OMIT_TEMPDB is set to 1 if SQLITE_OMIT_TEMPDB is defined, or 0 -** afterward. Having this macro allows us to cause the C compiler -** to omit code used by TEMP tables without messy #ifndef statements. -*/ -#ifdef SQLITE_OMIT_TEMPDB -#define OMIT_TEMPDB 1 -#else -#define OMIT_TEMPDB 0 -#endif - -/* -** The "file format" number is an integer that is incremented whenever -** the VDBE-level file format changes. The following macros define the -** the default file format for new databases and the maximum file format -** that the library can read. -*/ -#define SQLITE_MAX_FILE_FORMAT 4 -#ifndef SQLITE_DEFAULT_FILE_FORMAT -# define SQLITE_DEFAULT_FILE_FORMAT 4 -#endif - -/* -** Determine whether triggers are recursive by default. This can be -** changed at run-time using a pragma. -*/ -#ifndef SQLITE_DEFAULT_RECURSIVE_TRIGGERS -# define SQLITE_DEFAULT_RECURSIVE_TRIGGERS 0 -#endif - -/* -** Provide a default value for SQLITE_TEMP_STORE in case it is not specified -** on the command-line -*/ -#ifndef SQLITE_TEMP_STORE -# define SQLITE_TEMP_STORE 1 -#endif - -/* -** GCC does not define the offsetof() macro so we'll have to do it -** ourselves. -*/ -#ifndef offsetof -#define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD)) -#endif - -/* -** Check to see if this machine uses EBCDIC. (Yes, believe it or -** not, there are still machines out there that use EBCDIC.) -*/ -#if 'A' == '\301' -# define SQLITE_EBCDIC 1 -#else -# define SQLITE_ASCII 1 -#endif - -/* -** Integers of known sizes. These typedefs might change for architectures -** where the sizes very. Preprocessor macros are available so that the -** types can be conveniently redefined at compile-type. Like this: -** -** cc '-DUINTPTR_TYPE=long long int' ... 
-*/ -#ifndef UINT32_TYPE -# ifdef HAVE_UINT32_T -# define UINT32_TYPE uint32_t -# else -# define UINT32_TYPE unsigned int -# endif -#endif -#ifndef UINT16_TYPE -# ifdef HAVE_UINT16_T -# define UINT16_TYPE uint16_t -# else -# define UINT16_TYPE unsigned short int -# endif -#endif -#ifndef INT16_TYPE -# ifdef HAVE_INT16_T -# define INT16_TYPE int16_t -# else -# define INT16_TYPE short int -# endif -#endif -#ifndef UINT8_TYPE -# ifdef HAVE_UINT8_T -# define UINT8_TYPE uint8_t -# else -# define UINT8_TYPE unsigned char -# endif -#endif -#ifndef INT8_TYPE -# ifdef HAVE_INT8_T -# define INT8_TYPE int8_t -# else -# define INT8_TYPE signed char -# endif -#endif -#ifndef LONGDOUBLE_TYPE -# define LONGDOUBLE_TYPE long double -#endif -typedef sqlite_int64 i64; /* 8-byte signed integer */ -typedef sqlite_uint64 u64; /* 8-byte unsigned integer */ -typedef UINT32_TYPE u32; /* 4-byte unsigned integer */ -typedef UINT16_TYPE u16; /* 2-byte unsigned integer */ -typedef INT16_TYPE i16; /* 2-byte signed integer */ -typedef UINT8_TYPE u8; /* 1-byte unsigned integer */ -typedef INT8_TYPE i8; /* 1-byte signed integer */ - -/* -** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value -** that can be stored in a u32 without loss of data. The value -** is 0x00000000ffffffff. But because of quirks of some compilers, we -** have to specify the value in the less intuitive manner shown: -*/ -#define SQLITE_MAX_U32 ((((u64)1)<<32)-1) - -/* -** The datatype used to store estimates of the number of rows in a -** table or index. This is an unsigned integer type. For 99.9% of -** the world, a 32-bit integer is sufficient. But a 64-bit integer -** can be used at compile-time if desired. -*/ -#ifdef SQLITE_64BIT_STATS - typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */ -#else - typedef u32 tRowcnt; /* 32-bit is the default */ -#endif - -/* -** Macros to determine whether the machine is big or little endian, -** evaluated at runtime. -*/ -#ifdef SQLITE_AMALGAMATION -SQLITE_PRIVATE const int sqlite3one = 1; -#else -SQLITE_PRIVATE const int sqlite3one; -#endif -#if defined(i386) || defined(__i386__) || defined(_M_IX86)\ - || defined(__x86_64) || defined(__x86_64__) -# define SQLITE_BIGENDIAN 0 -# define SQLITE_LITTLEENDIAN 1 -# define SQLITE_UTF16NATIVE SQLITE_UTF16LE -#else -# define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0) -# define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1) -# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE) -#endif - -/* -** Constants for the largest and smallest possible 64-bit signed integers. -** These macros are designed to work correctly on both 32-bit and 64-bit -** compilers. -*/ -#define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) -#define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) - -/* -** Round up a number to the next larger multiple of 8. This is used -** to force 8-byte alignment on 64-bit architectures. -*/ -#define ROUND8(x) (((x)+7)&~7) - -/* -** Round down to the nearest multiple of 8 -*/ -#define ROUNDDOWN8(x) ((x)&~7) - -/* -** Assert that the pointer X is aligned to an 8-byte boundary. This -** macro is used only within assert() to verify that the code gets -** all alignment restrictions correct. -** -** Except, if SQLITE_4_BYTE_ALIGNED_MALLOC is defined, then the -** underlying malloc() implemention might return us 4-byte aligned -** pointers. In that case, only verify 4-byte alignment. 
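[Editor's note: a worked example of the rounding macros defined above.]

    ROUND8(13)      /* (13+7)&~7 == 16, the next multiple of 8     */
    ROUNDDOWN8(13)  /* 13&~7     ==  8, the previous multiple of 8 */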
-*/ -#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0) -#else -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0) -#endif - - -/* -** An instance of the following structure is used to store the busy-handler -** callback for a given sqlite handle. -** -** The sqlite.busyHandler member of the sqlite struct contains the busy -** callback for the database handle. Each pager opened via the sqlite -** handle is passed a pointer to sqlite.busyHandler. The busy-handler -** callback is currently invoked only from within pager.c. -*/ -typedef struct BusyHandler BusyHandler; -struct BusyHandler { - int (*xFunc)(void *,int); /* The busy callback */ - void *pArg; /* First arg to busy callback */ - int nBusy; /* Incremented with each busy call */ -}; - -/* -** Name of the master database table. The master database table -** is a special table that holds the names and attributes of all -** user tables and indices. -*/ -#define MASTER_NAME "sqlite_master" -#define TEMP_MASTER_NAME "sqlite_temp_master" - -/* -** The root-page of the master database table. -*/ -#define MASTER_ROOT 1 - -/* -** The name of the schema table. -*/ -#define SCHEMA_TABLE(x) ((!OMIT_TEMPDB)&&(x==1)?TEMP_MASTER_NAME:MASTER_NAME) - -/* -** A convenience macro that returns the number of elements in -** an array. -*/ -#define ArraySize(X) ((int)(sizeof(X)/sizeof(X[0]))) - -/* -** The following value as a destructor means to use sqlite3DbFree(). -** The sqlite3DbFree() routine requires two parameters instead of the -** one parameter that destructors normally want. So we have to introduce -** this magic value that the code knows to handle differently. Any -** pointer will work here as long as it is distinct from SQLITE_STATIC -** and SQLITE_TRANSIENT. -*/ -#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3MallocSize) - -/* -** When SQLITE_OMIT_WSD is defined, it means that the target platform does -** not support Writable Static Data (WSD) such as global and static variables. -** All variables must either be on the stack or dynamically allocated from -** the heap. When WSD is unsupported, the variable declarations scattered -** throughout the SQLite code must become constants instead. The SQLITE_WSD -** macro is used for this purpose. And instead of referencing the variable -** directly, we use its constant as a key to lookup the run-time allocated -** buffer that holds real variable. The constant is also the initializer -** for the run-time allocated buffer. -** -** In the usual case where WSD is supported, the SQLITE_WSD and GLOBAL -** macros become no-ops and have zero performance impact. -*/ -#ifdef SQLITE_OMIT_WSD - #define SQLITE_WSD const - #define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v))) - #define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config) -SQLITE_API int sqlite3_wsd_init(int N, int J); -SQLITE_API void *sqlite3_wsd_find(void *K, int L); -#else - #define SQLITE_WSD - #define GLOBAL(t,v) v - #define sqlite3GlobalConfig sqlite3Config -#endif - -/* -** The following macros are used to suppress compiler warnings and to -** make it clear to human readers when a function parameter is deliberately -** left unused within the body of a function. This usually happens when -** a function is called via a function pointer. For example the -** implementation of an SQL aggregate step callback may not use the -** parameter indicating the number of arguments passed to the aggregate, -** if it knows that this is enforced elsewhere. 
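[Editor's note: a minimal sketch of the pattern being described; the
function name and body are hypothetical.]

    static void exampleStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      UNUSED_PARAMETER2(argc, argv);  /* arity is enforced at registration */
      sqlite3_result_null(ctx);       /* placeholder body */
    }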
-** -** When a function parameter is not used at all within the body of a function, -** it is generally named "NotUsed" or "NotUsed2" to make things even clearer. -** However, these macros may also be used to suppress warnings related to -** parameters that may or may not be used depending on compilation options. -** For example those parameters only used in assert() statements. In these -** cases the parameters are named as per the usual conventions. -*/ -#define UNUSED_PARAMETER(x) (void)(x) -#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y) - -/* -** Forward references to structures -*/ -typedef struct AggInfo AggInfo; -typedef struct AuthContext AuthContext; -typedef struct AutoincInfo AutoincInfo; -typedef struct Bitvec Bitvec; -typedef struct CollSeq CollSeq; -typedef struct Column Column; -typedef struct Db Db; -typedef struct Schema Schema; -typedef struct Expr Expr; -typedef struct ExprList ExprList; -typedef struct ExprSpan ExprSpan; -typedef struct FKey FKey; -typedef struct FuncDestructor FuncDestructor; -typedef struct FuncDef FuncDef; -typedef struct FuncDefHash FuncDefHash; -typedef struct IdList IdList; -typedef struct Index Index; -typedef struct IndexSample IndexSample; -typedef struct KeyClass KeyClass; -typedef struct KeyInfo KeyInfo; -typedef struct Lookaside Lookaside; -typedef struct LookasideSlot LookasideSlot; -typedef struct Module Module; -typedef struct NameContext NameContext; -typedef struct Parse Parse; -typedef struct RowSet RowSet; -typedef struct Savepoint Savepoint; -typedef struct Select Select; -typedef struct SrcList SrcList; -typedef struct StrAccum StrAccum; -typedef struct Table Table; -typedef struct TableLock TableLock; -typedef struct Token Token; -typedef struct Trigger Trigger; -typedef struct TriggerPrg TriggerPrg; -typedef struct TriggerStep TriggerStep; -typedef struct UnpackedRecord UnpackedRecord; -typedef struct VTable VTable; -typedef struct VtabCtx VtabCtx; -typedef struct Walker Walker; -typedef struct WherePlan WherePlan; -typedef struct WhereInfo WhereInfo; -typedef struct WhereLevel WhereLevel; - -/* -** Defer sourcing vdbe.h and btree.h until after the "u8" and -** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque -** pointer types (i.e. FuncDef) defined above. -*/ -/************** Include btree.h in the middle of sqliteInt.h *****************/ -/************** Begin file btree.h *******************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This header file defines the interface that the sqlite B-Tree file -** subsystem. See comments in the source code for a detailed description -** of what each interface routine does. -*/ -#ifndef _BTREE_H_ -#define _BTREE_H_ - -/* TODO: This definition is just included so other modules compile. It -** needs to be revisited. -*/ -#define SQLITE_N_BTREE_META 10 - -/* -** If defined as non-zero, auto-vacuum is enabled by default. Otherwise -** it must be turned on for each database using "PRAGMA auto_vacuum = 1". 
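[Editor's note: an illustrative way to turn auto-vacuum on at run time,
assuming `db` is a newly created, still-empty database.]

    sqlite3_exec(db, "PRAGMA auto_vacuum = 1;", 0, 0, 0);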
-*/ -#ifndef SQLITE_DEFAULT_AUTOVACUUM - #define SQLITE_DEFAULT_AUTOVACUUM 0 -#endif - -#define BTREE_AUTOVACUUM_NONE 0 /* Do not do auto-vacuum */ -#define BTREE_AUTOVACUUM_FULL 1 /* Do full auto-vacuum */ -#define BTREE_AUTOVACUUM_INCR 2 /* Incremental vacuum */ - -/* -** Forward declarations of structure -*/ -typedef struct Btree Btree; -typedef struct BtCursor BtCursor; -typedef struct BtShared BtShared; - - -SQLITE_PRIVATE int sqlite3BtreeOpen( - sqlite3_vfs *pVfs, /* VFS to use with this b-tree */ - const char *zFilename, /* Name of database file to open */ - sqlite3 *db, /* Associated database connection */ - Btree **ppBtree, /* Return open Btree* here */ - int flags, /* Flags */ - int vfsFlags /* Flags passed through to VFS open */ -); - -/* The flags parameter to sqlite3BtreeOpen can be the bitwise or of the -** following values. -** -** NOTE: These values must match the corresponding PAGER_ values in -** pager.h. -*/ -#define BTREE_OMIT_JOURNAL 1 /* Do not create or use a rollback journal */ -#define BTREE_MEMORY 2 /* This is an in-memory DB */ -#define BTREE_SINGLE 4 /* The file contains at most 1 b-tree */ -#define BTREE_UNORDERED 8 /* Use of a hash implementation is OK */ - -SQLITE_PRIVATE int sqlite3BtreeClose(Btree*); -SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeSetSafetyLevel(Btree*,int,int,int); -SQLITE_PRIVATE int sqlite3BtreeSyncDisabled(Btree*); -SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix); -SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree*); -SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree*,int); -SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree*); -SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeGetReserve(Btree*); -SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *, int); -SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *); -SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster); -SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree*, int); -SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*); -SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree*,int); -SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree*, int*, int flags); -SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree*); -SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree*); -SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree*); -SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *, int, void(*)(void *)); -SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *pBtree); -SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *pBtree, int iTab, u8 isWriteLock); -SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *, int, int); - -SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *); -SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *); -SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *, Btree *); - -SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *); - -/* The flags parameter to sqlite3BtreeCreateTable can be the bitwise OR -** of the flags shown below. -** -** Every SQLite table must have either BTREE_INTKEY or BTREE_BLOBKEY set. -** With BTREE_INTKEY, the table key is a 64-bit integer and arbitrary data -** is stored in the leaves. (BTREE_INTKEY is used for SQL tables.) With -** BTREE_BLOBKEY, the key is an arbitrary BLOB and no content is stored -** anywhere - the key is the content. (BTREE_BLOBKEY is used for SQL -** indices.) 
-*/ -#define BTREE_INTKEY 1 /* Table has only 64-bit signed integer keys */ -#define BTREE_BLOBKEY 2 /* Table has keys only - no data */ - -SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree*, int, int*); -SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, int*); -SQLITE_PRIVATE void sqlite3BtreeTripAllCursors(Btree*, int); - -SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *pBtree, int idx, u32 *pValue); -SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value); - -/* -** The second parameter to sqlite3BtreeGetMeta or sqlite3BtreeUpdateMeta -** should be one of the following values. The integer values are assigned -** to constants so that the offset of the corresponding field in an -** SQLite database header may be found using the following formula: -** -** offset = 36 + (idx * 4) -** -** For example, the free-page-count field is located at byte offset 36 of -** the database file header. The incr-vacuum-flag field is located at -** byte offset 64 (== 36+4*7). -*/ -#define BTREE_FREE_PAGE_COUNT 0 -#define BTREE_SCHEMA_VERSION 1 -#define BTREE_FILE_FORMAT 2 -#define BTREE_DEFAULT_CACHE_SIZE 3 -#define BTREE_LARGEST_ROOT_PAGE 4 -#define BTREE_TEXT_ENCODING 5 -#define BTREE_USER_VERSION 6 -#define BTREE_INCR_VACUUM 7 - -SQLITE_PRIVATE int sqlite3BtreeCursor( - Btree*, /* BTree containing table to open */ - int iTable, /* Index of root page */ - int wrFlag, /* 1 for writing. 0 for read-only */ - struct KeyInfo*, /* First argument to compare function */ - BtCursor *pCursor /* Space to write cursor structure */ -); -SQLITE_PRIVATE int sqlite3BtreeCursorSize(void); -SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor*); - -SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( - BtCursor*, - UnpackedRecord *pUnKey, - i64 intKey, - int bias, - int *pRes -); -SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*, int*); -SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const void *pKey, i64 nKey, - const void *pData, int nData, - int nZero, int bias, int seekResult); -SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*); -SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor*, i64 *pSize); -SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*); -SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor*, int *pAmt); -SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor*, int *pAmt); -SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor*, u32 *pSize); -SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*); -SQLITE_PRIVATE void sqlite3BtreeSetCachedRowid(BtCursor*, sqlite3_int64); -SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeGetCachedRowid(BtCursor*); - -SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*); -SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*); - -SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*); -SQLITE_PRIVATE void sqlite3BtreeCacheOverflow(BtCursor *); -SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *); - -SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBt, int iVersion); - -#ifndef NDEBUG -SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*); -#endif - -#ifndef SQLITE_OMIT_BTREECOUNT -SQLITE_PRIVATE int 
sqlite3BtreeCount(BtCursor *, i64 *); -#endif - -#ifdef SQLITE_TEST -SQLITE_PRIVATE int sqlite3BtreeCursorInfo(BtCursor*, int*, int); -SQLITE_PRIVATE void sqlite3BtreeCursorList(Btree*); -#endif - -#ifndef SQLITE_OMIT_WAL -SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *); -#endif - -/* -** If we are not using shared cache, then there is no need to -** use mutexes to access the BtShared structures. So make the -** Enter and Leave procedures no-ops. -*/ -#ifndef SQLITE_OMIT_SHARED_CACHE -SQLITE_PRIVATE void sqlite3BtreeEnter(Btree*); -SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3*); -#else -# define sqlite3BtreeEnter(X) -# define sqlite3BtreeEnterAll(X) -#endif - -#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE -SQLITE_PRIVATE int sqlite3BtreeSharable(Btree*); -SQLITE_PRIVATE void sqlite3BtreeLeave(Btree*); -SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor*); -SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor*); -SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3*); -#ifndef NDEBUG - /* These routines are used inside assert() statements only. */ -SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree*); -SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3*); -SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3*,int,Schema*); -#endif -#else - -# define sqlite3BtreeSharable(X) 0 -# define sqlite3BtreeLeave(X) -# define sqlite3BtreeEnterCursor(X) -# define sqlite3BtreeLeaveCursor(X) -# define sqlite3BtreeLeaveAll(X) - -# define sqlite3BtreeHoldsMutex(X) 1 -# define sqlite3BtreeHoldsAllMutexes(X) 1 -# define sqlite3SchemaMutexHeld(X,Y,Z) 1 -#endif - - -#endif /* _BTREE_H_ */ - -/************** End of btree.h ***********************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include vdbe.h in the middle of sqliteInt.h ******************/ -/************** Begin file vdbe.h ********************************************/ -/* -** 2001 September 15 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Header file for the Virtual DataBase Engine (VDBE) -** -** This header defines the interface to the virtual database engine -** or VDBE. The VDBE implements an abstract machine that runs a -** simple program to access and modify the underlying database. -*/ -#ifndef _SQLITE_VDBE_H_ -#define _SQLITE_VDBE_H_ -/* #include */ - -/* -** A single VDBE is an opaque structure named "Vdbe". Only routines -** in the source file sqliteVdbe.c are allowed to see the insides -** of this structure. -*/ -typedef struct Vdbe Vdbe; - -/* -** The names of the following types declared in vdbeInt.h are required -** for the VdbeOp definition. -*/ -typedef struct VdbeFunc VdbeFunc; -typedef struct Mem Mem; -typedef struct SubProgram SubProgram; - -/* -** A single instruction of the virtual machine has an opcode -** and as many as three operands. 
The instruction is recorded -** as an instance of the following structure: -*/ -struct VdbeOp { - u8 opcode; /* What operation to perform */ - signed char p4type; /* One of the P4_xxx constants for p4 */ - u8 opflags; /* Mask of the OPFLG_* flags in opcodes.h */ - u8 p5; /* Fifth parameter is an unsigned character */ - int p1; /* First operand */ - int p2; /* Second parameter (often the jump destination) */ - int p3; /* The third parameter */ - union { /* fourth parameter */ - int i; /* Integer value if p4type==P4_INT32 */ - void *p; /* Generic pointer */ - char *z; /* Pointer to data for string (char array) types */ - i64 *pI64; /* Used when p4type is P4_INT64 */ - double *pReal; /* Used when p4type is P4_REAL */ - FuncDef *pFunc; /* Used when p4type is P4_FUNCDEF */ - VdbeFunc *pVdbeFunc; /* Used when p4type is P4_VDBEFUNC */ - CollSeq *pColl; /* Used when p4type is P4_COLLSEQ */ - Mem *pMem; /* Used when p4type is P4_MEM */ - VTable *pVtab; /* Used when p4type is P4_VTAB */ - KeyInfo *pKeyInfo; /* Used when p4type is P4_KEYINFO */ - int *ai; /* Used when p4type is P4_INTARRAY */ - SubProgram *pProgram; /* Used when p4type is P4_SUBPROGRAM */ - int (*xAdvance)(BtCursor *, int *); - } p4; -#ifdef SQLITE_DEBUG - char *zComment; /* Comment to improve readability */ -#endif -#ifdef VDBE_PROFILE - int cnt; /* Number of times this instruction was executed */ - u64 cycles; /* Total time spent executing this instruction */ -#endif -}; -typedef struct VdbeOp VdbeOp; - - -/* -** A sub-routine used to implement a trigger program. -*/ -struct SubProgram { - VdbeOp *aOp; /* Array of opcodes for sub-program */ - int nOp; /* Elements in aOp[] */ - int nMem; /* Number of memory cells required */ - int nCsr; /* Number of cursors required */ - int nOnce; /* Number of OP_Once instructions */ - void *token; /* id that may be used to recursive triggers */ - SubProgram *pNext; /* Next sub-program already visited */ -}; - -/* -** A smaller version of VdbeOp used for the VdbeAddOpList() function because -** it takes up less space. 
-*/ -struct VdbeOpList { - u8 opcode; /* What operation to perform */ - signed char p1; /* First operand */ - signed char p2; /* Second parameter (often the jump destination) */ - signed char p3; /* Third parameter */ -}; -typedef struct VdbeOpList VdbeOpList; - -/* -** Allowed values of VdbeOp.p4type -*/ -#define P4_NOTUSED 0 /* The P4 parameter is not used */ -#define P4_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */ -#define P4_STATIC (-2) /* Pointer to a static string */ -#define P4_COLLSEQ (-4) /* P4 is a pointer to a CollSeq structure */ -#define P4_FUNCDEF (-5) /* P4 is a pointer to a FuncDef structure */ -#define P4_KEYINFO (-6) /* P4 is a pointer to a KeyInfo structure */ -#define P4_VDBEFUNC (-7) /* P4 is a pointer to a VdbeFunc structure */ -#define P4_MEM (-8) /* P4 is a pointer to a Mem* structure */ -#define P4_TRANSIENT 0 /* P4 is a pointer to a transient string */ -#define P4_VTAB (-10) /* P4 is a pointer to an sqlite3_vtab structure */ -#define P4_MPRINTF (-11) /* P4 is a string obtained from sqlite3_mprintf() */ -#define P4_REAL (-12) /* P4 is a 64-bit floating point value */ -#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */ -#define P4_INT32 (-14) /* P4 is a 32-bit signed integer */ -#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */ -#define P4_SUBPROGRAM (-18) /* P4 is a pointer to a SubProgram structure */ -#define P4_ADVANCE (-19) /* P4 is a pointer to BtreeNext() or BtreePrev() */ - -/* When adding a P4 argument using P4_KEYINFO, a copy of the KeyInfo structure -** is made. That copy is freed when the Vdbe is finalized. But if the -** argument is P4_KEYINFO_HANDOFF, the passed in pointer is used. It still -** gets freed when the Vdbe is finalized so it still should be obtained -** from a single sqliteMalloc(). But no copy is made and the calling -** function should *not* try to free the KeyInfo. -*/ -#define P4_KEYINFO_HANDOFF (-16) -#define P4_KEYINFO_STATIC (-17) - -/* -** The Vdbe.aColName array contains 5n Mem structures, where n is the -** number of columns of data returned by the statement. -*/ -#define COLNAME_NAME 0 -#define COLNAME_DECLTYPE 1 -#define COLNAME_DATABASE 2 -#define COLNAME_TABLE 3 -#define COLNAME_COLUMN 4 -#ifdef SQLITE_ENABLE_COLUMN_METADATA -# define COLNAME_N 5 /* Number of COLNAME_xxx symbols */ -#else -# ifdef SQLITE_OMIT_DECLTYPE -# define COLNAME_N 1 /* Store only the name */ -# else -# define COLNAME_N 2 /* Store the name and decltype */ -# endif -#endif - -/* -** The following macro converts a relative address in the p2 field -** of a VdbeOp structure into a negative number so that -** sqlite3VdbeAddOpList() knows that the address is relative. Calling -** the macro again restores the address. -*/ -#define ADDR(X) (-1-(X)) - -/* -** The makefile scans the vdbe.c source file and creates the "opcodes.h" -** header file that defines a number for each opcode used by the VDBE. -*/ -/************** Include opcodes.h in the middle of vdbe.h ********************/ -/************** Begin file opcodes.h *****************************************/ -/* Automatically generated. 
Do not edit */ -/* See the mkopcodeh.awk script for details */ -#define OP_Goto 1 -#define OP_Gosub 2 -#define OP_Return 3 -#define OP_Yield 4 -#define OP_HaltIfNull 5 -#define OP_Halt 6 -#define OP_Integer 7 -#define OP_Int64 8 -#define OP_Real 130 /* same as TK_FLOAT */ -#define OP_String8 94 /* same as TK_STRING */ -#define OP_String 9 -#define OP_Null 10 -#define OP_Blob 11 -#define OP_Variable 12 -#define OP_Move 13 -#define OP_Copy 14 -#define OP_SCopy 15 -#define OP_ResultRow 16 -#define OP_Concat 91 /* same as TK_CONCAT */ -#define OP_Add 86 /* same as TK_PLUS */ -#define OP_Subtract 87 /* same as TK_MINUS */ -#define OP_Multiply 88 /* same as TK_STAR */ -#define OP_Divide 89 /* same as TK_SLASH */ -#define OP_Remainder 90 /* same as TK_REM */ -#define OP_CollSeq 17 -#define OP_Function 18 -#define OP_BitAnd 82 /* same as TK_BITAND */ -#define OP_BitOr 83 /* same as TK_BITOR */ -#define OP_ShiftLeft 84 /* same as TK_LSHIFT */ -#define OP_ShiftRight 85 /* same as TK_RSHIFT */ -#define OP_AddImm 20 -#define OP_MustBeInt 21 -#define OP_RealAffinity 22 -#define OP_ToText 141 /* same as TK_TO_TEXT */ -#define OP_ToBlob 142 /* same as TK_TO_BLOB */ -#define OP_ToNumeric 143 /* same as TK_TO_NUMERIC*/ -#define OP_ToInt 144 /* same as TK_TO_INT */ -#define OP_ToReal 145 /* same as TK_TO_REAL */ -#define OP_Eq 76 /* same as TK_EQ */ -#define OP_Ne 75 /* same as TK_NE */ -#define OP_Lt 79 /* same as TK_LT */ -#define OP_Le 78 /* same as TK_LE */ -#define OP_Gt 77 /* same as TK_GT */ -#define OP_Ge 80 /* same as TK_GE */ -#define OP_Permutation 23 -#define OP_Compare 24 -#define OP_Jump 25 -#define OP_And 69 /* same as TK_AND */ -#define OP_Or 68 /* same as TK_OR */ -#define OP_Not 19 /* same as TK_NOT */ -#define OP_BitNot 93 /* same as TK_BITNOT */ -#define OP_Once 26 -#define OP_If 27 -#define OP_IfNot 28 -#define OP_IsNull 73 /* same as TK_ISNULL */ -#define OP_NotNull 74 /* same as TK_NOTNULL */ -#define OP_Column 29 -#define OP_Affinity 30 -#define OP_MakeRecord 31 -#define OP_Count 32 -#define OP_Savepoint 33 -#define OP_AutoCommit 34 -#define OP_Transaction 35 -#define OP_ReadCookie 36 -#define OP_SetCookie 37 -#define OP_VerifyCookie 38 -#define OP_OpenRead 39 -#define OP_OpenWrite 40 -#define OP_OpenAutoindex 41 -#define OP_OpenEphemeral 42 -#define OP_SorterOpen 43 -#define OP_OpenPseudo 44 -#define OP_Close 45 -#define OP_SeekLt 46 -#define OP_SeekLe 47 -#define OP_SeekGe 48 -#define OP_SeekGt 49 -#define OP_Seek 50 -#define OP_NotFound 51 -#define OP_Found 52 -#define OP_IsUnique 53 -#define OP_NotExists 54 -#define OP_Sequence 55 -#define OP_NewRowid 56 -#define OP_Insert 57 -#define OP_InsertInt 58 -#define OP_Delete 59 -#define OP_ResetCount 60 -#define OP_SorterCompare 61 -#define OP_SorterData 62 -#define OP_RowKey 63 -#define OP_RowData 64 -#define OP_Rowid 65 -#define OP_NullRow 66 -#define OP_Last 67 -#define OP_SorterSort 70 -#define OP_Sort 71 -#define OP_Rewind 72 -#define OP_SorterNext 81 -#define OP_Prev 92 -#define OP_Next 95 -#define OP_SorterInsert 96 -#define OP_IdxInsert 97 -#define OP_IdxDelete 98 -#define OP_IdxRowid 99 -#define OP_IdxLT 100 -#define OP_IdxGE 101 -#define OP_Destroy 102 -#define OP_Clear 103 -#define OP_CreateIndex 104 -#define OP_CreateTable 105 -#define OP_ParseSchema 106 -#define OP_LoadAnalysis 107 -#define OP_DropTable 108 -#define OP_DropIndex 109 -#define OP_DropTrigger 110 -#define OP_IntegrityCk 111 -#define OP_RowSetAdd 112 -#define OP_RowSetRead 113 -#define OP_RowSetTest 114 -#define OP_Program 115 -#define OP_Param 116 -#define 
OP_FkCounter 117 -#define OP_FkIfZero 118 -#define OP_MemMax 119 -#define OP_IfPos 120 -#define OP_IfNeg 121 -#define OP_IfZero 122 -#define OP_AggStep 123 -#define OP_AggFinal 124 -#define OP_Checkpoint 125 -#define OP_JournalMode 126 -#define OP_Vacuum 127 -#define OP_IncrVacuum 128 -#define OP_Expire 129 -#define OP_TableLock 131 -#define OP_VBegin 132 -#define OP_VCreate 133 -#define OP_VDestroy 134 -#define OP_VOpen 135 -#define OP_VFilter 136 -#define OP_VColumn 137 -#define OP_VNext 138 -#define OP_VRename 139 -#define OP_VUpdate 140 -#define OP_Pagecount 146 -#define OP_MaxPgcnt 147 -#define OP_Trace 148 -#define OP_Noop 149 -#define OP_Explain 150 - - -/* Properties such as "out2" or "jump" that are specified in -** comments following the "case" for each opcode in the vdbe.c -** are encoded into bitvectors as follows: -*/ -#define OPFLG_JUMP 0x0001 /* jump: P2 holds jmp target */ -#define OPFLG_OUT2_PRERELEASE 0x0002 /* out2-prerelease: */ -#define OPFLG_IN1 0x0004 /* in1: P1 is an input */ -#define OPFLG_IN2 0x0008 /* in2: P2 is an input */ -#define OPFLG_IN3 0x0010 /* in3: P3 is an input */ -#define OPFLG_OUT2 0x0020 /* out2: P2 is an output */ -#define OPFLG_OUT3 0x0040 /* out3: P3 is an output */ -#define OPFLG_INITIALIZER {\ -/* 0 */ 0x00, 0x01, 0x01, 0x04, 0x04, 0x10, 0x00, 0x02,\ -/* 8 */ 0x02, 0x02, 0x02, 0x02, 0x02, 0x00, 0x24, 0x24,\ -/* 16 */ 0x00, 0x00, 0x00, 0x24, 0x04, 0x05, 0x04, 0x00,\ -/* 24 */ 0x00, 0x01, 0x01, 0x05, 0x05, 0x00, 0x00, 0x00,\ -/* 32 */ 0x02, 0x00, 0x00, 0x00, 0x02, 0x10, 0x00, 0x00,\ -/* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x11,\ -/* 48 */ 0x11, 0x11, 0x08, 0x11, 0x11, 0x11, 0x11, 0x02,\ -/* 56 */ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 64 */ 0x00, 0x02, 0x00, 0x01, 0x4c, 0x4c, 0x01, 0x01,\ -/* 72 */ 0x01, 0x05, 0x05, 0x15, 0x15, 0x15, 0x15, 0x15,\ -/* 80 */ 0x15, 0x01, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c, 0x4c,\ -/* 88 */ 0x4c, 0x4c, 0x4c, 0x4c, 0x01, 0x24, 0x02, 0x01,\ -/* 96 */ 0x08, 0x08, 0x00, 0x02, 0x01, 0x01, 0x02, 0x00,\ -/* 104 */ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 112 */ 0x0c, 0x45, 0x15, 0x01, 0x02, 0x00, 0x01, 0x08,\ -/* 120 */ 0x05, 0x05, 0x05, 0x00, 0x00, 0x00, 0x02, 0x00,\ -/* 128 */ 0x01, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 136 */ 0x01, 0x00, 0x01, 0x00, 0x00, 0x04, 0x04, 0x04,\ -/* 144 */ 0x04, 0x04, 0x02, 0x02, 0x00, 0x00, 0x00,} - -/************** End of opcodes.h *********************************************/ -/************** Continuing where we left off in vdbe.h ***********************/ - -/* -** Prototypes for the VDBE interface. See comments on the implementation -** for a description of what each of these routines does. 
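[Editor's note: a hedged sketch of how SQLite's own code generator uses
these internal routines; they are SQLITE_PRIVATE and not a public API.]

    Vdbe *v = sqlite3VdbeCreate(db);          /* normally obtained via Parse */
    sqlite3VdbeAddOp2(v, OP_Integer, 42, 1);  /* load constant 42 into reg 1 */
    sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); /* emit register 1 as a row */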
-/************** Continuing where we left off in vdbe.h ***********************/
-
-/*
-** Prototypes for the VDBE interface.  See comments on the implementation
-** for a description of what each of these routines does.
-*/
-SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(sqlite3*);
-SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe*,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe*,int,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe*,int,int,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe*,int,int,int,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp4(Vdbe*,int,int,int,int,const char *zP4,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp);
-SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*);
-SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, u32 addr, int P1);
-SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2);
-SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, u32 addr, int P3);
-SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u8 P5);
-SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr);
-SQLITE_PRIVATE void sqlite3VdbeChangeToNoop(Vdbe*, int addr);
-SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N);
-SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int);
-SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int);
-SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeDeleteObject(sqlite3*,Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeMakeReady(Vdbe*,Parse*);
-SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe*, int);
-SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe*);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *, int);
-SQLITE_PRIVATE void sqlite3VdbeTrace(Vdbe*,FILE*);
-#endif
-SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe*);
-SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe*,int);
-SQLITE_PRIVATE int sqlite3VdbeSetColName(Vdbe*, int, int, const char *, void(*)(void*));
-SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe*);
-SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe*, const char *z, int n, int);
-SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe*,Vdbe*);
-SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe*, int*, int*);
-SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetValue(Vdbe*, int, u8);
-SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe*, int);
-#ifndef SQLITE_OMIT_TRACE
-SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*);
-#endif
-
-SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*);
-SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*);
-SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo *, char *, int, char **);
-
-#ifndef SQLITE_OMIT_TRIGGER
-SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *);
-#endif
-
-
-#ifndef NDEBUG
-SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe*, const char*, ...);
-# define VdbeComment(X)  sqlite3VdbeComment X
-SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
-# define VdbeNoopComment(X)  sqlite3VdbeNoopComment X
-#else
-# define VdbeComment(X)
-# define VdbeNoopComment(X)
-#endif
-
-#endif
-
-/************** End of vdbe.h ************************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include pager.h in the middle of sqliteInt.h *****************/
-/************** Begin file pager.h *******************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code.  In place of
-** a legal notice, here is a blessing:
-**
-**    May you do good and not evil.
-**    May you find forgiveness for yourself and forgive others.
-**    May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This header file defines the interface to the sqlite page cache
-** subsystem.  The page cache subsystem reads and writes a file a page
-** at a time and provides a journal for rollback.
-*/
-
-#ifndef _PAGER_H_
-#define _PAGER_H_
-
-/*
-** Default maximum size for persistent journal files. A negative
-** value means no limit. This value may be overridden using the
-** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit".
-*/
-#ifndef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT
-  #define SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT -1
-#endif
-
-/*
-** The type used to represent a page number.  The first page in a file
-** is called page 1.  0 is used to represent "not a page".
-*/
-typedef u32 Pgno;
-
-/*
-** Each open file is managed by a separate instance of the "Pager" structure.
-*/
-typedef struct Pager Pager;
-
-/*
-** Handle type for pages.
-*/
-typedef struct PgHdr DbPage;
-
-/*
-** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is
-** reserved for working around a windows/posix incompatibility). It is
-** used in the journal to signify that the remainder of the journal file
-** is devoted to storing a master journal name - there are no more pages to
-** roll back. See comments for function writeMasterJournal() in pager.c
-** for details.
-*/
-#define PAGER_MJ_PGNO(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1))
-
-/*
-** Allowed values for the flags parameter to sqlite3PagerOpen().
-**
-** NOTE: These values must match the corresponding BTREE_ values in btree.h.
-*/
-#define PAGER_OMIT_JOURNAL  0x0001    /* Do not use a rollback journal */
-#define PAGER_MEMORY        0x0002    /* In-memory database */
-
-/*
-** Valid values for the second argument to sqlite3PagerLockingMode().
-*/
-#define PAGER_LOCKINGMODE_QUERY      -1
-#define PAGER_LOCKINGMODE_NORMAL      0
-#define PAGER_LOCKINGMODE_EXCLUSIVE   1
-
-/*
-** Numeric constants that encode the journalmode.
-*/
-#define PAGER_JOURNALMODE_QUERY     (-1)  /* Query the value of journalmode */
-#define PAGER_JOURNALMODE_DELETE      0   /* Commit by deleting journal file */
-#define PAGER_JOURNALMODE_PERSIST     1   /* Commit by zeroing journal header */
-#define PAGER_JOURNALMODE_OFF         2   /* Journal omitted. */
-#define PAGER_JOURNALMODE_TRUNCATE    3   /* Commit by truncating journal */
-#define PAGER_JOURNALMODE_MEMORY      4   /* In-memory journal file */
-#define PAGER_JOURNALMODE_WAL         5   /* Use write-ahead logging */
-
-/*
-** The remainder of this file contains the declarations of the functions
-** that make up the Pager sub-system API. See source code comments for
-** a detailed description of each routine.
-*/
-
-/* Open and close a Pager connection. */
-SQLITE_PRIVATE int sqlite3PagerOpen(
-  sqlite3_vfs*,
-  Pager **ppPager,
-  const char*,
-  int,
-  int,
-  int,
-  void(*)(DbPage*)
-);
-SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*);
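For orientation, a rough sketch of how these open/lock/fetch routines compose (these are internal SQLite interfaces; the zero values for the extra-bytes and flags parameters and the no-op reinit callback are assumptions for illustration, not taken from this header):

    static void pageReinit(DbPage *pPg){ (void)pPg; }  /* assumed no-op xReinit */

    static int readPageOne(sqlite3_vfs *pVfs, const char *zDb){
      Pager *pPager;
      DbPage *pPg;
      int rc = sqlite3PagerOpen(pVfs, &pPager, zDb, 0, 0,
                                SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_DB,
                                pageReinit);
      if( rc==SQLITE_OK ) rc = sqlite3PagerSharedLock(pPager);   /* lock before reading */
      if( rc==SQLITE_OK ) rc = sqlite3PagerGet(pPager, 1, &pPg); /* pin page 1 */
      if( rc==SQLITE_OK ){
        const void *pData = sqlite3PagerGetData(pPg);            /* page content */
        (void)pData;
        sqlite3PagerUnref(pPg);                                  /* release the pin */
      }
      sqlite3PagerClose(pPager);
      return rc;
    }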
-
-/* Functions used to configure a Pager object. */
-SQLITE_PRIVATE void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *);
-SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int);
-SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager*, int);
-SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager*, int);
-SQLITE_PRIVATE void sqlite3PagerShrink(Pager*);
-SQLITE_PRIVATE void sqlite3PagerSetSafetyLevel(Pager*,int,int,int);
-SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *, int);
-SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *, int);
-SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager*);
-SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager*);
-SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *, i64);
-SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager*);
-
-/* Functions used to obtain and release page references. */
-SQLITE_PRIVATE int sqlite3PagerAcquire(Pager *pPager, Pgno pgno, DbPage **ppPage, int clrFlag);
-#define sqlite3PagerGet(A,B,C) sqlite3PagerAcquire(A,B,C,0)
-SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno);
-SQLITE_PRIVATE void sqlite3PagerRef(DbPage*);
-SQLITE_PRIVATE void sqlite3PagerUnref(DbPage*);
-
-/* Operations on page references. */
-SQLITE_PRIVATE int sqlite3PagerWrite(DbPage*);
-SQLITE_PRIVATE void sqlite3PagerDontWrite(DbPage*);
-SQLITE_PRIVATE int sqlite3PagerMovepage(Pager*,DbPage*,Pgno,int);
-SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage*);
-SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *);
-SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *);
-
-/* Functions used to manage pager transactions and savepoints. */
-SQLITE_PRIVATE void sqlite3PagerPagecount(Pager*, int*);
-SQLITE_PRIVATE int sqlite3PagerBegin(Pager*, int exFlag, int);
-SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, int);
-SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager*);
-SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager*);
-SQLITE_PRIVATE int sqlite3PagerRollback(Pager*);
-SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int n);
-SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint);
-SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager);
-
-SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int, int*, int*);
-SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen);
-SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager);
-#ifdef SQLITE_ENABLE_ZIPVFS
-SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager);
-#endif
-
-/* Functions used to query pager state and configuration. */
-SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager*);
-SQLITE_PRIVATE int sqlite3PagerRefcount(Pager*);
-SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager*);
-SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager*, int);
-SQLITE_PRIVATE const sqlite3_vfs *sqlite3PagerVfs(Pager*);
-SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager*);
-SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*);
-SQLITE_PRIVATE int sqlite3PagerNosync(Pager*);
-SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*);
-SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*);
-SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *);
-SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *);
-
-/* Functions used to truncate the database file. */
-SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager*,Pgno);
-
-#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_WAL)
-SQLITE_PRIVATE void *sqlite3PagerCodec(DbPage *);
-#endif
-
-/* Functions to support testing and debugging. */
-#if !defined(NDEBUG) || defined(SQLITE_TEST)
-SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage*);
-SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage*);
-#endif
-#ifdef SQLITE_TEST
-SQLITE_PRIVATE int *sqlite3PagerStats(Pager*);
-SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*);
-  void disable_simulated_io_errors(void);
-  void enable_simulated_io_errors(void);
-#else
-# define disable_simulated_io_errors()
-# define enable_simulated_io_errors()
-#endif
-
-#endif /* _PAGER_H_ */
-
-/************** End of pager.h ***********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include pcache.h in the middle of sqliteInt.h ****************/
-/************** Begin file pcache.h ******************************************/
-/*
-** 2008 August 05
-**
-** The author disclaims copyright to this source code.  In place of
-** a legal notice, here is a blessing:
-**
-**    May you do good and not evil.
-**    May you find forgiveness for yourself and forgive others.
-**    May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This header file defines the interface to the sqlite page cache
-** subsystem.
-*/
-
-#ifndef _PCACHE_H_
-#define _PCACHE_H_
-
-typedef struct PgHdr PgHdr;
-typedef struct PCache PCache;
-
-/*
-** Every page in the cache is controlled by an instance of the following
-** structure.
-*/
-struct PgHdr {
-  sqlite3_pcache_page *pPage;    /* Pcache object page handle */
-  void *pData;                   /* Page data */
-  void *pExtra;                  /* Extra content */
-  PgHdr *pDirty;                 /* Transient list of dirty pages */
-  Pager *pPager;                 /* The pager this page is part of */
-  Pgno pgno;                     /* Page number for this page */
-#ifdef SQLITE_CHECK_PAGES
-  u32 pageHash;                  /* Hash of page content */
-#endif
-  u16 flags;                     /* PGHDR flags defined below */
-
-  /**********************************************************************
-  ** Elements above are public.  All that follows is private to pcache.c
-  ** and should not be accessed by other modules.
-  */
-  i16 nRef;                      /* Number of users of this page */
-  PCache *pCache;                /* Cache that owns this page */
-
-  PgHdr *pDirtyNext;             /* Next element in list of dirty pages */
-  PgHdr *pDirtyPrev;             /* Previous element in list of dirty pages */
-};
-
-/* Bit values for PgHdr.flags */
-#define PGHDR_DIRTY           0x002  /* Page has changed */
-#define PGHDR_NEED_SYNC       0x004  /* Fsync the rollback journal before
-                                     ** writing this page to the database */
-#define PGHDR_NEED_READ       0x008  /* Content is unread */
-#define PGHDR_REUSE_UNLIKELY  0x010  /* A hint that reuse is unlikely */
-#define PGHDR_DONT_WRITE      0x020  /* Do not write content to disk */
-
-/* Initialize and shutdown the page cache subsystem */
-SQLITE_PRIVATE int sqlite3PcacheInitialize(void);
-SQLITE_PRIVATE void sqlite3PcacheShutdown(void);
-
-/* Page cache buffer management:
-** These routines implement SQLITE_CONFIG_PAGECACHE.
-*/
-SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *, int sz, int n);
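sqlite3PCacheBufferSetup() is reached through the public sqlite3_config() interface. A minimal sketch of how an application hands the page cache a static buffer (the buffer size and slot count below are illustrative):

    #include "sqlite3.h"

    /* 100 slots; each slot must cover the page size plus per-page overhead */
    static char aPcacheBuf[100 * 4360];

    static int setupPageCache(void){
      /* Must be called before sqlite3_initialize() / first database open */
      return sqlite3_config(SQLITE_CONFIG_PAGECACHE, aPcacheBuf, 4360, 100);
    }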
-
-/* Create a new pager cache.
-** Under memory stress, invoke xStress to try to make pages clean.
-** Only clean and unpinned pages can be reclaimed.
-*/
-SQLITE_PRIVATE void sqlite3PcacheOpen(
-  int szPage,                    /* Size of every page */
-  int szExtra,                   /* Extra space associated with each page */
-  int bPurgeable,                /* True if pages are on backing store */
-  int (*xStress)(void*, PgHdr*), /* Call to try to make pages clean */
-  void *pStress,                 /* Argument to xStress */
-  PCache *pToInit                /* Preallocated space for the PCache */
-);
-
-/* Modify the page-size after the cache has been created. */
-SQLITE_PRIVATE void sqlite3PcacheSetPageSize(PCache *, int);
-
-/* Return the size in bytes of a PCache object.  Used to preallocate
-** storage space.
-*/
-SQLITE_PRIVATE int sqlite3PcacheSize(void);
-
-/* One release per successful fetch.  Page is pinned until released.
-** Reference counted.
-*/
-SQLITE_PRIVATE int sqlite3PcacheFetch(PCache*, Pgno, int createFlag, PgHdr**);
-SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr*);
-
-SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr*);       /* Remove page from cache */
-SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr*);  /* Make sure page is marked dirty */
-SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr*);  /* Mark a single page as clean */
-SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache*);  /* Mark all dirty list pages as clean */
-
-/* Change a page number.  Used by incr-vacuum. */
-SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr*, Pgno);
-
-/* Remove all pages with pgno>x.  Reset the cache if x==0 */
-SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache*, Pgno x);
-
-/* Get a list of all dirty pages in the cache, sorted by page number */
-SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache*);
-
-/* Reset and close the cache object */
-SQLITE_PRIVATE void sqlite3PcacheClose(PCache*);
-
-/* Clear flags from pages of the page cache */
-SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *);
-
-/* Discard the contents of the cache */
-SQLITE_PRIVATE void sqlite3PcacheClear(PCache*);
-
-/* Return the total number of outstanding page references */
-SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*);
-
-/* Increment the reference count of an existing page */
-SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*);
-
-SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*);
-
-/* Return the total number of pages stored in the cache */
-SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*);
-
-#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
-/* Iterate through all dirty pages currently stored in the cache. This
-** interface is only available if SQLITE_CHECK_PAGES is defined when the
-** library is built.
-*/
-SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *));
-#endif
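The "one release per successful fetch" discipline noted above looks roughly like this in use (internal interfaces; the sketch assumes a PCache already set up with sqlite3PcacheOpen()):

    static int touchPage(PCache *pCache, Pgno pgno){
      PgHdr *pPg;
      int rc = sqlite3PcacheFetch(pCache, pgno, 1, &pPg); /* pin; create if absent */
      if( rc==SQLITE_OK ){
        sqlite3PcacheMakeDirty(pPg);   /* page now on the dirty list */
        sqlite3PcacheRelease(pPg);     /* exactly one release per fetch */
      }
      return rc;
    }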
-
-/* Set and get the suggested cache-size for the specified pager-cache.
-**
-** If no global maximum is configured, then the system attempts to limit
-** the total number of pages cached by purgeable pager-caches to the sum
-** of the suggested cache-sizes.
-*/
-SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *, int);
-#ifdef SQLITE_TEST
-SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *);
-#endif
-
-/* Free up as much memory as possible from the page cache */
-SQLITE_PRIVATE void sqlite3PcacheShrink(PCache*);
-
-#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
-/* Try to return memory used by the pcache module to the main memory heap */
-SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int);
-#endif
-
-#ifdef SQLITE_TEST
-SQLITE_PRIVATE void sqlite3PcacheStats(int*,int*,int*,int*);
-#endif
-
-SQLITE_PRIVATE void sqlite3PCacheSetDefault(void);
-
-#endif /* _PCACHE_H_ */
-
-/************** End of pcache.h **********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
-/************** Include os.h in the middle of sqliteInt.h ********************/
-/************** Begin file os.h **********************************************/
-/*
-** 2001 September 16
-**
-** The author disclaims copyright to this source code.  In place of
-** a legal notice, here is a blessing:
-**
-**    May you do good and not evil.
-**    May you find forgiveness for yourself and forgive others.
-**    May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This header file (together with its companion C source-code file
-** "os.c") attempts to abstract the underlying operating system so that
-** the SQLite library will work on both POSIX and windows systems.
-**
-** This header file is #include-ed by sqliteInt.h and thus ends up
-** being included by every source file.
-*/
-#ifndef _SQLITE_OS_H_
-#define _SQLITE_OS_H_
-
-/*
-** Figure out if we are dealing with Unix, Windows, or some other
-** operating system.  After the following block of preprocess macros,
-** all of SQLITE_OS_UNIX, SQLITE_OS_WIN, SQLITE_OS_OS2, and SQLITE_OS_OTHER
-** will be defined to either 1 or 0.  One of the four will be 1.  The other
-** three will be 0.
-*/
-#if defined(SQLITE_OS_OTHER)
-# if SQLITE_OS_OTHER==1
-#   undef SQLITE_OS_UNIX
-#   define SQLITE_OS_UNIX 0
-#   undef SQLITE_OS_WIN
-#   define SQLITE_OS_WIN 0
-#   undef SQLITE_OS_OS2
-#   define SQLITE_OS_OS2 0
-# else
-#   undef SQLITE_OS_OTHER
-# endif
-#endif
-#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER)
-# define SQLITE_OS_OTHER 0
-# ifndef SQLITE_OS_WIN
-#   if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__BORLANDC__)
-#     define SQLITE_OS_WIN 1
-#     define SQLITE_OS_UNIX 0
-#     define SQLITE_OS_OS2 0
-#   elif defined(__EMX__) || defined(_OS2) || defined(OS2) || defined(_OS2_) || defined(__OS2__)
-#     define SQLITE_OS_WIN 0
-#     define SQLITE_OS_UNIX 0
-#     define SQLITE_OS_OS2 1
-#   else
-#     define SQLITE_OS_WIN 0
-#     define SQLITE_OS_UNIX 1
-#     define SQLITE_OS_OS2 0
-#   endif
-# else
-#   define SQLITE_OS_UNIX 0
-#   define SQLITE_OS_OS2 0
-# endif
-#else
-# ifndef SQLITE_OS_WIN
-#   define SQLITE_OS_WIN 0
-# endif
-#endif
-
-#if SQLITE_OS_WIN
-# include <windows.h>
-#endif
-
-#if SQLITE_OS_OS2
-# if (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >= 3) && defined(OS2_HIGH_MEMORY)
-#  include <os2safe.h> /* has to be included before os2.h for linking to work */
-# endif
-# define INCL_DOSDATETIME
-# define INCL_DOSFILEMGR
-# define INCL_DOSERRORS
-# define INCL_DOSMISC
-# define INCL_DOSPROCESS
-# define INCL_DOSMODULEMGR
-# define INCL_DOSSEMAPHORES
-# include <os2.h>
-# include <os2emx.h>
-#endif
-
-/*
-** Determine if we are dealing with Windows NT.
-**
-** We ought to be able to determine if we are compiling for win98 or winNT
-** using the _WIN32_WINNT macro as follows:
-**
-** #if defined(_WIN32_WINNT)
-** # define SQLITE_OS_WINNT 1
-** #else
-** # define SQLITE_OS_WINNT 0
-** #endif
-**
-** However, vs2005 does not set _WIN32_WINNT by default, as it ought to,
-** so the above test does not work.  We'll just assume that everything is
-** winNT unless the programmer explicitly says otherwise by setting
-** SQLITE_OS_WINNT to 0.
-*/
-#if SQLITE_OS_WIN && !defined(SQLITE_OS_WINNT)
-# define SQLITE_OS_WINNT 1
-#endif
-
-/*
-** Determine if we are dealing with WindowsCE - which has a much
-** reduced API.
-*/
-#if defined(_WIN32_WCE)
-# define SQLITE_OS_WINCE 1
-#else
-# define SQLITE_OS_WINCE 0
-#endif
-
-/*
-** Determine if we are dealing with WindowsRT (Metro) as this has a different and
-** incompatible API from win32.
-*/
-#if !defined(SQLITE_OS_WINRT)
-# define SQLITE_OS_WINRT 0
-#endif
-
-/*
-** When compiled for WinCE or WinRT, there is no concept of the current
-** directory.
- */
-#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT
-# define SQLITE_CURDIR 1
-#endif
-
-/* If the SET_FULLSYNC macro is not defined above, then make it
-** a no-op
-*/
-#ifndef SET_FULLSYNC
-# define SET_FULLSYNC(x,y)
-#endif
-
-/*
-** The default size of a disk sector
-*/
-#ifndef SQLITE_DEFAULT_SECTOR_SIZE
-# define SQLITE_DEFAULT_SECTOR_SIZE 4096
-#endif
-
-/*
-** Temporary files are named starting with this prefix followed by 16 random
-** alphanumeric characters, and no file extension. They are stored in the
-** OS's standard temporary file directory, and are deleted prior to exit.
-** If sqlite is being embedded in another program, you may wish to change the
-** prefix to reflect your program's name, so that if your program exits
-** prematurely, old temporary files can be easily identified. This can be done
-** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line.
-**
-** 2006-10-31:  The default prefix used to be "sqlite_".  But then
-** Mcafee started using SQLite in their anti-virus product and it
-** started putting files with the "sqlite" name in the c:/temp folder.
-** This annoyed many windows users.  Those users would then do a
-** Google search for "sqlite", find the telephone numbers of the
-** developers and call to wake them up at night and complain.
-** For this reason, the default name prefix is changed to be "sqlite"
-** spelled backwards.  So the temp files are still identified, but
-** anybody smart enough to figure out the code is also likely smart
-** enough to know that calling the developer will not help get rid
-** of the file.
-*/
-#ifndef SQLITE_TEMP_FILE_PREFIX
-# define SQLITE_TEMP_FILE_PREFIX "etilqs_"
-#endif
-
-/*
-** The following values may be passed as the second argument to
-** sqlite3OsLock(). The various locks exhibit the following semantics:
-**
-** SHARED:    Any number of processes may hold a SHARED lock simultaneously.
-** RESERVED:  A single process may hold a RESERVED lock on a file at
-**            any time. Other processes may hold and obtain new SHARED locks.
-** PENDING:   A single process may hold a PENDING lock on a file at
-**            any one time. Existing SHARED locks may persist, but no new
-**            SHARED locks may be obtained by other processes.
-** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks.
-**
-** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a
-** process that requests an EXCLUSIVE lock may actually obtain a PENDING
-** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to
-** sqlite3OsLock().
-*/
-#define NO_LOCK         0
-#define SHARED_LOCK     1
-#define RESERVED_LOCK   2
-#define PENDING_LOCK    3
-#define EXCLUSIVE_LOCK  4
-
-/*
-** File Locking Notes:  (Mostly about windows but also some info for Unix)
-**
-** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because
-** those functions are not available.  So we use only LockFile() and
-** UnlockFile().
-**
-** LockFile() prevents not just writing but also reading by other processes.
-** A SHARED_LOCK is obtained by locking a single randomly-chosen
-** byte out of a specific range of bytes. The lock byte is obtained at
-** random so two separate readers can probably access the file at the
-** same time, unless they are unlucky and choose the same lock byte.
-** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range.
-** There can only be one writer.  A RESERVED_LOCK is obtained by locking
-** a single byte of the file that is designated as the reserved lock byte.
-** A PENDING_LOCK is obtained by locking a designated byte different from
-** the RESERVED_LOCK byte.
-**
-** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available,
-** which means we can use reader/writer locks.  When reader/writer locks
-** are used, the lock is placed on the same range of bytes that is used
-** for probabilistic locking in Win95/98/ME.  Hence, the locking scheme
-** will support two or more Win95 readers or two or more WinNT readers.
-** But a single Win95 reader will lock out all WinNT readers and a single
-** WinNT reader will lock out all other Win95 readers.
-**
-** The following #defines specify the range of bytes used for locking.
-** SHARED_SIZE is the number of bytes available in the pool from which
-** a random byte is selected for a shared lock.  The pool of bytes for
-** shared locks begins at SHARED_FIRST.
-**
-** The same locking strategy and
-** byte ranges are used for Unix.  This leaves open the possibility of having
-** clients on win95, winNT, and unix all talking to the same shared file
-** and all locking correctly.  To do so would require that samba (or whatever
-** tool is being used for file sharing) implements locks correctly between
-** windows and unix.  I'm guessing that isn't likely to happen, but by
-** using the same locking range we are at least open to the possibility.
-**
-** Locking in windows is mandatory.  For this reason, we cannot store
-** actual data in the bytes used for locking.  The pager therefore never
-** allocates the pages involved in locking.  SHARED_SIZE is selected so
-** that all locks will fit on a single page even at the minimum page size.
-** PENDING_BYTE defines the beginning of the locks.  By default PENDING_BYTE
-** is set high so that we don't have to allocate an unused page except
-** for very large databases.  But one should test the page skipping logic
-** by setting PENDING_BYTE low and running the entire regression suite.
-**
-** Changing the value of PENDING_BYTE results in a subtly incompatible
-** file format.  Depending on how it is changed, you might not notice
-** the incompatibility right away, even running a full regression test.
-** The default location of PENDING_BYTE is the first byte past the
-** 1GB boundary.
-**
-*/
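To make the byte arithmetic concrete, here is a small self-contained check of where the lock bytes land with the default PENDING_BYTE of 0x40000000 (the first byte past the 1GB boundary), using the relationships defined just below:

    #include <stdio.h>

    int main(void){
      unsigned pending  = 0x40000000;   /* PENDING_BYTE default */
      unsigned reserved = pending + 1;  /* RESERVED_BYTE */
      unsigned shared1  = pending + 2;  /* SHARED_FIRST */
      printf("pending  byte: 0x%08x\n", pending);
      printf("reserved byte: 0x%08x\n", reserved);
      printf("shared range : 0x%08x .. 0x%08x (510 bytes)\n",
             shared1, shared1 + 510 - 1);
      return 0;  /* the whole range fits inside one page at any page size */
    }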
-#ifdef SQLITE_OMIT_WSD
-# define PENDING_BYTE     (0x40000000)
-#else
-# define PENDING_BYTE      sqlite3PendingByte
-#endif
-#define RESERVED_BYTE     (PENDING_BYTE+1)
-#define SHARED_FIRST      (PENDING_BYTE+2)
-#define SHARED_SIZE       510
-
-/*
-** Wrapper around OS specific sqlite3_os_init() function.
-*/
-SQLITE_PRIVATE int sqlite3OsInit(void);
-
-/*
-** Functions for accessing sqlite3_file methods
-*/
-SQLITE_PRIVATE int sqlite3OsClose(sqlite3_file*);
-SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset);
-SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset);
-SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size);
-SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize);
-SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut);
-SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*);
-SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*);
-#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0
-SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id);
-SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id);
-SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **);
-SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int);
-SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id);
-SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int);
-
-
-/*
-** Functions for accessing sqlite3_vfs methods
-*/
-SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *);
-SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int);
-SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut);
-SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *);
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
-SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *);
-SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *);
-SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void);
-SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *);
-#endif /* SQLITE_OMIT_LOAD_EXTENSION */
-SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *);
-SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int);
-SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*);
-
-/*
-** Convenience functions for opening and closing files using
-** sqlite3_malloc() to obtain space for the file-handle structure.
-*/
-SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*);
-SQLITE_PRIVATE int sqlite3OsCloseFree(sqlite3_file *);
-
-#endif /* _SQLITE_OS_H_ */
-
-/************** End of os.h **************************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include mutex.h in the middle of sqliteInt.h *****************/
-/************** Begin file mutex.h *******************************************/
-/*
-** 2007 August 28
-**
-** The author disclaims copyright to this source code.  In place of
-** a legal notice, here is a blessing:
-**
-**    May you do good and not evil.
-**    May you find forgiveness for yourself and forgive others.
-**    May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains the common header for all mutex implementations.
-** The sqliteInt.h header #includes this file so that it is available
-** to all source files.  We break it out in an effort to keep the code
-** better organized.
-**
-** NOTE:  source files should *not* #include this header file directly.
-** Source files should #include the sqliteInt.h file and let that file
-** include this one indirectly.
-*/
-
-
-/*
-** Figure out what version of the code to use.  The choices are
-**
-**   SQLITE_MUTEX_OMIT         No mutex logic.  Not even stubs.  The
-**                             mutex implementation cannot be overridden
-**                             at start-time.
-**
-**   SQLITE_MUTEX_NOOP         For single-threaded applications.  No
-**                             mutual exclusion is provided.  But this
-**                             implementation can be overridden at
-**                             start-time.
-**
-**   SQLITE_MUTEX_PTHREADS     For multi-threaded applications on Unix.
-**
-**   SQLITE_MUTEX_W32          For multi-threaded applications on Win32.
-**
-**   SQLITE_MUTEX_OS2          For multi-threaded applications on OS/2.
-*/
-#if !SQLITE_THREADSAFE
-# define SQLITE_MUTEX_OMIT
-#endif
-#if SQLITE_THREADSAFE && !defined(SQLITE_MUTEX_NOOP)
-#  if SQLITE_OS_UNIX
-#    define SQLITE_MUTEX_PTHREADS
-#  elif SQLITE_OS_WIN
-#    define SQLITE_MUTEX_W32
-#  elif SQLITE_OS_OS2
-#    define SQLITE_MUTEX_OS2
-#  else
-#    define SQLITE_MUTEX_NOOP
-#  endif
-#endif
-
-#ifdef SQLITE_MUTEX_OMIT
-/*
-** If this is a no-op implementation, implement everything as macros.
-*/
-#define sqlite3_mutex_alloc(X)    ((sqlite3_mutex*)8)
-#define sqlite3_mutex_free(X)
-#define sqlite3_mutex_enter(X)
-#define sqlite3_mutex_try(X)      SQLITE_OK
-#define sqlite3_mutex_leave(X)
-#define sqlite3_mutex_held(X)     ((void)(X),1)
-#define sqlite3_mutex_notheld(X)  ((void)(X),1)
-#define sqlite3MutexAlloc(X)      ((sqlite3_mutex*)8)
-#define sqlite3MutexInit()        SQLITE_OK
-#define sqlite3MutexEnd()
-#define MUTEX_LOGIC(X)
-#else
-#define MUTEX_LOGIC(X)            X
-#endif /* defined(SQLITE_MUTEX_OMIT) */
-
-/************** End of mutex.h ***********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
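For context, the public mutex interface that these macros stub out is used like this; under SQLITE_MUTEX_OMIT every call below compiles away to nothing, while the pthreads/w32 implementations provide real mutual exclusion:

    #include "sqlite3.h"

    static void criticalSection(void){
      sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
      sqlite3_mutex_enter(p);
      /* ... code that must not run concurrently ... */
      sqlite3_mutex_leave(p);
      sqlite3_mutex_free(p);
    }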
-
-
-/*
-** Each database file to be accessed by the system is an instance
-** of the following structure.  There are normally two of these structures
-** in the sqlite.aDb[] array.  aDb[0] is the main database file and
-** aDb[1] is the database file used to hold temporary tables.  Additional
-** databases may be attached.
-*/
-struct Db {
-  char *zName;         /* Name of this database */
-  Btree *pBt;          /* The B*Tree structure for this database file */
-  u8 inTrans;          /* 0: not writable.  1: Transaction.  2: Checkpoint */
-  u8 safety_level;     /* How aggressive at syncing data to disk */
-  Schema *pSchema;     /* Pointer to database schema (possibly shared) */
-};
-
-/*
-** An instance of the following structure stores a database schema.
-**
-** Most Schema objects are associated with a Btree.  The exception is
-** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing.
-** In shared cache mode, a single Schema object can be shared by multiple
-** Btrees that refer to the same underlying BtShared object.
-**
-** Schema objects are automatically deallocated when the last Btree that
-** references them is destroyed.   The TEMP Schema is manually freed by
-** sqlite3_close().
-**
-** A thread must be holding a mutex on the corresponding Btree in order
-** to access Schema content.  This implies that the thread must also be
-** holding a mutex on the sqlite3 connection pointer that owns the Btree.
-** For a TEMP Schema, only the connection mutex is required.
-*/
-struct Schema {
-  int schema_cookie;   /* Database schema version number for this file */
-  int iGeneration;     /* Generation counter.  Incremented with each change */
-  Hash tblHash;        /* All tables indexed by name */
-  Hash idxHash;        /* All (named) indices indexed by name */
-  Hash trigHash;       /* All triggers indexed by name */
-  Hash fkeyHash;       /* All foreign keys by referenced table name */
-  Table *pSeqTab;      /* The sqlite_sequence table used by AUTOINCREMENT */
-  u8 file_format;      /* Schema format version for this file */
-  u8 enc;              /* Text encoding used by this database */
-  u16 flags;           /* Flags associated with this schema */
-  int cache_size;      /* Number of pages to use in the cache */
-};
-
-/*
-** These macros can be used to test, set, or clear bits in the
-** Db.pSchema->flags field.
-*/
-#define DbHasProperty(D,I,P)     (((D)->aDb[I].pSchema->flags&(P))==(P))
-#define DbHasAnyProperty(D,I,P)  (((D)->aDb[I].pSchema->flags&(P))!=0)
-#define DbSetProperty(D,I,P)     (D)->aDb[I].pSchema->flags|=(P)
-#define DbClearProperty(D,I,P)   (D)->aDb[I].pSchema->flags&=~(P)
-
-/*
-** Allowed values for the DB.pSchema->flags field.
-**
-** The DB_SchemaLoaded flag is set after the database schema has been
-** read into internal hash tables.
-**
-** DB_UnresetViews means that one or more views have column names that
-** have been filled out.  If the schema changes, these column names might
-** change and so the view will need to be reset.
-*/
-#define DB_SchemaLoaded    0x0001  /* The schema has been loaded */
-#define DB_UnresetViews    0x0002  /* Some views have defined column names */
-#define DB_Empty           0x0004  /* The file is empty (length 0 bytes) */
-
-/*
-** The number of different kinds of things that can be limited
-** using the sqlite3_limit() interface.
-*/
-#define SQLITE_N_LIMIT (SQLITE_LIMIT_TRIGGER_DEPTH+1)
-
-/*
-** Lookaside malloc is a set of fixed-size buffers that can be used
-** to satisfy small transient memory allocation requests for objects
-** associated with a particular database connection.  The use of
-** lookaside malloc provides a significant performance enhancement
-** (approx 10%) by avoiding numerous malloc/free requests while parsing
-** SQL statements.
-**
-** The Lookaside structure holds configuration information about the
-** lookaside malloc subsystem.  Each available memory allocation in
-** the lookaside subsystem is stored on a linked list of LookasideSlot
-** objects.
-**
-** Lookaside allocations are only allowed for objects that are associated
-** with a particular database connection.  Hence, schema information cannot
-** be stored in lookaside because in shared cache mode the schema information
-** is shared by multiple database connections.  Therefore, while parsing
-** schema information, the Lookaside.bEnabled flag is cleared so that
-** lookaside allocations are not used to construct the schema objects.
-*/
-struct Lookaside {
-  u16 sz;                 /* Size of each buffer in bytes */
-  u8 bEnabled;            /* False to disable new lookaside allocations */
-  u8 bMalloced;           /* True if pStart obtained from sqlite3_malloc() */
-  int nOut;               /* Number of buffers currently checked out */
-  int mxOut;              /* Highwater mark for nOut */
-  int anStat[3];          /* 0: hits.  1: size misses.  2: full misses */
-  LookasideSlot *pFree;   /* List of available buffers */
-  void *pStart;           /* First byte of available memory space */
-  void *pEnd;             /* First byte past end of available space */
-};
-struct LookasideSlot {
-  LookasideSlot *pNext;   /* Next buffer in the list of free buffers */
-};
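Applications tune the lookaside pool per connection through the public sqlite3_db_config() interface; a brief sketch (the slot size and count below are illustrative):

    #include "sqlite3.h"

    static int tuneLookaside(sqlite3 *db){
      /* 512 slots of 256 bytes each; a NULL buffer lets SQLite allocate
      ** the pool itself.  Pass 0,0 for size/count to disable lookaside. */
      return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, NULL, 256, 512);
    }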
-
-/*
-** A hash table for function definitions.
-**
-** Hash each FuncDef structure into one of the FuncDefHash.a[] slots.
-** Collisions are on the FuncDef.pHash chain.
-*/
-struct FuncDefHash {
-  FuncDef *a[23];       /* Hash table for functions */
-};
-
-/*
-** Each database connection is an instance of the following structure.
-*/
-struct sqlite3 {
-  sqlite3_vfs *pVfs;            /* OS Interface */
-  struct Vdbe *pVdbe;           /* List of active virtual machines */
-  CollSeq *pDfltColl;           /* The default collating sequence (BINARY) */
-  sqlite3_mutex *mutex;         /* Connection mutex */
-  Db *aDb;                      /* All backends */
-  int nDb;                      /* Number of backends currently in use */
-  int flags;                    /* Miscellaneous flags. See below */
-  i64 lastRowid;                /* ROWID of most recent insert (see above) */
-  unsigned int openFlags;       /* Flags passed to sqlite3_vfs.xOpen() */
-  int errCode;                  /* Most recent error code (SQLITE_*) */
-  int errMask;                  /* & result codes with this before returning */
-  u8 autoCommit;                /* The auto-commit flag. */
-  u8 temp_store;                /* 1: file 2: memory 0: default */
-  u8 mallocFailed;              /* True if we have seen a malloc failure */
-  u8 dfltLockMode;              /* Default locking-mode for attached dbs */
-  signed char nextAutovac;      /* Autovac setting after VACUUM if >=0 */
-  u8 suppressErr;               /* Do not issue error messages if true */
-  u8 vtabOnConflict;            /* Value to return for s3_vtab_on_conflict() */
-  u8 isTransactionSavepoint;    /* True if the outermost savepoint is a TS */
-  int nextPagesize;             /* Pagesize after VACUUM if >0 */
-  u32 magic;                    /* Magic number to detect library misuse */
-  int nChange;                  /* Value returned by sqlite3_changes() */
-  int nTotalChange;             /* Value returned by sqlite3_total_changes() */
-  int aLimit[SQLITE_N_LIMIT];   /* Limits */
-  struct sqlite3InitInfo {      /* Information used during initialization */
-    int newTnum;                /* Rootpage of table being initialized */
-    u8 iDb;                     /* Which db file is being initialized */
-    u8 busy;                    /* TRUE if currently initializing */
-    u8 orphanTrigger;           /* Last statement is orphaned TEMP trigger */
-  } init;
-  int activeVdbeCnt;            /* Number of VDBEs currently executing */
-  int writeVdbeCnt;             /* Number of active VDBEs that are writing */
-  int vdbeExecCnt;              /* Number of nested calls to VdbeExec() */
-  int nExtension;               /* Number of loaded extensions */
-  void **aExtension;            /* Array of shared library handles */
-  void (*xTrace)(void*,const char*);        /* Trace function */
-  void *pTraceArg;                          /* Argument to the trace function */
-  void (*xProfile)(void*,const char*,u64);  /* Profiling function */
-  void *pProfileArg;                        /* Argument to profile function */
-  void *pCommitArg;                 /* Argument to xCommitCallback() */
-  int (*xCommitCallback)(void*);    /* Invoked at every commit. */
-  void *pRollbackArg;               /* Argument to xRollbackCallback() */
-  void (*xRollbackCallback)(void*); /* Invoked at every rollback. */
-  void *pUpdateArg;
-  void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64);
-#ifndef SQLITE_OMIT_WAL
-  int (*xWalCallback)(void *, sqlite3 *, const char *, int);
-  void *pWalArg;
-#endif
-  void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*);
-  void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*);
-  void *pCollNeededArg;
-  sqlite3_value *pErr;          /* Most recent error message */
-  char *zErrMsg;                /* Most recent error message (UTF-8 encoded) */
-  char *zErrMsg16;              /* Most recent error message (UTF-16 encoded) */
-  union {
-    volatile int isInterrupted; /* True if sqlite3_interrupt has been called */
-    double notUsed1;            /* Spacer */
-  } u1;
-  Lookaside lookaside;          /* Lookaside malloc configuration */
-#ifndef SQLITE_OMIT_AUTHORIZATION
-  int (*xAuth)(void*,int,const char*,const char*,const char*,const char*);
-                                /* Access authorization function */
-  void *pAuthArg;               /* 1st argument to the access auth function */
-#endif
-#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
-  int (*xProgress)(void *);     /* The progress callback */
-  void *pProgressArg;           /* Argument to the progress callback */
-  int nProgressOps;             /* Number of opcodes for progress callback */
-#endif
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-  int nVTrans;                  /* Allocated size of aVTrans */
-  Hash aModule;                 /* populated by sqlite3_create_module() */
-  VtabCtx *pVtabCtx;            /* Context for active vtab connect/create */
-  VTable **aVTrans;             /* Virtual tables with open transactions */
-  VTable *pDisconnect;          /* Disconnect these in next sqlite3_prepare() */
-#endif
-  FuncDefHash aFunc;            /* Hash table of connection functions */
-  Hash aCollSeq;                /* All collating sequences */
-  BusyHandler busyHandler;      /* Busy callback */
-  Db aDbStatic[2];              /* Static space for the 2 default backends */
-  Savepoint *pSavepoint;        /* List of active savepoints */
-  int busyTimeout;              /* Busy handler timeout, in msec */
-  int nSavepoint;               /* Number of non-transaction savepoints */
-  int nStatement;               /* Number of nested statement-transactions */
-  i64 nDeferredCons;            /* Net deferred constraints this transaction. */
-  int *pnBytesFreed;            /* If not NULL, increment this in DbFree() */
-
-#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
-  /* The following variables are all protected by the STATIC_MASTER
-  ** mutex, not by sqlite3.mutex. They are used by code in notify.c.
-  **
-  ** When X.pUnlockConnection==Y, that means that X is waiting for Y to
-  ** unlock so that it can proceed.
-  **
-  ** When X.pBlockingConnection==Y, that means that something that X tried
-  ** to do recently failed with an SQLITE_LOCKED error due to locks
-  ** held by Y.
-  */
-  sqlite3 *pBlockingConnection;         /* Connection that caused SQLITE_LOCKED */
-  sqlite3 *pUnlockConnection;           /* Connection to watch for unlock */
-  void *pUnlockArg;                     /* Argument to xUnlockNotify */
-  void (*xUnlockNotify)(void **, int);  /* Unlock notify callback */
-  sqlite3 *pNextBlocked;        /* Next in list of all blocked connections */
-#endif
-};
-
-/*
-** A macro to discover the encoding of a database.
-*/
-#define ENC(db) ((db)->aDb[0].pSchema->enc)
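The xCommitCallback, xRollbackCallback and xUpdateCallback members above are set through the public hook interfaces; a short illustration:

    #include <stdio.h>
    #include "sqlite3.h"

    static int onCommit(void *pArg){
      (void)pArg;
      return 0;   /* a non-zero return converts the COMMIT into a ROLLBACK */
    }

    static void onUpdate(void *pArg, int op, const char *zDb,
                         const char *zTbl, sqlite3_int64 rowid){
      (void)pArg;
      printf("%s on %s.%s rowid=%lld\n",
             op==SQLITE_INSERT ? "INSERT" :
             op==SQLITE_DELETE ? "DELETE" : "UPDATE",
             zDb, zTbl, (long long)rowid);
    }

    static void installHooks(sqlite3 *db){
      sqlite3_commit_hook(db, onCommit, 0);
      sqlite3_update_hook(db, onUpdate, 0);
    }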
-
-/*
-** Possible values for the sqlite3.flags.
-*/
-#define SQLITE_VdbeTrace      0x00000100  /* True to trace VDBE execution */
-#define SQLITE_InternChanges  0x00000200  /* Uncommitted Hash table changes */
-#define SQLITE_FullColNames   0x00000400  /* Show full column names on SELECT */
-#define SQLITE_ShortColNames  0x00000800  /* Show short column names */
-#define SQLITE_CountRows      0x00001000  /* Count rows changed by INSERT, */
-                                          /*   DELETE, or UPDATE and return */
-                                          /*   the count using a callback. */
-#define SQLITE_NullCallback   0x00002000  /* Invoke the callback once if the */
-                                          /*   result set is empty */
-#define SQLITE_SqlTrace       0x00004000  /* Debug print SQL as it executes */
-#define SQLITE_VdbeListing    0x00008000  /* Debug listings of VDBE programs */
-#define SQLITE_WriteSchema    0x00010000  /* OK to update SQLITE_MASTER */
-                         /*   0x00020000  Unused */
-#define SQLITE_IgnoreChecks   0x00040000  /* Do not enforce check constraints */
-#define SQLITE_ReadUncommitted 0x00080000 /* For shared-cache mode */
-#define SQLITE_LegacyFileFmt  0x00100000  /* Create new databases in format 1 */
-#define SQLITE_FullFSync      0x00200000  /* Use full fsync on the backend */
-#define SQLITE_CkptFullFSync  0x00400000  /* Use full fsync for checkpoint */
-#define SQLITE_RecoveryMode   0x00800000  /* Ignore schema errors */
-#define SQLITE_ReverseOrder   0x01000000  /* Reverse unordered SELECTs */
-#define SQLITE_RecTriggers    0x02000000  /* Enable recursive triggers */
-#define SQLITE_ForeignKeys    0x04000000  /* Enforce foreign key constraints */
-#define SQLITE_AutoIndex      0x08000000  /* Enable automatic indexes */
-#define SQLITE_PreferBuiltin  0x10000000  /* Preference to built-in funcs */
-#define SQLITE_LoadExtension  0x20000000  /* Enable load_extension */
-#define SQLITE_EnableTrigger  0x40000000  /* True to enable triggers */
-
-/*
-** Bits of the sqlite3.flags field that are used by the
-** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface.
-** These must be the low-order bits of the flags field.
-*/
-#define SQLITE_QueryFlattener 0x01        /* Disable query flattening */
-#define SQLITE_ColumnCache    0x02        /* Disable the column cache */
-#define SQLITE_IndexSort      0x04        /* Disable indexes for sorting */
-#define SQLITE_IndexSearch    0x08        /* Disable indexes for searching */
-#define SQLITE_IndexCover     0x10        /* Disable index covering table */
-#define SQLITE_GroupByOrder   0x20        /* Disable GROUPBY cover of ORDERBY */
-#define SQLITE_FactorOutConst 0x40        /* Disable factoring out constants */
-#define SQLITE_IdxRealAsInt   0x80        /* Store REAL as INT in indices */
-#define SQLITE_DistinctOpt    0x80        /* DISTINCT using indexes */
-#define SQLITE_OptMask        0xff        /* Mask of all disablable opts */
-
-/*
-** Possible values for the sqlite.magic field.
-** The numbers are obtained at random and have no special meaning, other
-** than being distinct from one another.
-*/
-#define SQLITE_MAGIC_OPEN     0xa029a697  /* Database is open */
-#define SQLITE_MAGIC_CLOSED   0x9f3c2d33  /* Database is closed */
-#define SQLITE_MAGIC_SICK     0x4b771290  /* Error and awaiting close */
-#define SQLITE_MAGIC_BUSY     0xf03b7906  /* Database currently in use */
-#define SQLITE_MAGIC_ERROR    0xb5357930  /* An SQLITE_MISUSE error occurred */
-
-/*
-** Each SQL function is defined by an instance of the following
-** structure.  A pointer to this structure is stored in the sqlite.aFunc
-** hash table.  When multiple functions have the same name, the hash table
-** points to a linked list of these structures.
-*/
-struct FuncDef {
-  i16 nArg;            /* Number of arguments.  -1 means unlimited */
-  u8 iPrefEnc;         /* Preferred text encoding (SQLITE_UTF8, 16LE, 16BE) */
-  u8 flags;            /* Some combination of SQLITE_FUNC_* */
-  void *pUserData;     /* User data parameter */
-  FuncDef *pNext;      /* Next function with same name */
-  void (*xFunc)(sqlite3_context*,int,sqlite3_value**);  /* Regular function */
-  void (*xStep)(sqlite3_context*,int,sqlite3_value**);  /* Aggregate step */
-  void (*xFinalize)(sqlite3_context*);                  /* Aggregate finalizer */
-  char *zName;         /* SQL name of the function. */
-  FuncDef *pHash;      /* Next with a different name but the same hash */
-  FuncDestructor *pDestructor;   /* Reference counted destructor function */
-};
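Application-defined FuncDef entries are created through the public sqlite3_create_function() interface; a small working example registering a scalar function named "half" (the function name is illustrative):

    #include "sqlite3.h"

    static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_double(ctx, 0.5 * sqlite3_value_double(argv[0]));
    }

    static int registerHalf(sqlite3 *db){
      /* nArg=1; passing -1 would accept any number of arguments */
      return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                     halfFunc, 0, 0);
    }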
-
-/*
-** This structure encapsulates a user-function destructor callback (as
-** configured using create_function_v2()) and a reference counter. When
-** create_function_v2() is called to create a function with a destructor,
-** a single object of this type is allocated. FuncDestructor.nRef is set to
-** the number of FuncDef objects created (either 1 or 3, depending on whether
-** or not the specified encoding is SQLITE_ANY). The FuncDef.pDestructor
-** member of each of the new FuncDef objects is set to point to the allocated
-** FuncDestructor.
-**
-** Thereafter, when one of the FuncDef objects is deleted, the reference
-** count on this object is decremented. When it reaches 0, the destructor
-** is invoked and the FuncDestructor structure freed.
-*/
-struct FuncDestructor {
-  int nRef;
-  void (*xDestroy)(void *);
-  void *pUserData;
-};
-
-/*
-** Possible values for FuncDef.flags.  Note that the _LENGTH and _TYPEOF
-** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG.  There
-** are assert() statements in the code to verify this.
-*/
-#define SQLITE_FUNC_LIKE     0x01  /* Candidate for the LIKE optimization */
-#define SQLITE_FUNC_CASE     0x02  /* Case-sensitive LIKE-type function */
-#define SQLITE_FUNC_EPHEM    0x04  /* Ephemeral.  Delete with VDBE */
-#define SQLITE_FUNC_NEEDCOLL 0x08  /* sqlite3GetFuncCollSeq() might be called */
-#define SQLITE_FUNC_COUNT    0x10  /* Built-in count(*) aggregate */
-#define SQLITE_FUNC_COALESCE 0x20  /* Built-in coalesce() or ifnull() function */
-#define SQLITE_FUNC_LENGTH   0x40  /* Built-in length() function */
-#define SQLITE_FUNC_TYPEOF   0x80  /* Built-in typeof() function */
-
-/*
-** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are
-** used to create the initializers for the FuncDef structures.
-**
-**   FUNCTION(zName, nArg, iArg, bNC, xFunc)
-**     Used to create a scalar function definition of a function zName
-**     implemented by C function xFunc that accepts nArg arguments. The
-**     value passed as iArg is cast to a (void*) and made available
-**     as the user-data (sqlite3_user_data()) for the function. If
-**     argument bNC is true, then the SQLITE_FUNC_NEEDCOLL flag is set.
-**
-**   AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal)
-**     Used to create an aggregate function definition implemented by
-**     the C functions xStep and xFinal. The first four parameters
-**     are interpreted in the same way as the first 4 parameters to
-**     FUNCTION().
-**
-**   LIKEFUNC(zName, nArg, pArg, flags)
-**     Used to create a scalar function definition of a function zName
-**     that accepts nArg arguments and is implemented by a call to C
-**     function likeFunc. Argument pArg is cast to a (void *) and made
-**     available as the function user-data (sqlite3_user_data()). The
-**     FuncDef.flags variable is set to the value passed as the flags
-**     parameter.
-*/
-#define FUNCTION(zName, nArg, iArg, bNC, xFunc) \
-  {nArg, SQLITE_UTF8, (bNC*SQLITE_FUNC_NEEDCOLL), \
-   SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0}
-#define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \
-  {nArg, SQLITE_UTF8, (bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags, \
-   SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0}
-#define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \
-  {nArg, SQLITE_UTF8, bNC*SQLITE_FUNC_NEEDCOLL, \
-   pArg, 0, xFunc, 0, 0, #zName, 0, 0}
-#define LIKEFUNC(zName, nArg, arg, flags) \
-  {nArg, SQLITE_UTF8, flags, (void *)arg, 0, likeFunc, 0, 0, #zName, 0, 0}
-#define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \
-  {nArg, SQLITE_UTF8, nc*SQLITE_FUNC_NEEDCOLL, \
-   SQLITE_INT_TO_PTR(arg), 0, 0, xStep,xFinal,#zName,0,0}
-
-/*
-** All current savepoints are stored in a linked list starting at
-** sqlite3.pSavepoint. The first element in the list is the most recently
-** opened savepoint. Savepoints are added to the list by the vdbe
-** OP_Savepoint instruction.
-*/
-struct Savepoint {
-  char *zName;            /* Savepoint name (nul-terminated) */
-  i64 nDeferredCons;      /* Number of deferred fk violations */
-  Savepoint *pNext;       /* Parent savepoint (if any) */
-};
-
-/*
-** The following are used as the second parameter to sqlite3Savepoint(),
-** and as the P1 argument to the OP_Savepoint instruction.
-*/
-#define SAVEPOINT_BEGIN      0
-#define SAVEPOINT_RELEASE    1
-#define SAVEPOINT_ROLLBACK   2
-
-
-/*
-** Each SQLite module (virtual table definition) is defined by an
-** instance of the following structure, stored in the sqlite3.aModule
-** hash table.
-*/
-struct Module {
-  const sqlite3_module *pModule;  /* Callback pointers */
-  const char *zName;              /* Name passed to create_module() */
-  void *pAux;                     /* pAux passed to create_module() */
-  void (*xDestroy)(void *);       /* Module destructor function */
-};
-
-/*
-** Information about each column of an SQL table is held in an instance
-** of this structure.
-*/
-struct Column {
-  char *zName;     /* Name of this column */
-  Expr *pDflt;     /* Default value of this column */
-  char *zDflt;     /* Original text of the default value */
-  char *zType;     /* Data type for this column */
-  char *zColl;     /* Collating sequence.  If NULL, use the default */
-  u8 notNull;      /* True if there is a NOT NULL constraint */
-  u8 isPrimKey;    /* True if this column is part of the PRIMARY KEY */
-  char affinity;   /* One of the SQLITE_AFF_... values */
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-  u8 isHidden;     /* True if this column is 'hidden' */
-#endif
-};
-
-/*
-** A "Collating Sequence" is defined by an instance of the following
-** structure. Conceptually, a collating sequence consists of a name and
-** a comparison routine that defines the order of that sequence.
-**
-** There may be two separate implementations of the collation function, one
-** that processes text in UTF-8 encoding (CollSeq.xCmp) and another that
-** processes text encoded in UTF-16 (CollSeq.xCmp16), using the machine
-** native byte order. When a collation sequence is invoked, SQLite selects
-** the version that will require the least expensive encoding
-** translations, if any.
-**
-** The CollSeq.pUser member variable is an extra parameter that passed in
-** as the first argument to the UTF-8 comparison function, xCmp.
-** CollSeq.pUser16 is the equivalent for the UTF-16 comparison function,
-** xCmp16.
-**
-** If both CollSeq.xCmp and CollSeq.xCmp16 are NULL, it means that the
-** collating sequence is undefined.  Indices built on an undefined
-** collating sequence may not be read or written.
-*/
-struct CollSeq {
-  char *zName;          /* Name of the collating sequence, UTF-8 encoded */
-  u8 enc;               /* Text encoding handled by xCmp() */
-  void *pUser;          /* First argument to xCmp() */
-  int (*xCmp)(void*,int, const void*, int, const void*);
-  void (*xDel)(void*);  /* Destructor for pUser */
-};
-
-/*
-** A sort order can be either ASC or DESC.
-*/
-#define SQLITE_SO_ASC       0  /* Sort in ascending order */
-#define SQLITE_SO_DESC      1  /* Sort in descending order */
-
-/*
-** Column affinity types.
-**
-** These used to have mnemonic names like 'i' for SQLITE_AFF_INTEGER and
-** 't' for SQLITE_AFF_TEXT.  But we can save a little space and improve
-** the speed a little by numbering the values consecutively.
-**
-** But rather than start with 0 or 1, we begin with 'a'.  That way,
-** when multiple affinity types are concatenated into a string and
-** used as the P4 operand, they will be more readable.
-**
-** Note also that the numeric types are grouped together so that testing
-** for a numeric type is a single comparison.
-*/
-#define SQLITE_AFF_TEXT     'a'
-#define SQLITE_AFF_NONE     'b'
-#define SQLITE_AFF_NUMERIC  'c'
-#define SQLITE_AFF_INTEGER  'd'
-#define SQLITE_AFF_REAL     'e'
-
-#define sqlite3IsNumericAffinity(X)  ((X)>=SQLITE_AFF_NUMERIC)
-
-/*
-** The SQLITE_AFF_MASK value masks off the significant bits of an
-** affinity value.
-*/
-#define SQLITE_AFF_MASK     0x67
-
-/*
-** Additional bit values that can be ORed with an affinity without
-** changing the affinity.
-*/
-#define SQLITE_JUMPIFNULL   0x08  /* jumps if either operand is NULL */
-#define SQLITE_STOREP2      0x10  /* Store result in reg[P2] rather than jump */
-#define SQLITE_NULLEQ       0x80  /* NULL=NULL */
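Because the affinity codes are consecutive characters 'a'..'e' with the numeric types grouped last, the "is numeric" test collapses to a single comparison, which the following self-contained check makes explicit (it only assumes the SQLITE_AFF_* definitions above):

    #include <assert.h>

    static void affinityChecks(void){
      assert( !sqlite3IsNumericAffinity(SQLITE_AFF_TEXT) );    /* 'a' */
      assert( !sqlite3IsNumericAffinity(SQLITE_AFF_NONE) );    /* 'b' */
      assert(  sqlite3IsNumericAffinity(SQLITE_AFF_NUMERIC) ); /* 'c' */
      assert(  sqlite3IsNumericAffinity(SQLITE_AFF_INTEGER) ); /* 'd' */
      assert(  sqlite3IsNumericAffinity(SQLITE_AFF_REAL) );    /* 'e' */
    }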
-
-/*
-** An object of this type is created for each virtual table present in
-** the database schema.
-**
-** If the database schema is shared, then there is one instance of this
-** structure for each database connection (sqlite3*) that uses the shared
-** schema. This is because each database connection requires its own unique
-** instance of the sqlite3_vtab* handle used to access the virtual table
-** implementation. sqlite3_vtab* handles cannot be shared between
-** database connections, even when the rest of the in-memory database
-** schema is shared, as the implementation often stores the database
-** connection handle passed to it via the xConnect() or xCreate() method
-** during initialization internally. This database connection handle may
-** then be used by the virtual table implementation to access real tables
-** within the database. So that they appear as part of the caller's
-** transaction, these accesses need to be made via the same database
-** connection as that used to execute SQL operations on the virtual table.
-**
-** All VTable objects that correspond to a single table in a shared
-** database schema are initially stored in a linked-list pointed to by
-** the Table.pVTable member variable of the corresponding Table object.
-** When an sqlite3_prepare() operation is required to access the virtual
-** table, it searches the list for the VTable that corresponds to the
-** database connection doing the preparing so as to use the correct
-** sqlite3_vtab* handle in the compiled query.
-**
-** When an in-memory Table object is deleted (for example when the
-** schema is being reloaded for some reason), the VTable objects are not
-** deleted and the sqlite3_vtab* handles are not xDisconnect()ed
-** immediately. Instead, they are moved from the Table.pVTable list to
-** another linked list headed by the sqlite3.pDisconnect member of the
-** corresponding sqlite3 structure. They are then deleted/xDisconnected
-** next time a statement is prepared using said sqlite3*. This is done
-** to avoid deadlock issues involving multiple sqlite3.mutex mutexes.
-** Refer to comments above function sqlite3VtabUnlockList() for an
-** explanation as to why it is safe to add an entry to an sqlite3.pDisconnect
-** list without holding the corresponding sqlite3.mutex mutex.
-**
-** The memory for objects of this type is always allocated by
-** sqlite3DbMalloc(), using the connection handle stored in VTable.db as
-** the first argument.
-*/
-struct VTable {
-  sqlite3 *db;          /* Database connection associated with this table */
-  Module *pMod;         /* Pointer to module implementation */
-  sqlite3_vtab *pVtab;  /* Pointer to vtab instance */
-  int nRef;             /* Number of pointers to this structure */
-  u8 bConstraint;       /* True if constraints are supported */
-  int iSavepoint;       /* Depth of the SAVEPOINT stack */
-  VTable *pNext;        /* Next in linked list (see above) */
-};
-
-/*
-** Each SQL table is represented in memory by an instance of the
-** following structure.
-**
-** Table.zName is the name of the table. The case of the original
-** CREATE TABLE statement is stored, but case is not significant for
-** comparisons.
-**
-** Table.nCol is the number of columns in this table. Table.aCol is a
-** pointer to an array of Column structures, one for each column.
-**
-** If the table has an INTEGER PRIMARY KEY, then Table.iPKey is the index of
-** the column that is that key. Otherwise Table.iPKey is negative. Note
-** that the datatype of the PRIMARY KEY must be INTEGER for this field to
-** be set. An INTEGER PRIMARY KEY is used as the rowid for each row of
-** the table. If a table has no INTEGER PRIMARY KEY, then a random rowid
-** is generated for each row of the table. TF_HasPrimaryKey is set if
-** the table has any PRIMARY KEY, INTEGER or otherwise.
-**
-** Table.tnum is the page number for the root BTree page of the table in the
-** database file. Table.iDb is the index of the database table backend
-** in sqlite.aDb[]. 0 is for the main database and 1 is for the file that
-** holds temporary tables and indices. If TF_Ephemeral is set
-** then the table is stored in a file that is automatically deleted
-** when the VDBE cursor to the table is closed. In this case Table.tnum
-** refers to the VDBE cursor number that holds the table open, not to the
-** root page number. Transient tables are used to hold the results of a
-** sub-query that appears instead of a real table name in the FROM clause
-** of a SELECT statement.
-*/
-struct Table {
-  char *zName;         /* Name of the table or view */
-  int iPKey;           /* If not negative, use aCol[iPKey] as the primary key */
-  int nCol;            /* Number of columns in this table */
-  Column *aCol;        /* Information about each column */
-  Index *pIndex;       /* List of SQL indexes on this table. */
-  int tnum;            /* Root BTree node for this table (see note above) */
-  tRowcnt nRowEst;     /* Estimated rows in table - from sqlite_stat1 table */
-  Select *pSelect;     /* NULL for tables. Points to definition if a view. */
-  u16 nRef;            /* Number of pointers to this Table */
-  u8 tabFlags;         /* Mask of TF_* values */
-  u8 keyConf;          /* What to do in case of uniqueness conflict on iPKey */
-  FKey *pFKey;         /* Linked list of all foreign keys in this table */
-  char *zColAff;       /* String defining the affinity of each column */
-#ifndef SQLITE_OMIT_CHECK
-  ExprList *pCheck;    /* All CHECK constraints */
-#endif
-#ifndef SQLITE_OMIT_ALTERTABLE
-  int addColOffset;    /* Offset in CREATE TABLE stmt to add a new column */
-#endif
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-  VTable *pVTable;     /* List of VTable objects. */
-  int nModuleArg;      /* Number of arguments to the module */
-  char **azModuleArg;  /* Text of all module args. [0] is module name */
-#endif
-  Trigger *pTrigger;   /* List of triggers stored in pSchema */
-  Schema *pSchema;     /* Schema that contains this table */
-  Table *pNextZombie;  /* Next on the Parse.pZombieTab list */
-};
-
-/*
-** Allowed values for Table.tabFlags.
-*/
-#define TF_Readonly        0x01  /* Read-only system table */
-#define TF_Ephemeral       0x02  /* An ephemeral table */
-#define TF_HasPrimaryKey   0x04  /* Table has a primary key */
-#define TF_Autoincrement   0x08  /* Integer primary key is autoincrement */
-#define TF_Virtual         0x10  /* Is a virtual table */
-
-
-/*
-** Test to see whether or not a table is a virtual table. This is
-** done as a macro so that it will be optimized out when virtual
-** table support is omitted from the build.
-*/
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-#  define IsVirtual(X)      (((X)->tabFlags & TF_Virtual)!=0)
-#  define IsHiddenColumn(X) ((X)->isHidden)
-#else
-#  define IsVirtual(X)      0
-#  define IsHiddenColumn(X) 0
-#endif
-
-/*
-** Each foreign key constraint is an instance of the following structure.
-**
-** A foreign key is associated with two tables. The "from" table is
-** the table that contains the REFERENCES clause that creates the foreign
-** key. The "to" table is the table that is named in the REFERENCES clause.
-** Consider this example:
-**
-**     CREATE TABLE ex1(
-**       a INTEGER PRIMARY KEY,
-**       b INTEGER CONSTRAINT fk1 REFERENCES ex2(x)
-**     );
-**
-** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2".
-**
-** Each REFERENCES clause generates an instance of the following structure
-** which is attached to the from-table. The to-table need not exist when
-** the from-table is created. The existence of the to-table is not checked.
-*/
-struct FKey {
-  Table *pFrom;           /* Table containing the REFERENCES clause (aka: Child) */
-  FKey *pNextFrom;        /* Next foreign key in pFrom */
-  char *zTo;              /* Name of table that the key points to (aka: Parent) */
-  FKey *pNextTo;          /* Next foreign key on table named zTo */
-  FKey *pPrevTo;          /* Previous foreign key on table named zTo */
-  int nCol;               /* Number of columns in this key */
-  /* EV: R-30323-21917 */
-  u8 isDeferred;          /* True if constraint checking is deferred till COMMIT */
-  u8 aAction[2];          /* ON DELETE and ON UPDATE actions, respectively */
-  Trigger *apTrigger[2];  /* Triggers for aAction[] actions */
-  struct sColMap {        /* Mapping of columns in pFrom to columns in zTo */
-    int iFrom;            /* Index of column in pFrom */
-    char *zCol;           /* Name of column in zTo. If 0 use PRIMARY KEY */
-  } aCol[1];              /* One entry for each of nCol columns */
-};
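[Editor's note] The "from"/"to" relationship above maps directly onto SQL.
As a hedged sketch of how such a constraint is declared and exercised
through the public API (the table names and the CASCADE action are
illustrative choices, not something this header mandates):

    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      /* FK enforcement is off by default and must be switched on. */
      sqlite3_exec(db, "PRAGMA foreign_keys=ON;", 0, 0, 0);
      /* ex2 is the to-table (parent); ex1 is the from-table (child). */
      sqlite3_exec(db,
        "CREATE TABLE ex2(x INTEGER PRIMARY KEY);"
        "CREATE TABLE ex1(a INTEGER PRIMARY KEY,"
        "  b INTEGER CONSTRAINT fk1 REFERENCES ex2(x) ON DELETE CASCADE);"
        "INSERT INTO ex2 VALUES(1); INSERT INTO ex1 VALUES(10,1);"
        "DELETE FROM ex2;",            /* cascades into ex1 */
        0, 0, 0);
      sqlite3_close(db);
      return 0;
    }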
-
-/*
-** SQLite supports many different ways to resolve a constraint
-** error. ROLLBACK processing means that a constraint violation
-** causes the operation in process to fail and for the current transaction
-** to be rolled back. ABORT processing means the operation in process
-** fails and any prior changes from that one operation are backed out,
-** but the transaction is not rolled back. FAIL processing means that
-** the operation in progress stops and returns an error code. But prior
-** changes due to the same operation are not backed out and no rollback
-** occurs. IGNORE means that the particular row that caused the constraint
-** error is not inserted or updated. Processing continues and no error
-** is returned. REPLACE means that preexisting database rows that caused
-** a UNIQUE constraint violation are removed so that the new insert or
-** update can proceed. Processing continues and no error is reported.
-**
-** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys.
-** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the
-** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign
-** key is set to NULL. CASCADE means that a DELETE or UPDATE of the
-** referenced table row is propagated into the row that holds the
-** foreign key.
-**
-** The following symbolic values are used to record which type
-** of action to take.
-*/
-#define OE_None     0   /* There is no constraint to check */
-#define OE_Rollback 1   /* Fail the operation and rollback the transaction */
-#define OE_Abort    2   /* Back out changes but do not roll back the transaction */
-#define OE_Fail     3   /* Stop the operation but leave all prior changes */
-#define OE_Ignore   4   /* Ignore the error. Do not do the INSERT or UPDATE */
-#define OE_Replace  5   /* Delete existing record, then do INSERT or UPDATE */
-
-#define OE_Restrict 6   /* OE_Abort for IMMEDIATE, OE_Rollback for DEFERRED */
-#define OE_SetNull  7   /* Set the foreign key value to NULL */
-#define OE_SetDflt  8   /* Set the foreign key value to its default */
-#define OE_Cascade  9   /* Cascade the changes */
-
-#define OE_Default  99  /* Do whatever the default action is */
-
-
-/*
-** An instance of the following structure is passed as the first
-** argument to sqlite3VdbeKeyCompare and is used to control the
-** comparison of the two index keys.
-*/
-struct KeyInfo {
-  sqlite3 *db;        /* The database connection */
-  u8 enc;             /* Text encoding - one of the SQLITE_UTF* values */
-  u16 nField;         /* Number of entries in aColl[] */
-  u8 *aSortOrder;     /* Sort order for each column. May be NULL */
-  CollSeq *aColl[1];  /* Collating sequence for each term of the key */
-};
-
-/*
-** An instance of the following structure holds information about a
-** single index record that has already been parsed out into individual
-** values.
-**
-** A record is an object that contains one or more fields of data.
-** Records are used to store the content of a table row and to store
-** the key of an index. A blob encoding of a record is created by
-** the OP_MakeRecord opcode of the VDBE and is disassembled by the
-** OP_Column opcode.
-**
-** This structure holds a record that has already been disassembled
-** into its constituent fields.
-*/
-struct UnpackedRecord {
-  KeyInfo *pKeyInfo;  /* Collation and sort-order information */
-  u16 nField;         /* Number of entries in apMem[] */
-  u8 flags;           /* Boolean settings. UNPACKED_... below */
-  i64 rowid;          /* Used by UNPACKED_PREFIX_SEARCH */
-  Mem *aMem;          /* Values */
-};
-
-/*
-** Allowed values of UnpackedRecord.flags
-*/
-#define UNPACKED_INCRKEY        0x01  /* Make this key an epsilon larger */
-#define UNPACKED_PREFIX_MATCH   0x02  /* A prefix match is considered OK */
-#define UNPACKED_PREFIX_SEARCH  0x04  /* Ignore final (rowid) field */
-
-/*
-** Each SQL index is represented in memory by an
-** instance of the following structure.
-**
-** The columns of the table that are to be indexed are described
-** by the aiColumn[] field of this structure. For example, suppose
-** we have the following table and index:
-**
-**     CREATE TABLE Ex1(c1 int, c2 int, c3 text);
-**     CREATE INDEX Ex2 ON Ex1(c3,c1);
-**
-** In the Table structure describing Ex1, nCol==3 because there are
-** three columns in the table. In the Index structure describing
-** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed.
-** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the
-** first column to be indexed (c3) has an index of 2 in Ex1.aCol[].
-** The second column to be indexed (c1) has an index of 0 in
-** Ex1.aCol[], hence Ex2.aiColumn[1]==0.
-**
-** The Index.onError field determines whether or not the indexed columns
-** must be unique and what to do if they are not. When Index.onError=OE_None,
-** it means this is not a unique index. Otherwise it is a unique index
-** and the value of Index.onError indicates which conflict resolution
-** algorithm to employ whenever an attempt is made to insert a non-unique
-** element.
-*/
-struct Index {
-  char *zName;       /* Name of this index */
-  int *aiColumn;     /* Which columns are used by this index. 1st is 0 */
-  tRowcnt *aiRowEst; /* Result of ANALYZE: Est. rows selected by each column */
-  Table *pTable;     /* The SQL table being indexed */
-  char *zColAff;     /* String defining the affinity of each column */
-  Index *pNext;      /* The next index associated with the same table */
-  Schema *pSchema;   /* Schema containing this index */
-  u8 *aSortOrder;    /* Array of size Index.nColumn. True==DESC, False==ASC */
-  char **azColl;     /* Array of collation sequence names for index */
-  int nColumn;       /* Number of columns in the table used by this index */
-  int tnum;          /* Page containing root of this index in database file */
-  u8 onError;        /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
-  u8 autoIndex;      /* True if is automatically created (ex: by UNIQUE) */
-  u8 bUnordered;     /* Use this index for == or IN queries only */
-#ifdef SQLITE_ENABLE_STAT3
-  int nSample;       /* Number of elements in aSample[] */
-  tRowcnt avgEq;     /* Average nEq value for key values not in aSample */
-  IndexSample *aSample; /* Samples of the left-most key */
-#endif
-};
-
-/*
-** Each sample stored in the sqlite_stat3 table is represented in memory
-** using a structure of this type. See documentation at the top of the
-** analyze.c source file for additional information.
-*/
-struct IndexSample {
-  union {
-    char *z;      /* Value if eType is SQLITE_TEXT or SQLITE_BLOB */
-    double r;     /* Value if eType is SQLITE_FLOAT */
-    i64 i;        /* Value if eType is SQLITE_INTEGER */
-  } u;
-  u8 eType;       /* SQLITE_NULL, SQLITE_INTEGER ... etc. */
-  int nByte;      /* Size in byte of text or blob. */
-  tRowcnt nEq;    /* Est. number of rows where the key equals this sample */
-  tRowcnt nLt;    /* Est. number of rows where key is less than this sample */
-  tRowcnt nDLt;   /* Est. number of distinct keys less than this sample */
-};
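[Editor's note] The Ex1/Ex2 mapping described above can be observed from the
public API: PRAGMA index_info lists, for each index column, the table-column
position that aiColumn[] records internally. A hedged sketch (the expected
output is reasoned from the comment above, not verified against this tree):

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *st;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db, "CREATE TABLE Ex1(c1 int, c2 int, c3 text);"
                       "CREATE INDEX Ex2 ON Ex1(c3,c1);", 0, 0, 0);
      sqlite3_prepare_v2(db, "PRAGMA index_info('Ex2');", -1, &st, 0);
      while( sqlite3_step(st)==SQLITE_ROW ){
        /* seqno,cid: expect rows (0,2) and (1,0), i.e. aiColumn=={2,0} */
        printf("seqno=%d cid=%d\n", sqlite3_column_int(st, 0),
                                    sqlite3_column_int(st, 1));
      }
      sqlite3_finalize(st);
      sqlite3_close(db);
      return 0;
    }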
-
-/*
-** Each token coming out of the lexer is an instance of
-** this structure. Tokens are also used as part of an expression.
-**
-** Note if Token.z==0 then Token.dyn and Token.n are undefined and
-** may contain random values. Do not make any assumptions about Token.dyn
-** and Token.n when Token.z==0.
-*/
-struct Token {
-  const char *z;   /* Text of the token. Not NULL-terminated! */
-  unsigned int n;  /* Number of characters in this token */
-};
-
-/*
-** An instance of this structure contains information needed to generate
-** code for a SELECT that contains aggregate functions.
-**
-** If Expr.op==TK_AGG_COLUMN or TK_AGG_FUNCTION then Expr.pAggInfo is a
-** pointer to this structure. The Expr.iColumn field is the index in
-** AggInfo.aCol[] or AggInfo.aFunc[] of information needed to generate
-** code for that node.
-**
-** AggInfo.pGroupBy and AggInfo.aFunc.pExpr point to fields within the
-** original Select structure that describes the SELECT statement. These
-** fields do not need to be freed when deallocating the AggInfo structure.
-*/
-struct AggInfo {
-  u8 directMode;        /* Direct rendering mode means take data directly
-                        ** from source tables rather than from accumulators */
-  u8 useSortingIdx;     /* In direct mode, reference the sorting index rather
-                        ** than the source table */
-  int sortingIdx;       /* Cursor number of the sorting index */
-  int sortingIdxPTab;   /* Cursor number of pseudo-table */
-  int nSortingColumn;   /* Number of columns in the sorting index */
-  ExprList *pGroupBy;   /* The group by clause */
-  struct AggInfo_col {  /* For each column used in source tables */
-    Table *pTab;        /* Source table */
-    int iTable;         /* Cursor number of the source table */
-    int iColumn;        /* Column number within the source table */
-    int iSorterColumn;  /* Column number in the sorting index */
-    int iMem;           /* Memory location that acts as accumulator */
-    Expr *pExpr;        /* The original expression */
-  } *aCol;
-  int nColumn;          /* Number of used entries in aCol[] */
-  int nAccumulator;     /* Number of columns that show through to the output.
-                        ** Additional columns are used only as parameters to
-                        ** aggregate functions */
-  struct AggInfo_func { /* For each aggregate function */
-    Expr *pExpr;        /* Expression encoding the function */
-    FuncDef *pFunc;     /* The aggregate function implementation */
-    int iMem;           /* Memory location that acts as accumulator */
-    int iDistinct;      /* Ephemeral table used to enforce DISTINCT */
-  } *aFunc;
-  int nFunc;            /* Number of entries in aFunc[] */
-};
-
-/*
-** The datatype ynVar is a signed integer, either 16-bit or 32-bit.
-** Usually it is 16 bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater
-** than 32767 we have to make it 32-bit. 16-bit is preferred because
-** it uses less memory in the Expr object, which is a big memory user
-** in systems with lots of prepared statements. And few applications
-** need more than about 10 or 20 variables. But some extreme users want
-** to have prepared statements with over 32767 variables, and for them
-** the option is available (at compile-time).
-*/
-#if SQLITE_MAX_VARIABLE_NUMBER<=32767
-typedef i16 ynVar;
-#else
-typedef int ynVar;
-#endif
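[Editor's note] The variable numbers that ynVar holds are the same indexes
the public binding interface uses. A small sketch using only documented
calls (the statement text and helper name are invented for illustration):

    #include <assert.h>
    #include <sqlite3.h>

    void bindDemo(sqlite3 *db){
      sqlite3_stmt *st;
      /* The markers are numbered 1 and 2; both fit easily in a 16-bit ynVar. */
      sqlite3_prepare_v2(db, "SELECT ?1 + ?2;", -1, &st, 0);
      assert( sqlite3_bind_parameter_count(st)==2 );
      sqlite3_bind_int(st, 1, 40);
      sqlite3_bind_int(st, 2, 2);
      sqlite3_step(st);        /* yields 42 */
      sqlite3_finalize(st);
    }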
-
-/*
-** Each node of an expression in the parse tree is an instance
-** of this structure.
-**
-** Expr.op is the opcode. The integer parser token codes are reused
-** as opcodes here. For example, the parser defines TK_GE to be an integer
-** code representing the ">=" operator. This same integer code is reused
-** to represent the greater-than-or-equal-to operator in the expression
-** tree.
-**
-** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB,
-** or TK_STRING), then Expr.token contains the text of the SQL literal. If
-** the expression is a variable (TK_VARIABLE), then Expr.token contains the
-** variable name. Finally, if the expression is an SQL function (TK_FUNCTION),
-** then Expr.token contains the name of the function.
-**
-** Expr.pRight and Expr.pLeft are the left and right subexpressions of a
-** binary operator. Either or both may be NULL.
-**
-** Expr.x.pList is a list of arguments if the expression is an SQL function,
-** a CASE expression or an IN expression of the form "<expr> IN (<expr>, ...)".
-** Expr.x.pSelect is used if the expression is a sub-select or an expression of
-** the form "<expr> IN (SELECT ...)". If the EP_xIsSelect bit is set in the
-** Expr.flags mask, then Expr.x.pSelect is valid. Otherwise, Expr.x.pList is
-** valid.
-**
-** An expression of the form ID or ID.ID refers to a column in a table.
-** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is
-** the integer cursor number of a VDBE cursor pointing to that table and
-** Expr.iColumn is the column number for the specific column. If the
-** expression is used as a result in an aggregate SELECT, then the
-** value is also stored in the Expr.iAgg column in the aggregate so that
-** it can be accessed after all aggregates are computed.
-**
-** If the expression is an unbound variable marker (a question mark
-** character '?' in the original SQL) then the Expr.iTable holds the index
-** number for that variable.
-**
-** If the expression is a subquery then Expr.iColumn holds an integer
-** register number containing the result of the subquery. If the
-** subquery gives a constant result, then iTable is -1. If the subquery
-** gives a different answer at different times during statement processing
-** then iTable is the address of a subroutine that computes the subquery.
-**
-** If the Expr is of type OP_Column, and the table it is selecting from
-** is a disk table or the "old.*" pseudo-table, then pTab points to the
-** corresponding table definition.
-**
-** ALLOCATION NOTES:
-**
-** Expr objects can use a lot of memory space in database schema. To
-** help reduce memory requirements, sometimes an Expr object will be
-** truncated. And to reduce the number of memory allocations, sometimes
-** two or more Expr objects will be stored in a single memory allocation,
-** together with Expr.zToken strings.
-**
-** The EP_Reduced and EP_TokenOnly flags are set when
-** an Expr object is truncated. When EP_Reduced is set, then all
-** the child Expr objects in the Expr.pLeft and Expr.pRight subtrees
-** are contained within the same memory allocation. Note, however, that
-** the subtrees in Expr.x.pList or Expr.x.pSelect are always separately
-** allocated, regardless of whether or not EP_Reduced is set.
-*/
-struct Expr {
-  u8 op;           /* Operation performed by this node */
-  char affinity;   /* The affinity of the column or 0 if not a column */
-  u16 flags;       /* Various flags. EP_* See below */
-  union {
-    char *zToken;  /* Token value. Zero terminated and dequoted */
-    int iValue;    /* Non-negative integer value if EP_IntValue */
-  } u;
-
-  /* If the EP_TokenOnly flag is set in the Expr.flags mask, then no
-  ** space is allocated for the fields below this point. An attempt to
-  ** access them will result in a segfault or malfunction.
-  *********************************************************************/
-
-  Expr *pLeft;           /* Left subnode */
-  Expr *pRight;          /* Right subnode */
-  union {
-    ExprList *pList;     /* Function arguments or in "<expr> IN (<expr-list>)" */