From c2fb0394bf4238fde94372bba88fa07330fdafbb Mon Sep 17 00:00:00 2001 From: Akshay Joshi Date: Fri, 7 Jul 2017 11:55:55 +0530 Subject: [PATCH] Added support of Declarative Partitioning (Range, List) for PostgreSQL 10. This allows the user to create a partitioned table and its partitions. - Edit partitions, Create/Listing of constraints, rules, triggers under partitions. (Implemented by Harshal) - Updating browser tree node when create/attach/detach partitions from table dialog. (Implemented by Ashesh) --- .../databases/schemas/static/js/schema.js | 36 + .../databases/schemas/tables/__init__.py | 2168 +++------------- .../column/templates/column/js/column.js | 26 +- .../tables/column/tests/test_column_get.py | 2 +- .../check_constraint/js/check_constraint.js | 15 +- .../js/exclusion_constraint.js | 19 +- .../templates/foreign_key/js/foreign_key.js | 19 +- .../index_constraint/js/index_constraint.js | 27 +- .../templates/constraints/js/constraints.js | 4 +- .../schemas/tables/indexes/__init__.py | 156 +- .../indexes/templates/index/js/index.js | 31 +- .../schemas/tables/partitions/__init__.py | 583 +++++ .../partitions/static/img/coll-partition.png | Bin 0 -> 1433 bytes .../partitions/static/img/partition.png | Bin 0 -> 1264 bytes .../tables/partitions/static/js/partition.js | 1276 ++++++++++ .../tables/rules/templates/rules/js/rules.js | 12 +- .../tables/static/js/partition.utils.js | 323 +++ .../schemas/tables/static/js/table.js | 520 +++- .../partition/sql/10_plus/attach.sql | 2 + .../partition/sql/10_plus/backend_support.sql | 9 + .../partition/sql/10_plus/create.sql | 30 + .../partition/sql/10_plus/detach.sql | 1 + .../sql/10_plus/get_attach_tables.sql | 23 + .../templates/partition/sql/10_plus/nodes.sql | 15 + .../partition/sql/10_plus/properties.sql | 82 + .../templates/table/sql/10_plus/acl.sql | 46 + .../table/sql/10_plus/get_inherits.sql | 17 + .../templates/table/sql/10_plus/get_oid.sql | 5 + .../templates/table/sql/10_plus/get_table.sql | 8 + 
.../templates/table/sql/10_plus/nodes.sql | 9 + .../table/sql/10_plus/properties.sql | 73 + .../templates/table/sql/default/create.sql | 3 +- .../table/sql/default/get_schema_oid.sql | 3 +- .../schemas/tables/tests/test_table_add.py | 61 +- .../schemas/tables/tests/test_table_delete.py | 2 +- .../schemas/tables/tests/test_table_put.py | 89 +- .../databases/schemas/tables/tests/utils.py | 150 ++ .../triggers/templates/trigger/js/trigger.js | 37 +- .../servers/databases/schemas/tables/utils.py | 2178 +++++++++++++++++ .../browser/templates/browser/js/browser.js | 197 +- .../browser/templates/browser/js/node.js | 22 +- .../backup/templates/backup/js/backup.js | 2 +- web/pgadmin/tools/datagrid/__init__.py | 26 +- .../templates/datagrid/js/datagrid.js | 2 +- .../templates/maintenance/js/maintenance.js | 13 +- .../restore/templates/restore/js/restore.js | 3 +- web/pgadmin/utils/__init__.py | 3 + web/pgadmin/utils/exception.py | 8 + 48 files changed, 6274 insertions(+), 2062 deletions(-) create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/coll-partition.png create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/partition.png create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/js/partition.js create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/static/js/partition.utils.js create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/attach.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/backend_support.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/create.sql create mode 
100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/detach.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/get_attach_tables.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/nodes.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/properties.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/acl.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_inherits.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_oid.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_table.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/nodes.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/properties.sql create mode 100644 web/pgadmin/browser/server_groups/servers/databases/schemas/tables/utils.py diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/static/js/schema.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/static/js/schema.js index 0e9099745..ef496e73e 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/static/js/schema.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/static/js/schema.js @@ -145,6 +145,16 @@ define('pgadmin.node.schema', [ group: gettext('Table'), mode: ['edit', 'create'], type: 'switch', disabled: function(m) { + // If table is partitioned table then disabled it. 
+ if (m.top && m.top.get('is_partitioned')) { + // We also need to unset rest of all + setTimeout(function() { + m.set('autovacuum_custom', false); + }, 10); + + return true; + } + if(!m.top.inSchema.apply(this, [m])) { return false; } @@ -459,6 +469,32 @@ define('pgadmin.node.schema', [ return true; } }); + + pgBrowser.tableChildTreeNodeHierarchy = function(i) { + var idx = 0, + res = {}, + t = pgBrowser.tree; + + do { + d = t.itemData(i); + if ( + d._type in pgBrowser.Nodes && pgBrowser.Nodes[d._type].hasId + ) { + if (d._type === 'partition' || d._type === 'table') { + if (!('table' in res)) { + res['table'] = _.extend({}, d, {'priority': idx}); + idx -= 1; + } + } else { + res[d._type] = _.extend({}, d, {'priority': idx}); + idx -= 1; + } + } + i = t.hasParent(i) ? t.parent(i) : null; + } while (i); + + return res; + }; } // Switch Cell with Deps (specifically for table children) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py index f6fa65ef2..17d97075a 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py @@ -11,22 +11,16 @@ import simplejson as json import re -from functools import wraps import pgadmin.browser.server_groups.servers.databases as database -from flask import render_template, request, jsonify +from flask import render_template, request, jsonify, url_for from flask_babel import gettext from pgadmin.browser.server_groups.servers.databases.schemas.utils \ - import SchemaChildModule, DataTypeReader, VacuumSettings, \ - trigger_definition, parse_rule_definition -from pgadmin.browser.server_groups.servers.utils import parse_priv_from_db, \ - parse_priv_to_db -from pgadmin.browser.utils import PGChildNodeView + import SchemaChildModule, DataTypeReader, VacuumSettings +from 
pgadmin.browser.server_groups.servers.utils import parse_priv_to_db from pgadmin.utils.ajax import make_json_response, internal_server_error, \ make_response as ajax_response, gone -from pgadmin.utils.driver import get_driver - -from config import PG_DEFAULT_DRIVER +from .utils import BaseTableView class TableModule(SchemaChildModule): @@ -79,11 +73,22 @@ class TableModule(SchemaChildModule): """ return database.DatabaseModule.NODE_TYPE + def get_own_javascripts(self): + scripts = SchemaChildModule.get_own_javascripts(self) + + scripts.append({ + 'name': 'pgadmin.browser.table.partition.utils', + 'path': url_for('browser.index') + 'table/static/js/partition.utils', + 'when': 'database', 'is_template': False + }) + + return scripts + blueprint = TableModule(__name__) -class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): +class TableView(BaseTableView, DataTypeReader, VacuumSettings): """ This class is responsible for generating routes for Table node @@ -96,11 +101,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): - This property defines (if javascript) exists for this node. Override this property for your own logic - * check_precondition() - - This function will behave as a decorator which will checks - database connection before running view, it will also attaches - manager,conn & template_path properties to self - * list() - This function is used to list all the Table nodes within that collection. @@ -149,10 +149,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): - This function will generate dependent list to show it in dependent pane for the selected node. 
- * _formatter(data, tid) - - It will return formatted output of query result - as per client model format - * get_types(self, gid, sid, did, scid) - This function will return list of types available for columns node via AJAX response @@ -178,19 +174,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): * get_toast_table_vacuum(gid, sid, did, scid=None, tid=None) - Fetch the default values for toast table auto-vacuum - * _columns_formatter(tid, data): - - It will return formatted output of query result - as per client model format for column node - - * _index_constraints_formatter(self, did, tid, data): - - It will return formatted output of query result - as per client model format for index constraint node - - * _cltype_formatter(type): (staticmethod) - - We need to remove [] from type and append it - after length/precision so we will send flag for - sql template - * _parse_format_columns(self, data, mode=None): - This function will parse and return formatted list of columns added by user @@ -252,71 +235,16 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): 'get_access_methods': [{}, {'get': 'get_access_methods'}], 'get_oper_class': [{}, {'get': 'get_oper_class'}], 'get_operator': [{}, {'get': 'get_operator'}], + 'get_attach_tables': [ + {'get': 'get_attach_tables'}, + {'get': 'get_attach_tables'}], 'select_sql': [{'get': 'select_sql'}], 'insert_sql': [{'get': 'insert_sql'}], 'update_sql': [{'get': 'update_sql'}], 'delete_sql': [{'get': 'delete_sql'}] - }) - def check_precondition(f): - """ - This function will behave as a decorator which will checks - database connection before running view, it will also attaches - manager,conn & template_path properties to self - """ - - @wraps(f) - def wrap(*args, **kwargs): - # Here args[0] will hold self & kwargs will hold gid,sid,did - self = args[0] - driver = get_driver(PG_DEFAULT_DRIVER) - did = kwargs['did'] - self.manager = driver.connection_manager(kwargs['sid']) - self.conn = 
self.manager.connection(did=kwargs['did']) - self.qtIdent = driver.qtIdent - self.qtTypeIdent = driver.qtTypeIdent - # We need datlastsysoid to check if current table is system table - self.datlastsysoid = self.manager.db_info[ - did - ]['datlastsysoid'] if self.manager.db_info is not None and \ - did in self.manager.db_info else 0 - - ver = self.manager.version - # Set the template path for the SQL scripts - self.template_path = 'table/sql/#{0}#'.format(ver) - - # Template for Column ,check constraint and exclusion constraint node - self.column_template_path = 'column/sql/#{0}#'.format(ver) - self.check_constraint_template_path = 'check_constraint/sql/#{0}#'.format(ver) - self.exclusion_constraint_template_path = 'exclusion_constraint/sql/#{0}#'.format(ver) - - # Template for PK & Unique constraint node - self.index_constraint_template_path = 'index_constraint/sql' - - # Template for foreign key constraint node - self.foreign_key_template_path = 'foreign_key/sql/#{0}#'.format(ver) - - # Template for index node - self.index_template_path = 'index/sql/#{0}#'.format(ver) - - # Template for trigger node - self.trigger_template_path = 'trigger/sql/#{0}#'.format(ver) - - # Template for rules node - self.rules_template_path = 'rules/sql' - - # Supported ACL for table - self.acl = ['a', 'r', 'w', 'd', 'D', 'x', 't'] - - # Supported ACL for columns - self.column_acl = ['a', 'r', 'w', 'x'] - - return f(*args, **kwargs) - - return wrap - - @check_precondition + @BaseTableView.check_precondition def list(self, gid, sid, did, scid): """ This function is used to list all the table nodes within that collection. 
@@ -330,10 +258,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: JSON of available table nodes """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: @@ -343,7 +272,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def node(self, gid, sid, did, scid, tid): """ This function is used to list all the table nodes within that collection. @@ -359,9 +288,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): JSON of available table nodes """ res = [] - SQL = render_template("/".join([self.template_path, - 'nodes.sql']), - scid=scid, tid=tid) + SQL = render_template( + "/".join([self.table_template_path, 'nodes.sql']), + scid=scid, tid=tid + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=rset) @@ -372,9 +302,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): rset['rows'][0]['oid'], scid, rset['rows'][0]['name'], - icon="icon-table", + icon="icon-partition" if 'is_partitioned' in rset['rows'][0] and rset['rows'][0]['is_partitioned'] else "icon-table", tigger_count=rset['rows'][0]['triggercount'], - has_enable_triggers=rset['rows'][0]['has_enable_triggers'] + has_enable_triggers=rset['rows'][0]['has_enable_triggers'], + is_partitioned=rset['rows'][0]['is_partitioned'] if 'is_partitioned' in rset['rows'][0] else False ) return make_json_response( @@ -382,8 +313,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - - @check_precondition + @BaseTableView.check_precondition def nodes(self, gid, sid, did, scid): """ This function is used to list all the 
table nodes within that collection. @@ -398,9 +328,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): JSON of available table nodes """ res = [] - SQL = render_template("/".join([self.template_path, - 'nodes.sql']), - scid=scid) + SQL = render_template( + "/".join([self.table_template_path, 'nodes.sql']), + scid=scid + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=rset) @@ -411,9 +342,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): row['oid'], scid, row['name'], - icon="icon-table", + icon="icon-partition" if 'is_partitioned' in row and row['is_partitioned'] else "icon-table", tigger_count=row['triggercount'], - has_enable_triggers=row['has_enable_triggers'] + has_enable_triggers=row['has_enable_triggers'], + is_partitioned=row['is_partitioned'] if 'is_partitioned' in row else False )) return make_json_response( @@ -421,7 +353,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_all_tables(self, gid, sid, did, scid, tid=None): """ Args: @@ -435,9 +367,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns the lits of tables required for constraints. 
""" try: - SQL = render_template("/".join([self.template_path, - 'get_tables_for_constraints.sql']), - show_sysobj=self.blueprint.show_system_objects) + SQL = render_template( + "/".join([ + self.table_template_path, 'get_tables_for_constraints.sql' + ]), + show_sysobj=self.blueprint.show_system_objects + ) status, res = self.conn.execute_dict(SQL) @@ -452,7 +387,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def get_table_vacuum(self, gid, sid, did, scid=None, tid=None): """ Fetch the default values for table auto-vacuum @@ -468,7 +403,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_toast_table_vacuum(self, gid, sid, did, scid=None, tid=None): """ Fetch the default values for toast table auto-vacuum @@ -484,7 +419,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_access_methods(self, gid, sid, did, scid, tid=None): """ This function returns access methods. 
@@ -501,9 +436,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ res = [{'label': '', 'value': ''}] - sql = render_template( - "/".join([self.exclusion_constraint_template_path, - 'get_access_methods.sql'])) + sql = render_template("/".join([ + self.exclusion_constraint_template_path, 'get_access_methods.sql' + ])) + status, rest = self.conn.execute_2darray(sql) if not status: @@ -518,7 +454,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_oper_class(self, gid, sid, did, scid, tid=None): """ @@ -537,9 +473,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): try: if data and 'indextype' in data: SQL = render_template( - "/".join([self.exclusion_constraint_template_path, - 'get_oper_class.sql']), - indextype=data['indextype']) + "/".join([ + self.exclusion_constraint_template_path, + 'get_oper_class.sql' + ]), + indextype=data['indextype'] + ) status, res = self.conn.execute_2darray(SQL) @@ -555,7 +494,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def get_operator(self, gid, sid, did, scid, tid=None): """ @@ -574,10 +513,13 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): try: if data and 'col_type' in data: SQL = render_template( - "/".join([self.exclusion_constraint_template_path, - 'get_operator.sql']), + "/".join([ + self.exclusion_constraint_template_path, + 'get_operator.sql' + ]), type=data['col_type'], - show_sysobj=self.blueprint.show_system_objects) + show_sysobj=self.blueprint.show_system_objects + ) status, res = self.conn.execute_2darray(SQL) @@ -593,506 +535,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - def _columns_formatter(self, tid, data): - 
""" - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for column node - """ - for column in data['columns']: - - # We need to format variables according to client js collection - if 'attoptions' in column and column['attoptions'] is not None: - spcoptions = [] - for spcoption in column['attoptions']: - k, v = spcoption.split('=') - spcoptions.append({'name': k, 'value': v}) - - column['attoptions'] = spcoptions - - # Need to format security labels according to client js collection - if 'seclabels' in column and column['seclabels'] is not None: - seclabels = [] - for seclbls in column['seclabels']: - k, v = seclbls.split('=') - seclabels.append({'provider': k, 'label': v}) - - column['seclabels'] = seclabels - - if 'attnum' in column and column['attnum'] is not None and \ - column['attnum'] > 0: - # We need to parse & convert ACL coming from database to json format - SQL = render_template("/".join([self.column_template_path, 'acl.sql']), - tid=tid, clid=column['attnum']) - status, acl = self.conn.execute_dict(SQL) - - if not status: - return internal_server_error(errormsg=acl) - - # We will set get privileges from acl sql so we don't need - # it from properties sql - column['attacl'] = [] - - for row in acl['rows']: - priv = parse_priv_from_db(row) - column.setdefault(row['deftype'], []).append(priv) - - # we are receiving request when in edit mode - # we will send filtered types related to current type - present_type = column['cltype'] - - type_id = column['atttypid'] - - fulltype = self.get_full_type( - column['typnspname'], column['typname'], - column['isdup'], column['attndims'], column['atttypmod'] - ) - - length = False - precision = False - if 'elemoid' in column: - length, precision, typeval = self.get_length_precision(column['elemoid']) - - # Set length and precision to None - column['attlen'] = None - column['attprecision'] = None - - # If we have length & 
precision both - if length and precision: - matchObj = re.search(r'(\d+),(\d+)', fulltype) - if matchObj: - column['attlen'] = matchObj.group(1) - column['attprecision'] = matchObj.group(2) - elif length: - # If we have length only - matchObj = re.search(r'(\d+)', fulltype) - if matchObj: - column['attlen'] = matchObj.group(1) - column['attprecision'] = None - - - SQL = render_template("/".join([self.column_template_path, - 'is_referenced.sql']), - tid=tid, clid=column['attnum']) - - status, is_reference = self.conn.execute_scalar(SQL) - - edit_types_list = list() - # We will need present type in edit mode - - if column['typnspname'] == "pg_catalog" or column['typnspname'] == "public": - edit_types_list.append(present_type) - else: - t = self.qtTypeIdent(self.conn, column['typnspname'], present_type) - edit_types_list.append(t) - column['cltype'] = t - - if int(is_reference) == 0: - SQL = render_template("/".join([self.column_template_path, - 'edit_mode_types.sql']), - type_id=type_id) - status, rset = self.conn.execute_2darray(SQL) - - for row in rset['rows']: - edit_types_list.append(row['typname']) - else: - edit_types_list.append(present_type) - - column['edit_types'] = edit_types_list - column['cltype'] = DataTypeReader.parse_type_name(column['cltype']) - - if 'indkey' in column: - # Current column - attnum = str(column['attnum']) - - # Single/List of primary key column(s) - indkey = str(column['indkey']) - - # We will check if column is in primary column(s) - if attnum in indkey.split(" "): - column['is_primary_key'] = True - else: - column['is_primary_key'] = False - - return data - - def _index_constraints_formatter(self, did, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for index constraint node - """ - - # We will fetch all the index constraints for the table - index_constraints = { - 'p': 'primary_key', 'u': 'unique_constraint' - } - - 
for ctype in index_constraints.keys(): - data[index_constraints[ctype]] = [] - - sql = render_template("/".join([self.index_constraint_template_path, - 'properties.sql']), - did=did, tid=tid, - constraint_type=ctype) - status, res = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=res) - - for row in res['rows']: - result = row - sql = render_template( - "/".join([self.index_constraint_template_path, - 'get_constraint_cols.sql']), - cid=row['oid'], - colcnt=row['indnatts']) - status, res = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=res) - - columns = [] - for r in res['rows']: - columns.append({"column": r['column'].strip('"')}) - - result['columns'] = columns - - # If not exists then create list and/or append into - # existing list [ Adding into main data dict] - data.setdefault(index_constraints[ctype], []).append(result) - - return data - - def _foreign_key_formatter(self, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for foreign key constraint node - """ - - # We will fetch all the index constraints for the table - sql = render_template("/".join([self.foreign_key_template_path, - 'properties.sql']), - tid=tid) - - status, result = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=result) - - for fk in result['rows']: - - sql = render_template("/".join([self.foreign_key_template_path, - 'get_constraint_cols.sql']), - tid=tid, - keys=zip(fk['confkey'], fk['conkey']), - confrelid=fk['confrelid']) - - status, res = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=res) - - columns = [] - cols = [] - for row in res['rows']: - columns.append({"local_column": row['conattname'], - "references": fk['confrelid'], - "referenced": row['confattname']}) - cols.append(row['conattname']) - - fk['columns'] = 
columns - - SQL = render_template("/".join([self.foreign_key_template_path, - 'get_parent.sql']), - tid=fk['columns'][0]['references']) - - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - fk['remote_schema'] = rset['rows'][0]['schema'] - fk['remote_table'] = rset['rows'][0]['table'] - - coveringindex = self.search_coveringindex(tid, cols) - - fk['coveringindex'] = coveringindex - if coveringindex: - fk['autoindex'] = True - fk['hasindex'] = True - else: - fk['autoindex'] = False - fk['hasindex'] = False - # If not exists then create list and/or append into - # existing list [ Adding into main data dict] - data.setdefault('foreign_key', []).append(fk) - - return data - - def _check_constraint_formatter(self, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for check constraint node - """ - - # We will fetch all the index constraints for the table - SQL = render_template("/".join([self.check_constraint_template_path, - 'properties.sql']), - tid=tid) - - status, res = self.conn.execute_dict(SQL) - - if not status: - return internal_server_error(errormsg=res) - # If not exists then create list and/or append into - # existing list [ Adding into main data dict] - - data['check_constraint'] = res['rows'] - - return data - - def _exclusion_constraint_formatter(self, did, tid, data): - """ - Args: - tid: Table OID - data: dict of query result - - Returns: - It will return formatted output of query result - as per client model format for exclusion constraint node - """ - - # We will fetch all the index constraints for the table - sql = render_template("/".join([self.exclusion_constraint_template_path, - 'properties.sql']), - did=did, tid=tid) - - status, result = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=result) - - for ex in result['rows']: - - sql = 
render_template("/".join([self.exclusion_constraint_template_path, - 'get_constraint_cols.sql']), - cid=ex['oid'], - colcnt=ex['indnatts']) - - status, res = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=res) - - columns = [] - for row in res['rows']: - if row['options'] & 1: - order = False - nulls_order = True if (row['options'] & 2) else False - else: - order = True - nulls_order = True if (row['options'] & 2) else False - - columns.append({"column": row['coldef'].strip('"'), - "oper_class": row['opcname'], - "order": order, - "nulls_order": nulls_order, - "operator": row['oprname'], - "col_type": row['datatype'] - }) - - ex['columns'] = columns - # If not exists then create list and/or append into - # existing list [ Adding into main data dict] - data.setdefault('exclude_constraint', []).append(ex) - - return data - - def search_coveringindex(self, tid, cols): - """ - - Args: - tid: Table id - cols: column list - - Returns: - - """ - - cols = set(cols) - SQL = render_template("/".join([self.foreign_key_template_path, - 'get_constraints.sql']), - tid=tid) - status, constraints = self.conn.execute_dict(SQL) - - if not status: - raise Exception(constraints) - - for costrnt in constraints['rows']: - - sql = render_template( - "/".join([self.foreign_key_template_path, 'get_cols.sql']), - cid=costrnt['oid'], - colcnt=costrnt['indnatts']) - status, rest = self.conn.execute_dict(sql) - - if not status: - return internal_server_error(errormsg=rest) - - indexcols = set() - for r in rest['rows']: - indexcols.add(r['column'].strip('"')) - - if len(cols - indexcols) == len(indexcols - cols) == 0: - return costrnt["idxname"] - - return None - - def _formatter(self, did, scid, tid, data): - """ - Args: - data: dict of query result - scid: schema oid - tid: table oid - - Returns: - It will return formatted output of query result - as per client model format - """ - # Need to format security labels according to client js collection - if 
'seclabels' in data and data['seclabels'] is not None: - seclabels = [] - for seclbls in data['seclabels']: - k, v = seclbls.split('=') - seclabels.append({'provider': k, 'label': v}) - - data['seclabels'] = seclabels - - # We need to parse & convert ACL coming from database to json format - SQL = render_template("/".join([self.template_path, 'acl.sql']), - tid=tid, scid=scid) - status, acl = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=acl) - - # We will set get privileges from acl sql so we don't need - # it from properties sql - for row in acl['rows']: - priv = parse_priv_from_db(row) - if row['deftype'] in data: - data[row['deftype']].append(priv) - else: - data[row['deftype']] = [priv] - - # We will add Auto vacuum defaults with out result for grid - data['vacuum_table'] = self.parse_vacuum_data(self.conn, data, 'table') - data['vacuum_toast'] = self.parse_vacuum_data(self.conn, data, 'toast') - - # Fetch columns for the table logic - # - # 1) Check if of_type and inherited tables are present? 
- # 2) If yes then Fetch all the columns for of_type and inherited tables - # 3) Add columns in columns collection - # 4) Find all the columns for tables and filter out columns which are - # not inherited from any table & format them one by one - - # Get of_type table columns and add it into columns dict - if data['typname']: - SQL = render_template("/".join([self.template_path, - 'get_columns_for_table.sql']), - tname=data['typname']) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - data['columns'] = res['rows'] - - # Get inherited table(s) columns and add it into columns dict - elif data['coll_inherits'] and len(data['coll_inherits']) > 0: - columns = [] - # Return all tables which can be inherited & do not show - # system columns - SQL = render_template("/".join([self.template_path, 'get_inherits.sql']), - show_system_objects=False - ) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - for row in rset['rows']: - if row['inherits'] in data['coll_inherits']: - # Fetch columns using inherited table OID - SQL = render_template("/".join([self.template_path, - 'get_columns_for_table.sql']), - tid=row['oid']) - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - columns.extend(res['rows'][:]) - data['columns'] = columns - - # We will fetch all the columns for the table using - # columns properties.sql, so we need to set template path - SQL = render_template("/".join([self.column_template_path, - 'properties.sql']), - tid=tid, - show_sys_objects=False - ) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - all_columns = res['rows'] - - # Filter inherited columns from all columns - if 'columns' in data and len(data['columns']) > 0 \ - and len(all_columns) > 0: - for row in data['columns']: - for i, col in enumerate(all_columns): - # If 
both name are same then remove it - # as it is inherited from other table - if col['name'] == row['name']: - # Remove same column from all_columns as - # already have it columns collection - del all_columns[i] - - # If any column is added then update columns collection - if len(all_columns) > 0: - data['columns'] += all_columns - # If no inherited columns found then add all columns - elif len(all_columns) > 0: - data['columns'] = all_columns - - if 'columns' in data and len(data['columns']) > 0: - data = self._columns_formatter(tid, data) - - # Here we will add constraint in our output - data = self._index_constraints_formatter(did, tid, data) - data = self._foreign_key_formatter(tid, data) - data = self._check_constraint_formatter(tid, data) - data = self._exclusion_constraint_formatter(did, tid, data) - - return data - - @check_precondition + @BaseTableView.check_precondition def properties(self, gid, sid, did, scid, tid): """ This function will show the properties of the selected table node. 
@@ -1109,10 +552,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): JSON of selected table node """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) @@ -1120,41 +564,22 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): if len(res['rows']) == 0: return gone(gettext("The specified table could not be found.")) - data = res['rows'][0] + return super(TableView, self).properties( + gid, sid, did, scid, tid, res) - data['vacuum_settings_str'] = "" - - if data['table_vacuum_settings_str'] is not None: - data['vacuum_settings_str'] += data[ - 'table_vacuum_settings_str'].replace(',', '\n') - - if data['toast_table_vacuum_settings_str'] is not None: - data['vacuum_settings_str'] += '\n' + '\n'.join( - ['toast_' + setting for setting in data[ - 'toast_table_vacuum_settings_str' - ].split(',')] - ) - data['vacuum_settings_str'] = data[ - 'vacuum_settings_str' - ].replace("=", " = ") - - data = self._formatter(did, scid, tid, data) - - return ajax_response( - response=data, - status=200 - ) - - @check_precondition + @BaseTableView.check_precondition def types(self, gid, sid, did, scid, tid=None, clid=None): """ Returns: This function will return list of types available for column node for node-ajax-control """ - condition = render_template("/".join([self.template_path, - 'get_types_where_condition.sql']), - show_system_objects=self.blueprint.show_system_objects) + condition = render_template( + "/".join([ + self.table_template_path, 'get_types_where_condition.sql' + ]), + show_system_objects=self.blueprint.show_system_objects + ) status, types = self.get_types(self.conn, condition, True) @@ -1166,7 
+591,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - @check_precondition + @BaseTableView.check_precondition def get_columns(self, gid, sid, did, scid, tid=None): """ Returns the Table Columns. @@ -1189,13 +614,19 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data = request.args if request.args else None try: if data and 'tid' in data: - SQL = render_template("/".join([self.template_path, - 'get_columns_for_table.sql']), - tid=data['tid']) + SQL = render_template( + "/".join([ + self.table_template_path, 'get_columns_for_table.sql' + ]), + tid=data['tid'] + ) elif data and 'tname' in data: - SQL = render_template("/".join([self.template_path, - 'get_columns_for_table.sql']), - tname=data['tname']) + SQL = render_template( + "/".join([ + self.table_template_path, 'get_columns_for_table.sql' + ]), + tname=data['tname'] + ) if SQL: status, res = self.conn.execute_dict(SQL) @@ -1211,7 +642,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def get_oftype(self, gid, sid, did, scid, tid=None): """ Returns: @@ -1220,10 +651,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ res = [{'label': '', 'value': ''}] try: - SQL = render_template("/".join([self.template_path, - 'get_oftype.sql']), scid=scid, - server_type=self.manager.server_type, - show_sys_objects=self.blueprint.show_system_objects) + SQL = render_template( + "/".join([self.table_template_path, 'get_oftype.sql']), + scid=scid, + server_type=self.manager.server_type, + show_sys_objects=self.blueprint.show_system_objects + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) @@ -1241,7 +674,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - 
@check_precondition + @BaseTableView.check_precondition def get_inherits(self, gid, sid, did, scid, tid=None): """ Returns: @@ -1250,10 +683,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ try: res = [] - SQL = render_template("/".join([self.template_path, 'get_inherits.sql']), - show_system_objects=self.blueprint.show_system_objects, - tid=tid, - server_type=self.manager.server_type) + SQL = render_template( + "/".join([self.table_template_path, 'get_inherits.sql']), + show_system_objects=self.blueprint.show_system_objects, + tid=tid, + server_type=self.manager.server_type + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) @@ -1271,7 +706,40 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition + def get_attach_tables(self, gid, sid, did, scid, tid=None): + """ + Returns: + This function will return list of tables available to be attached + to the partitioned table. 
+ """ + try: + res = [] + SQL = render_template( + "/".join([ + self.partition_template_path, 'get_attach_tables.sql' + ]), + tid=tid + ) + + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=res) + + for row in rset['rows']: + res.append( + {'label': row['table_name'], 'value': row['oid']} + ) + + return make_json_response( + data=res, + status=200 + ) + + except Exception as e: + return internal_server_error(errormsg=str(e)) + + @BaseTableView.check_precondition def get_relations(self, gid, sid, did, scid, tid=None): """ Returns: @@ -1280,9 +748,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ res = [{'label': '', 'value': ''}] try: - SQL = render_template("/".join([self.template_path, 'get_relations.sql']), - show_sys_objects=self.blueprint.show_system_objects, - server_type=self.manager.server_type) + SQL = render_template( + "/".join([self.table_template_path, 'get_relations.sql']), + show_sys_objects=self.blueprint.show_system_objects, + server_type=self.manager.server_type + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) @@ -1298,41 +768,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @staticmethod - def _cltype_formatter(data_type): - """ - - Args: - data_type: Type string - - Returns: - We need to remove [] from type and append it - after length/precision so we will send flag for - sql template - """ - if '[]' in data_type: - return data_type[:-2], True - else: - return data_type, False - - @staticmethod - def convert_length_precision_to_string(data): - """ - This function is used to convert length & precision to string - to handle case like when user gives 0 as length - - Args: - data: Data from client - - Returns: - Converted data - """ - if 'attlen' in data and data['attlen'] is not None: - data['attlen'] = 
str(data['attlen']) - if 'attprecision' in data and data['attprecision'] is not None: - data['attprecision'] = str(data['attprecision']) - return data - def _parse_format_columns(self, data, mode=None): """ data: @@ -1360,12 +795,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): # check type for '[]' in it c['cltype'], c['hasSqrBracket'] = self._cltype_formatter(c['cltype']) - c = self.convert_length_precision_to_string(c) + c = TableView.convert_length_precision_to_string(c) data['columns'][action] = final_columns else: - # We need to exclude all the columns which are inherited from other tables - # 'CREATE' mode + # We need to exclude all the columns which are inherited from other + # tables 'CREATE' mode final_columns = [] for c in columns: @@ -1382,35 +817,13 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): # check type for '[]' in it c['cltype'], c['hasSqrBracket'] = self._cltype_formatter(c['cltype']) - c = self.convert_length_precision_to_string(c) + c = TableView.convert_length_precision_to_string(c) data['columns'] = final_columns return data - def check_and_convert_name_to_string(self, data): - """ - This function will check and covert table to string incase - it is numeric - - Args: - data: data dict - - Returns: - Updated data dict - """ - # For Python2, it can be int, long, float - if hasattr(str, 'decode'): - if isinstance(data['name'], (int, long, float)): - data['name'] = str(data['name']) - else: - # For Python3, it can be int, float - if isinstance(data['name'], (int, float)): - data['name'] = str(data['name']) - return data - - - @check_precondition + @BaseTableView.check_precondition def create(self, gid, sid, did, scid): """ This function will creates new the table object @@ -1451,7 +864,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): # Parse & format columns data = self._parse_format_columns(data) - data = self.check_and_convert_name_to_string(data) + data = 
TableView.check_and_convert_name_to_string(data) # 'coll_inherits' is Array but it comes as string from browser # We will convert it again to list @@ -1463,9 +876,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): if 'foreign_key' in data: for c in data['foreign_key']: - SQL = render_template("/".join([self.foreign_key_template_path, - 'get_parent.sql']), - tid=c['columns'][0]['references']) + SQL = render_template( + "/".join([ + self.foreign_key_template_path, 'get_parent.sql' + ]), + tid=c['columns'][0]['references'] + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=rset) @@ -1474,9 +890,23 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): c['remote_table'] = rset['rows'][0]['table'] try: - SQL = render_template("/".join([self.template_path, - 'create.sql']), - data=data, conn=self.conn) + partitions_sql = '' + partitioned = False + if 'is_partitioned' in data and data['is_partitioned']: + data['relkind'] = 'p' + # create partition scheme + data['partition_scheme'] = self.get_partition_scheme(data) + partitions_sql = self.get_partitions_sql(data) + partitioned = True + + SQL = render_template( + "/".join([self.table_template_path, 'create.sql']), + data=data, conn=self.conn + ) + + # Append SQL for partitions + SQL += '\n' + partitions_sql + status, res = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=res) @@ -1490,16 +920,21 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data['name'] = data['name'][0:CONST_MAX_CHAR_COUNT] # Get updated schema oid - SQL = render_template("/".join([self.template_path, - 'get_schema_oid.sql']), tname=data['name']) + SQL = render_template( + "/".join([self.table_template_path, 'get_schema_oid.sql']), + tname=data['name'] + ) status, scid = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=scid) # we need oid to to add object in tree at browser - SQL = 
render_template("/".join([self.template_path, - 'get_oid.sql']), scid=scid, data=data) + SQL = render_template( + "/".join([self.table_template_path, 'get_oid.sql']), + scid=scid, data=data + ) + status, tid = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=tid) @@ -1509,13 +944,14 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): tid, scid, data['name'], - icon="icon-table" + icon="icon-partition" if partitioned else "icon-table", + is_partitioned=partitioned ) ) except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def update(self, gid, sid, did, scid, tid): """ This function will update an existing table object @@ -1538,34 +974,21 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data[k] = v try: - SQL, name = self.get_sql(did, scid, tid, data) - - SQL = SQL.strip('\n').strip(' ') - status, res = self.conn.execute_scalar(SQL) - if not status: - return internal_server_error(errormsg=res) - - SQL = render_template("/".join([self.template_path, - 'get_schema_oid.sql']), tid=tid) - status, res = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=res) - - # new schema id - scid = res['rows'][0]['scid'] - - return jsonify( - node=self.blueprint.generate_browser_node( - tid, - scid, - name, - icon="icon-%s" % self.node_type - ) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid ) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + return super(TableView, self).update( + gid, sid, did, scid, tid, data, res) except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def delete(self, gid, sid, did, scid, tid): """ This function will deletes the table object @@ -1585,10 
+1008,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): cascade = False try: - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) @@ -1606,10 +1030,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data = res['rows'][0] - SQL = render_template("/".join([self.template_path, - 'delete.sql']), - data=data, cascade=cascade, - conn=self.conn) + SQL = render_template( + "/".join([self.table_template_path, 'delete.sql']), + data=data, cascade=cascade, + conn=self.conn + ) status, res = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=res) @@ -1626,7 +1051,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def truncate(self, gid, sid, did, scid, tid): """ This function will truncate the table object @@ -1638,43 +1063,23 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): scid: Schema ID tid: Table ID """ - # Below will decide if it's simple drop or drop with cascade call - data = request.form if request.form else json.loads( - request.data, encoding='utf-8' - ) - # Convert str 'true' to boolean type - is_cascade = json.loads(data['cascade']) try: - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return 
internal_server_error(errormsg=res) - data = res['rows'][0] - SQL = render_template("/".join([self.template_path, - 'truncate.sql']), - data=data, cascade=is_cascade) - status, res = self.conn.execute_scalar(SQL) - if not status: - return internal_server_error(errormsg=res) - - return make_json_response( - success=1, - info=gettext("Table truncated"), - data={ - 'id': tid, - 'scid': scid - } - ) + return super(TableView, self).truncate(gid, sid, did, scid, tid, res) except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def enable_disable_triggers(self, gid, sid, did, scid, tid): """ This function will enable/disable trigger(s) on the table object @@ -1694,18 +1099,22 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): is_enable = json.loads(data['enable']) try: - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) data = res['rows'][0] - SQL = render_template("/".join([self.template_path, - 'enable_disable_trigger.sql']), - data=data, is_enable_trigger=is_enable) + SQL = render_template( + "/".join([ + self.table_template_path, 'enable_disable_trigger.sql' + ]), + data=data, is_enable_trigger=is_enable + ) status, res = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=res) @@ -1723,7 +1132,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): except Exception as e: return internal_server_error(errormsg=str(e)) - @check_precondition + @BaseTableView.check_precondition def reset(self, gid, sid, did, scid, tid): """ This function will reset statistics of table @@ -1735,27 +1144,9 @@ class 
TableView(PGChildNodeView, DataTypeReader, VacuumSettings): scid: Schema ID tid: Table ID """ - try: - SQL = render_template("/".join([self.template_path, - 'reset_stats.sql']), - tid=tid) - status, res = self.conn.execute_scalar(SQL) - if not status: - return internal_server_error(errormsg=res) + return BaseTableView.reset_statistics(self, scid, tid) - return make_json_response( - success=1, - info=gettext("Table statistics have been reset"), - data={ - 'id': tid, - 'scid': scid - } - ) - - except Exception as e: - return internal_server_error(errormsg=str(e)) - - @check_precondition + @BaseTableView.check_precondition def msql(self, gid, sid, did, scid, tid=None): """ This function will create modified sql for table object @@ -1768,13 +1159,24 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): tid: Table ID """ data = dict() + res = None for k, v in request.args.items(): try: data[k] = json.loads(v, encoding='utf-8') except (ValueError, TypeError, KeyError): data[k] = v - SQL, name = self.get_sql(did, scid, tid, data) + if tid is not None: + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + SQL, name = self.get_sql(did, scid, tid, data, res) SQL = re.sub('\n{2,}', '\n\n', SQL) SQL = SQL.strip('\n') if SQL == '': @@ -1784,670 +1186,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): status=200 ) - def get_index_constraint_sql(self, did, tid, data): - """ - Args: - tid: Table ID - data: data dict coming from the client - - Returns: - This function will generate modified sql for index constraints - (Primary Key & Unique) - """ - sql = [] - # We will fetch all the index constraints for the table - index_constraints = { - 'p': 'primary_key', 'u': 'unique_constraint' - } - - for ctype in index_constraints.keys(): - # Check if 
constraint is in data - # If yes then we need to check for add/change/delete - if index_constraints[ctype] in data: - constraint = data[index_constraints[ctype]] - # If constraint(s) is/are deleted - if 'deleted' in constraint: - for c in constraint['deleted']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - # Sql for drop - sql.append( - render_template("/".join( - [self.index_constraint_template_path, - 'delete.sql']), - data=c, conn=self.conn).strip('\n') - ) - - if 'changed' in constraint: - for c in constraint['changed']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - properties_sql = render_template("/".join( - [self.index_constraint_template_path, 'properties.sql']), - did=did, tid=tid, cid=c['oid'], constraint_type=ctype) - status, res = self.conn.execute_dict(properties_sql) - if not status: - return internal_server_error(errormsg=res) - - old_data = res['rows'][0] - # Sql to update object - sql.append( - render_template("/".join([ - self.index_constraint_template_path, - 'update.sql']), data=c, o_data=old_data, - conn=self.conn).strip('\n') - ) - - if 'added' in constraint: - for c in constraint['added']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - # Sql to add object - if self.validate_constrains(index_constraints[ctype], c): - sql.append( - render_template( - "/".join([self.index_constraint_template_path, - 'create.sql']), - data=c, conn=self.conn, - constraint_name='PRIMARY KEY' - if ctype == 'p' else 'UNIQUE' - ).strip('\n') - ) - else: - sql.append( - gettext( - '-- definition incomplete for {0} constraint'.format(index_constraints[ctype]) - ) - ) - if len(sql) > 0: - # Join all the sql(s) as single string - return '\n\n'.join(sql) - else: - return None - - def get_foreign_key_sql(self, tid, data): - """ - Args: - tid: Table ID - data: data dict coming from the client - - Returns: - This function will generate modified sql for foreign key - """ - sql = [] - # Check if constraint is in data - # If yes 
then we need to check for add/change/delete - if 'foreign_key' in data: - constraint = data['foreign_key'] - # If constraint(s) is/are deleted - if 'deleted' in constraint: - for c in constraint['deleted']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - # Sql for drop - sql.append( - render_template("/".join( - [self.foreign_key_template_path, - 'delete.sql']), - data=c, conn=self.conn).strip('\n') - ) - - if 'changed' in constraint: - for c in constraint['changed']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - properties_sql = render_template("/".join( - [self.foreign_key_template_path, 'properties.sql']), - tid=tid, cid=c['oid']) - status, res = self.conn.execute_dict(properties_sql) - if not status: - return internal_server_error(errormsg=res) - - old_data = res['rows'][0] - # Sql to update object - sql.append( - render_template("/".join([ - self.foreign_key_template_path, - 'update.sql']), data=c, o_data=old_data, - conn=self.conn).strip('\n') - ) - - if not self.validate_constrains('foreign_key', c): - sql.append( - gettext( - '-- definition incomplete for foreign_key constraint' - ) - ) - return '\n\n'.join(sql) - - if 'columns' in c: - cols = [] - for col in c['columns']: - cols.append(col['local_column']) - - coveringindex = self.search_coveringindex(tid, cols) - - if coveringindex is None and 'autoindex' in c and c['autoindex'] and \ - ('coveringindex' in c and - c['coveringindex'] != ''): - sql.append(render_template( - "/".join([self.foreign_key_template_path, 'create_index.sql']), - data=c, conn=self.conn).strip('\n') - ) - - if 'added' in constraint: - for c in constraint['added']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - # Sql to add object - # Columns - - if not self.validate_constrains('foreign_key', c): - sql.append( - gettext( - '-- definition incomplete for foreign_key constraint' - ) - ) - return '\n\n'.join(sql) - - SQL = render_template("/".join([self.foreign_key_template_path, - 
'get_parent.sql']), - tid=c['columns'][0]['references']) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - c['remote_schema'] = rset['rows'][0]['schema'] - c['remote_table'] = rset['rows'][0]['table'] - - sql.append( - render_template( - "/".join([self.foreign_key_template_path, - 'create.sql']), - data=c, conn=self.conn - ).strip('\n') - ) - - if c['autoindex']: - sql.append( - render_template( - "/".join([self.foreign_key_template_path, - 'create_index.sql']), - data=c, conn=self.conn).strip('\n') - ) - - if len(sql) > 0: - # Join all the sql(s) as single string - return '\n\n'.join(sql) - else: - return None - - def get_check_constraint_sql(self, tid, data): - """ - Args: - tid: Table ID - data: data dict coming from the client - - Returns: - This function will generate modified sql for check constraint - """ - sql = [] - # Check if constraint is in data - # If yes then we need to check for add/change/delete - if 'check_constraint' in data: - constraint = data['check_constraint'] - # If constraint(s) is/are deleted - if 'deleted' in constraint: - for c in constraint['deleted']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - # Sql for drop - sql.append( - render_template("/".join( - [self.check_constraint_template_path, - 'delete.sql']), - data=c, conn=self.conn).strip('\n') - ) - - if 'changed' in constraint: - for c in constraint['changed']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - properties_sql = render_template("/".join( - [self.check_constraint_template_path, 'properties.sql']), - tid=tid, cid=c['oid']) - status, res = self.conn.execute_dict(properties_sql) - if not status: - return internal_server_error(errormsg=res) - - old_data = res['rows'][0] - # Sql to update object - sql.append( - render_template("/".join([ - self.check_constraint_template_path, - 'update.sql']), data=c, o_data=old_data, - conn=self.conn).strip('\n') - ) - - if 'added' in 
constraint: - for c in constraint['added']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - if not self.validate_constrains('check_constraint', c): - sql.append( - gettext( - '-- definition incomplete for check_constraint' - ) - ) - return '\n\n'.join(sql) - - sql.append( - render_template( - "/".join([self.check_constraint_template_path, - 'create.sql']), - data=c, conn=self.conn - ).strip('\n') - ) - - if len(sql) > 0: - # Join all the sql(s) as single string - return '\n\n'.join(sql) - else: - return None - - def get_exclusion_constraint_sql(self, did, tid, data): - """ - Args: - tid: Table ID - data: data dict coming from the client - - Returns: - This function will generate modified sql for exclusion constraint - """ - sql = [] - # Check if constraint is in data - # If yes then we need to check for add/change/delete - if 'exclude_constraint' in data: - constraint = data['exclude_constraint'] - # If constraint(s) is/are deleted - if 'deleted' in constraint: - for c in constraint['deleted']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - # Sql for drop - sql.append( - render_template("/".join( - [self.exclusion_constraint_template_path, - 'delete.sql']), - data=c, conn=self.conn).strip('\n') - ) - - if 'changed' in constraint: - for c in constraint['changed']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - properties_sql = render_template("/".join( - [self.exclusion_constraint_template_path, 'properties.sql']), - did=did, tid=tid, cid=c['oid']) - status, res = self.conn.execute_dict(properties_sql) - if not status: - return internal_server_error(errormsg=res) - - old_data = res['rows'][0] - # Sql to update object - sql.append( - render_template("/".join([ - self.exclusion_constraint_template_path, - 'update.sql']), data=c, o_data=old_data, - conn=self.conn).strip('\n') - ) - - if 'added' in constraint: - for c in constraint['added']: - c['schema'] = data['schema'] - c['table'] = data['name'] - - if not 
self.validate_constrains('exclude_constraint', c): - sql.append( - gettext( - '-- definition incomplete for exclusion_constraint' - ) - ) - return '\n\n'.join(sql) - - sql.append( - render_template( - "/".join([self.exclusion_constraint_template_path, - 'create.sql']), - data=c, conn=self.conn - ).strip('\n') - ) - - if len(sql) > 0: - # Join all the sql(s) as single string - return u'\n\n'.join(sql) - else: - return None - - def get_trigger_function_schema(self, data): - """ - This function will return trigger function with schema name - """ - # If language is 'edbspl' then trigger function should be - # 'Inline EDB-SPL' else we will find the trigger function - # with schema name. - if data['lanname'] == 'edbspl': - data['tfunction'] = 'Inline EDB-SPL' - else: - SQL = render_template( - "/".join( - [self.trigger_template_path,'get_triggerfunctions.sql'] - ), - tgfoid=data['tgfoid'], - show_system_objects=self.blueprint.show_system_objects - ) - - status, result = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - # Update the trigger function which we have fetched with - # schema name - if 'rows' in result and len(result['rows']) > 0 and \ - 'tfunctions' in result['rows'][0]: - data['tfunction'] = result['rows'][0]['tfunctions'] - return data - - def _format_args(self, args): - """ - This function will format arguments. 
- - Args: - args: Arguments - - Returns: - Formated arguments for function - """ - formatted_args = ["'{0}'".format(arg) for arg in args] - return ', '.join(formatted_args) - - def get_sql(self, did, scid, tid, data): - """ - This function will generate create/update sql from model data - coming from client - """ - if tid is not None: - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - old_data = res['rows'][0] - old_data = self._formatter(did, scid, tid, old_data) - - # We will convert privileges coming from client required - if 'relacl' in data: - for mode in ['added', 'changed', 'deleted']: - if mode in data['relacl']: - data['relacl'][mode] = parse_priv_to_db( - data['relacl'][mode], self.acl - ) - - # If name is not present in request data - if 'name' not in data: - data['name'] = old_data['name'] - - data = self.check_and_convert_name_to_string(data) - - # If name if not present - if 'schema' not in data: - data['schema'] = old_data['schema'] - - # Filter out new tables from list, we will send complete list - # and not newly added tables in the list from client - # so we will filter new tables here - if 'coll_inherits' in data: - p_len = len(old_data['coll_inherits']) - c_len = len(data['coll_inherits']) - # If table(s) added - if c_len > p_len: - data['coll_inherits_added'] = list( - set(data['coll_inherits']) - set(old_data['coll_inherits']) - ) - # If table(s)removed - elif c_len < p_len: - data['coll_inherits_removed'] = list( - set(old_data['coll_inherits']) - set(data['coll_inherits']) - ) - # Safe side verification,In case it happens.. 
- # If user removes and adds same number of table - # eg removed one table and added one new table - elif c_len == p_len: - data['coll_inherits_added'] = list( - set(data['coll_inherits']) - set(old_data['coll_inherits']) - ) - data['coll_inherits_removed'] = list( - set(old_data['coll_inherits']) - set(data['coll_inherits']) - ) - - SQL = render_template("/".join([self.template_path, 'update.sql']), - o_data=old_data, data=data, conn=self.conn) - # Removes training new lines - SQL = SQL.strip('\n') + '\n\n' - - # Parse/Format columns & create sql - if 'columns' in data: - # Parse the data coming from client - data = self._parse_format_columns(data, mode='edit') - - columns = data['columns'] - column_sql = '\n' - - # If column(s) is/are deleted - if 'deleted' in columns: - for c in columns['deleted']: - c['schema'] = data['schema'] - c['table'] = data['name'] - # Sql for drop column - if 'inheritedfrom' not in c: - column_sql += render_template("/".join( - [self.column_template_path, 'delete.sql']), - data=c, conn=self.conn).strip('\n') + '\n\n' - - # If column(s) is/are changed - # Here we will be needing previous properties of column - # so that we can compare & update it - if 'changed' in columns: - for c in columns['changed']: - c['schema'] = data['schema'] - c['table'] = data['name'] - if 'attacl' in c: - c['attacl'] = parse_priv_to_db(c['attacl'], - self.column_acl) - - properties_sql = render_template("/".join([self.column_template_path, - 'properties.sql']), - tid=tid, - clid=c['attnum'], - show_sys_objects=self.blueprint.show_system_objects - ) - - status, res = self.conn.execute_dict(properties_sql) - if not status: - return internal_server_error(errormsg=res) - old_data = res['rows'][0] - - old_data['cltype'], old_data['hasSqrBracket'] = self._cltype_formatter(old_data['cltype']) - old_data = self.convert_length_precision_to_string(old_data) - - fulltype = self.get_full_type( - old_data['typnspname'], old_data['typname'], - old_data['isdup'], 
old_data['attndims'], old_data['atttypmod'] - ) - - # If we have length & precision both - matchObj = re.search(r'(\d+),(\d+)', fulltype) - if matchObj: - old_data['attlen'] = int(matchObj.group(1)) - old_data['attprecision'] = int(matchObj.group(2)) - else: - # If we have length only - matchObj = re.search(r'(\d+)', fulltype) - if matchObj: - old_data['attlen'] = int(matchObj.group(1)) - old_data['attprecision'] = None - else: - old_data['attlen'] = None - old_data['attprecision'] = None - - old_data['cltype'] = DataTypeReader.parse_type_name(old_data['cltype']) - - # Sql for alter column - if 'inheritedfrom' not in c: - column_sql += render_template("/".join( - [self.column_template_path, 'update.sql']), - data=c, o_data=old_data, conn=self.conn).strip('\n') + '\n\n' - - # If column(s) is/are added - if 'added' in columns: - for c in columns['added']: - c['schema'] = data['schema'] - c['table'] = data['name'] - # Sql for create column - if 'attacl' in c: - c['attacl'] = parse_priv_to_db(c['attacl'], - self.column_acl) - - c = self.convert_length_precision_to_string(c) - - if 'inheritedfrom' not in c: - column_sql += render_template("/".join( - [self.column_template_path, 'create.sql']), - data=c, conn=self.conn).strip('\n') + '\n\n' - - # Combine all the SQL together - SQL += column_sql.strip('\n') - - # Check if index constraints are added/changed/deleted - index_constraint_sql = self.get_index_constraint_sql(did, tid, data) - # If we have index constraint sql then ad it in main sql - if index_constraint_sql is not None: - SQL += '\n' + index_constraint_sql - - # Check if foreign key(s) is/are added/changed/deleted - foreign_key_sql = self.get_foreign_key_sql(tid, data) - # If we have foreign key sql then ad it in main sql - if foreign_key_sql is not None: - SQL += '\n' + foreign_key_sql - - # Check if check constraint(s) is/are added/changed/deleted - check_constraint_sql = self.get_check_constraint_sql(tid, data) - # If we have check constraint sql then ad it 
in main sql - if check_constraint_sql is not None: - SQL += '\n' + check_constraint_sql - - # Check if exclusion constraint(s) is/are added/changed/deleted - exclusion_constraint_sql = self.get_exclusion_constraint_sql(did, tid, data) - # If we have check constraint sql then ad it in main sql - if exclusion_constraint_sql is not None: - SQL += '\n' + exclusion_constraint_sql - - else: - required_args = [ - 'name' - ] - - for arg in required_args: - if arg not in data: - return gettext('-- definition incomplete') - - # validate constraint data. - for key in ['primary_key', 'unique_constraint', - 'foreign_key', 'check_constraint', - 'exclude_constraint']: - if key in data and len(data[key]) > 0: - for constraint in data[key]: - if not self.validate_constrains(key, constraint): - return gettext('-- definition incomplete for {0}'.format(key)) - - # We will convert privileges coming from client required - # in server side format - if 'relacl' in data: - data['relacl'] = parse_priv_to_db(data['relacl'], self.acl) - - # Parse & format columns - data = self._parse_format_columns(data) - data = self.check_and_convert_name_to_string(data) - - if 'foreign_key' in data: - for c in data['foreign_key']: - SQL = render_template("/".join([self.foreign_key_template_path, - 'get_parent.sql']), - tid=c['columns'][0]['references']) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - c['remote_schema'] = rset['rows'][0]['schema'] - c['remote_table'] = rset['rows'][0]['table'] - - # If the request for new object which do not have did - SQL = render_template("/".join([self.template_path, 'create.sql']), - data=data, conn=self.conn) - SQL = re.sub('\n{2,}', '\n\n', SQL) - SQL = SQL.strip('\n') - - return SQL, data['name'] if 'name' in data else old_data['name'] - - @staticmethod - def validate_constrains(key, data): - - if key == 'primary_key' or key == 'unique_constraint': - if 'columns' in data and len(data['columns']) > 0: 
- return True - else: - return False - elif key == 'foreign_key': - if 'oid' not in data: - for arg in ['columns']: - if arg not in data: - return False - elif isinstance(data[arg], list) and len(data[arg]) < 1: - return False - - if 'autoindex' in data and data['autoindex'] and \ - ('coveringindex' not in data or - data['coveringindex'] == ''): - return False - - return True - - elif key == 'check_constraint': - for arg in ['consrc']: - if arg not in data or data[arg] == '': - return False - return True - - elif key == 'exclude_constraint': - pass - - return True - - @check_precondition + @BaseTableView.check_precondition def dependents(self, gid, sid, did, scid, tid): """ This function get the dependents and return ajax response @@ -2460,43 +1199,9 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): scid: Schema ID tid: Table ID """ - # Specific condition for column which we need to append - where = "WHERE dep.refobjid={0}::OID".format(tid) + return BaseTableView.get_table_dependents(self, tid) - dependents_result = self.get_dependents( - self.conn, tid - ) - - # Specific sql to run againt column to fetch dependents - SQL = render_template("/".join([self.template_path, - 'depend.sql']), where=where) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - for row in res['rows']: - ref_name = row['refname'] - if ref_name is None: - continue - - dep_type = '' - dep_str = row['deptype'] - if dep_str == 'a': - dep_type = 'auto' - elif dep_str == 'n': - dep_type = 'normal' - elif dep_str == 'i': - dep_type = 'internal' - - dependents_result.append({'type': 'sequence', 'name': ref_name, 'field': dep_type}) - - return ajax_response( - response=dependents_result, - status=200 - ) - - @check_precondition + @BaseTableView.check_precondition def dependencies(self, gid, sid, did, scid, tid): """ This function get the dependencies and return ajax response @@ -2508,18 +1213,10 @@ class 
TableView(PGChildNodeView, DataTypeReader, VacuumSettings): did: Database ID scid: Schema ID tid: Table ID - """ - dependencies_result = self.get_dependencies( - self.conn, tid - ) + return BaseTableView.get_table_dependencies(self, tid) - return ajax_response( - response=dependencies_result, - status=200 - ) - - @check_precondition + @BaseTableView.check_precondition def sql(self, gid, sid, did, scid, tid): """ This function will creates reverse engineered sql for @@ -2534,15 +1231,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): """ main_sql = [] - """ - ##################################### - # 1) Reverse engineered sql for TABLE - ##################################### - """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) @@ -2552,250 +1245,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): data = res['rows'][0] - # Table & Schema declaration so that we can use them in child nodes - schema = data['schema'] - table = data['name'] + return BaseTableView.get_reverse_engineered_sql( + self, did, scid, tid, main_sql, data) - data = self._formatter(did, scid, tid, data) - - # Now we have all lis of columns which we need - # to include in our create definition, Let's format them - if 'columns' in data: - for c in data['columns']: - if 'attacl' in c: - c['attacl'] = parse_priv_to_db(c['attacl'], self.column_acl) - - # check type for '[]' in it - if 'cltype' in c: - c['cltype'], c['hasSqrBracket'] = self._cltype_formatter(c['cltype']) - - sql_header = u"-- Table: {0}\n\n-- ".format(self.qtIdent(self.conn, - data['schema'], - data['name'])) - - sql_header += 
render_template("/".join([self.template_path, - 'delete.sql']), - data=data, conn=self.conn) - - sql_header = sql_header.strip('\n') - sql_header += '\n' - - # Add into main sql - main_sql.append(sql_header) - - # Parse privilege data - if 'relacl' in data: - data['relacl'] = parse_priv_to_db(data['relacl'], self.acl) - - # If the request for new object which do not have did - table_sql = render_template("/".join([self.template_path, - 'create.sql']), - data=data, conn=self.conn, is_sql=True) - - # Add into main sql - table_sql = re.sub('\n{2,}', '\n\n', table_sql) - main_sql.append(table_sql.strip('\n')) - - """ - ###################################### - # 2) Reverse engineered sql for INDEX - ###################################### - """ - - SQL = render_template("/".join([self.index_template_path, - 'nodes.sql']), tid=tid) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - for row in rset['rows']: - - SQL = render_template("/".join([self.index_template_path, - 'properties.sql']), - did=did, tid=tid, idx=row['oid'], - datlastsysoid=self.datlastsysoid) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - data = dict(res['rows'][0]) - # Adding parent into data dict, will be using it while creating sql - data['schema'] = schema - data['table'] = table - # We also need to fecth columns of index - SQL = render_template("/".join([self.index_template_path, - 'column_details.sql']), - idx=row['oid']) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - # 'attdef' comes with quotes from query so we need to strip them - # 'options' we need true/false to render switch ASC(false)/DESC(true) - columns = [] - cols = [] - for col_row in rset['rows']: - # We need all data as collection for ColumnsModel - cols_data = { - 'colname': col_row['attdef'].strip('"'), - 'collspcname': 
col_row['collnspname'], - 'op_class': col_row['opcname'], - } - if col_row['options'][0] == 'DESC': - cols_data['sort_order'] = True - columns.append(cols_data) - - # We need same data as string to display in properties window - # If multiple column then separate it by colon - cols_str = col_row['attdef'] - if col_row['collnspname']: - cols_str += ' COLLATE ' + col_row['collnspname'] - if col_row['opcname']: - cols_str += ' ' + col_row['opcname'] - if col_row['options'][0] == 'DESC': - cols_str += ' DESC' - cols.append(cols_str) - - # Push as collection - data['columns'] = columns - # Push as string - data['cols'] = ', '.join(cols) - - sql_header = u"\n-- Index: {0}\n\n-- ".format(data['name']) - - sql_header += render_template("/".join([self.index_template_path, - 'delete.sql']), - data=data, conn=self.conn) - - index_sql = render_template("/".join([self.index_template_path, - 'create.sql']), - data=data, conn=self.conn) - index_sql += "\n" - index_sql += render_template("/".join([self.index_template_path, - 'alter.sql']), - data=data, conn=self.conn) - - # Add into main sql - index_sql = re.sub('\n{2,}', '\n\n', index_sql) - main_sql.append(sql_header + '\n\n' + index_sql.strip('\n')) - - """ - ######################################## - # 3) Reverse engineered sql for TRIGGERS - ######################################## - """ - SQL = render_template("/".join([self.trigger_template_path, - 'nodes.sql']), tid=tid) - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - for row in rset['rows']: - trigger_sql = '' - - SQL = render_template("/".join([self.trigger_template_path, - 'properties.sql']), - tid=tid, trid=row['oid'], - datlastsysoid=self.datlastsysoid) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - data = dict(res['rows'][0]) - # Adding parent into data dict, will be using it while creating sql - data['schema'] = schema - data['table'] = 
table - - data = self.get_trigger_function_schema(data) - - if len(data['custom_tgargs']) > 1: - # We know that trigger has more than 1 argument, let's join them - data['tgargs'] = self._format_args(data['custom_tgargs']) - - if len(data['tgattr']) > 1: - columns = ', '.join(data['tgattr'].split(' ')) - - SQL = render_template("/".join([self.trigger_template_path, - 'get_columns.sql']), - tid=tid, clist=columns) - - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - # 'tgattr' contains list of columns from table used in trigger - columns = [] - - for col_row in rset['rows']: - columns.append({'column': col_row['name']}) - - data['columns'] = columns - - data = trigger_definition(data) - - sql_header = u"\n-- Trigger: {0}\n\n-- ".format(data['name']) - - sql_header += render_template("/".join([self.trigger_template_path, - 'delete.sql']), - data=data, conn=self.conn) - - # If the request for new object which do not have did - trigger_sql = render_template("/".join([self.trigger_template_path, - 'create.sql']), - data=data, conn=self.conn) - - trigger_sql = sql_header + '\n\n' + trigger_sql.strip('\n') - - # If trigger is disabled then add sql code for the same - if not data['is_enable_trigger']: - trigger_sql += '\n\n' - trigger_sql += render_template("/".join([ - self.trigger_template_path, - 'enable_disable_trigger.sql']), - data=data, conn=self.conn) - - # Add into main sql - trigger_sql = re.sub('\n{2,}', '\n\n', trigger_sql) - main_sql.append(trigger_sql) - - """ - ##################################### - # 4) Reverse engineered sql for RULES - ##################################### - """ - - SQL = render_template("/".join( - [self.rules_template_path, 'properties.sql']), tid=tid) - - status, rset = self.conn.execute_2darray(SQL) - if not status: - return internal_server_error(errormsg=rset) - - for row in rset['rows']: - rules_sql = '\n' - SQL = render_template("/".join( - [self.rules_template_path, 
'properties.sql'] - ), rid=row['oid'], datlastsysoid=self.datlastsysoid) - - status, res = self.conn.execute_dict(SQL) - if not status: - return internal_server_error(errormsg=res) - - res_data = parse_rule_definition(res) - rules_sql += render_template("/".join( - [self.rules_template_path, 'create.sql']), - data=res_data, display_comments=True) - - # Add into main sql - rules_sql = re.sub('\n{2,}', '\n\n', rules_sql) - main_sql.append(rules_sql) - - sql = '\n'.join(main_sql) - - return ajax_response(response=sql.strip('\n')) - - @check_precondition + @BaseTableView.check_precondition def select_sql(self, gid, sid, did, scid, tid): """ SELECT script sql for the object @@ -2810,10 +1263,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: SELECT Script sql for the object """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) @@ -2839,7 +1293,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): ) return ajax_response(response=sql) - @check_precondition + @BaseTableView.check_precondition def insert_sql(self, gid, sid, did, scid, tid): """ INSERT script sql for the object @@ -2854,10 +1308,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: INSERT Script sql for the object """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return 
internal_server_error(errormsg=res) @@ -2886,7 +1341,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return ajax_response(response=sql) - @check_precondition + @BaseTableView.check_precondition def update_sql(self, gid, sid, did, scid, tid): """ UPDATE script sql for the object @@ -2901,10 +1356,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: UPDATE Script sql for the object """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) @@ -2935,7 +1391,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return ajax_response(response=sql) - @check_precondition + @BaseTableView.check_precondition def delete_sql(self, gid, sid, did, scid, tid): """ DELETE script sql for the object @@ -2950,10 +1406,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): Returns: DELETE Script sql for the object """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, scid=scid, tid=tid, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.table_template_path, 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) @@ -2966,7 +1423,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): return ajax_response(response=sql) - @check_precondition + @BaseTableView.check_precondition def statistics(self, gid, sid, did, scid, tid=None): """ Statistics @@ -2982,63 +1439,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings): otherwise it will return 
statistics for all the tables in that schema. """ - - # Fetch schema name - status, schema_name = self.conn.execute_scalar( - render_template( - "/".join([self.template_path, 'get_schema.sql']), - conn=self.conn, scid=scid - ) - ) - if not status: - return internal_server_error(errormsg=schema_name) - - if tid is None: - status, res = self.conn.execute_dict( - render_template( - "/".join([self.template_path, 'coll_table_stats.sql']), - conn=self.conn, schema_name=schema_name - ) - ) - else: - # For Individual table stats - - # Check if pgstattuple extension is already created? - # if created then only add extended stats - status, is_pgstattuple = self.conn.execute_scalar(""" - SELECT (count(extname) > 0) AS is_pgstattuple - FROM pg_extension - WHERE extname='pgstattuple' - """) - if not status: - return internal_server_error(errormsg=is_pgstattuple) - - # Fetch Table name - status, table_name = self.conn.execute_scalar( - render_template( - "/".join([self.template_path, 'get_table.sql']), - conn=self.conn, scid=scid, tid=tid - ) - ) - if not status: - return internal_server_error(errormsg=table_name) - - status, res = self.conn.execute_dict( - render_template( - "/".join([self.template_path, 'stats.sql']), - conn=self.conn, schema_name=schema_name, - table_name=table_name, - is_pgstattuple=is_pgstattuple, tid=tid - ) - ) - - if not status: - return internal_server_error(errormsg=res) - - return make_json_response( - data=res, - status=200 - ) - + return BaseTableView.get_table_statistics(self, scid, tid) TableView.register_node_view(blueprint) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js index 179b9610c..10737e0a2 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js +++ 
b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js @@ -86,6 +86,7 @@ define('pgadmin.node.column', [ if (!pgBrowser.Nodes['column']) { pgBrowser.Nodes['column'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, parent_type: ['table', 'view', 'mview'], collection_type: ['coll-table', 'coll-view', 'coll-mview'], type: 'column', @@ -197,11 +198,17 @@ define('pgadmin.node.column', [ ); }, disabled: function(m){ - // If primary key already exist then disable. + // Disable it, when one of this: + // - Primary key already exist + // - Table is a partitioned table if ( - m.top && !_.isUndefined(m.top.get('oid')) && - m.top.get('primary_key').length > 0 && - !_.isUndefined(m.top.get('primary_key').first().get('oid')) + m.top && (( + !_.isUndefined(m.top.get('oid')) && + m.top.get('primary_key').length > 0 && + !_.isUndefined(m.top.get('primary_key').first().get('oid')) + ) || ( + m.top.has('is_partitioned') && m.top.get('is_partitioned') + )) ) { return true; } @@ -228,6 +235,17 @@ define('pgadmin.node.column', [ return false; } + // If table is partitioned table then disable + if (m.top && !_.isUndefined(m.top.get('is_partitioned')) && + m.top.get('is_partitioned')) + { + setTimeout(function () { + m.set('is_primary_key', false); + }, 10); + + return false; + } + if(!m.inSchemaWithColumnCheck.apply(this, [m]) && !_.isUndefined(name) && !_.isNull(name) && name !== '') { return true; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/tests/test_column_get.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/tests/test_column_get.py index 713d9992b..4adfbef11 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/tests/test_column_get.py +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/tests/test_column_get.py @@ -24,7 +24,7 @@ from . 
import utils as columns_utils class ColumnGetTestCase(BaseTestGenerator): """This class will get column under table node.""" scenarios = [ - ('Fetch table Node URL', dict(url='/browser/column/obj/')) + ('Fetch columns under table node', dict(url='/browser/column/obj/')) ] def setUp(self): diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js index b12d9d300..85f62f719 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js @@ -10,6 +10,7 @@ define('pgadmin.node.check_constraints', [ // Check Constraint Node if (!pgBrowser.Nodes['check_constraints']) { pgAdmin.Browser.Nodes['check_constraints'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, type: 'check_constraints', label: gettext('Check'), collection_type: 'coll-constraints', @@ -18,7 +19,7 @@ define('pgadmin.node.check_constraints', [ dialogHelp: url_for('help.static', {'filename': 'check_dialog.html'}), hasSQL: true, hasDepends: true, - parent_type: ['table'], + parent_type: ['table','partition'], Init: function() { // Avoid mulitple registration of menus if (this.initialized) @@ -137,6 +138,18 @@ define('pgadmin.node.check_constraints', [ 'switch', cell: 'boolean', group: gettext('Definition'), mode: ['properties', 'create', 'edit'], min_version: 90200, disabled: function(m) { + // Disabled if table is a partitioned table. 
+ if ((_.has(m , 'top') && !_.isUndefined(m.top) && m.top.get('is_partitioned')) || + (_.has(m, 'node_info') && _.has(m.node_info, 'table') && + _.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned) + ){ + setTimeout(function(){ + m.set('connoinherit', false); + },10); + + return true; + } + return ((_.has(m, 'handler') && !_.isUndefined(m.handler) && !_.isUndefined(m.get('oid'))) || (_.isFunction(m.isNew) && !m.isNew())); diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js index 814edce50..605495d79 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js @@ -602,6 +602,7 @@ define('pgadmin.node.exclusion_constraint', [ // Extend the browser's node class for exclusion constraint node if (!pgBrowser.Nodes['exclusion_constraint']) { pgAdmin.Browser.Nodes['exclusion_constraint'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, type: 'exclusion_constraint', label: gettext('Exclusion constraint'), collection_type: 'coll-constraints', @@ -609,7 +610,7 @@ define('pgadmin.node.exclusion_constraint', [ sqlCreateHelp: 'ddl-constraints.html', dialogHelp: url_for('help.static', {'filename': 'exclusion_constraint_dialog.html'}), hasSQL: true, - parent_type: 'table', + parent_type: ['table','partition'], canDrop: true, canDropCascade: true, hasDepends: true, @@ -916,12 +917,22 @@ define('pgadmin.node.exclusion_constraint', [ if (data && data.check == false) return 
true; - var t = pgBrowser.tree, i = item, d = itemData, parents = []; + var t = pgBrowser.tree, i = item, d = itemData, parents = [], + immediate_parent_table_found = false, + is_immediate_parent_table_partitioned = false; // To iterate over tree to check parent node while (i) { + // If table is partitioned table then return false + if (!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) { + immediate_parent_table_found = true; + if ('is_partitioned' in d && d.is_partitioned) { + is_immediate_parent_table_partitioned = true; + } + } + // If it is schema then allow user to create table if (_.indexOf(['schema'], d._type) > -1) - return true; + return !is_immediate_parent_table_partitioned; parents.push(d._type); i = t.hasParent(i) ? t.parent(i) : null; d = i ? t.itemData(i) : null; @@ -930,7 +941,7 @@ define('pgadmin.node.exclusion_constraint', [ if (_.indexOf(parents, 'catalog') > -1) { return false; } else { - return true; + return !is_immediate_parent_table_partitioned; } } }); diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js index 95afa3ef9..a4bec09de 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js @@ -602,6 +602,7 @@ define('pgadmin.node.foreign_key', [ // Extend the browser's node class for foreign key node if (!pgBrowser.Nodes['foreign_key']) { pgAdmin.Browser.Nodes['foreign_key'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, type: 'foreign_key', label: gettext('Foreign key'), collection_type: 'coll-constraints', @@ -610,7 +611,7 @@ 
define('pgadmin.node.foreign_key', [ dialogHelp: url_for('help.static', {'filename': 'foreign_key_dialog.html'}), hasSQL: true, hasDepends: false, - parent_type: 'table', + parent_type: ['table','partition'], canDrop: true, canDropCascade: true, hasDepends: true, @@ -1068,12 +1069,22 @@ define('pgadmin.node.foreign_key', [ if (data && data.check == false) return true; - var t = pgBrowser.tree, i = item, d = itemData, parents = []; + var t = pgBrowser.tree, i = item, d = itemData, parents = [], + immediate_parent_table_found = false, + is_immediate_parent_table_partitioned = false; // To iterate over tree to check parent node while (i) { + // If table is partitioned table then return false + if (!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) { + immediate_parent_table_found = true; + if ('is_partitioned' in d && d.is_partitioned) { + is_immediate_parent_table_partitioned = true; + } + } + + // If it is schema then allow user to create table if (_.indexOf(['schema'], d._type) > -1) - return true; + return !is_immediate_parent_table_partitioned; parents.push(d._type); i = t.hasParent(i) ? t.parent(i) : null; d = i ? 
t.itemData(i) : null; @@ -1082,7 +1093,7 @@ define('pgadmin.node.foreign_key', [ if (_.indexOf(parents, 'catalog') > -1) { return false; } else { - return true; + return !is_immediate_parent_table_partitioned; } } }); diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js index 97a404a4d..9479acd8f 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js @@ -6,6 +6,7 @@ define('pgadmin.node.{{node_type}}', [ // Extend the browser's node class for index constraint node if (!pgBrowser.Nodes['{{node_type}}']) { pgAdmin.Browser.Nodes['{{node_type}}'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, type: '{{node_type}}', label: '{{ node_label }}', collection_type: 'coll-constraints', @@ -20,7 +21,7 @@ define('pgadmin.node.{{node_type}}', [ hasDepends: true, hasStatistics: true, statsPrettifyFields: ['Index size'], - parent_type: 'table', + parent_type: ['table','partition'], canDrop: true, canDropCascade: true, Init: function() { @@ -45,12 +46,28 @@ define('pgadmin.node.{{node_type}}', [ if (data && data.check == false) return true; - var t = pgBrowser.tree, i = item, d = itemData, parents = []; + var t = pgBrowser.tree, i = item, d = itemData, parents = [], + immediate_parent_table_found = false, + is_immediate_parent_table_partitioned = false; + // To iterate over tree to check parent node while (i) { - // If it is schema then allow user to c reate table + // If table is partitioned table then return false + if 
(!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) { + immediate_parent_table_found = true; + if ('is_partitioned' in d && d.is_partitioned) { + is_immediate_parent_table_partitioned = true; + } + } + + // If it is schema then allow user to create table if (_.indexOf(['schema'], d._type) > -1) { {% if node_type == 'primary_key' %} + + if (is_immediate_parent_table_partitioned) { + return false; + } + // There should be only one primary key per table. var children = t.children(arguments[1], false), primary_key_found = false; @@ -63,7 +80,7 @@ define('pgadmin.node.{{node_type}}', [ }); return !primary_key_found; {% else %} - return true; + return !is_immediate_parent_table_partitioned; {% endif %} } parents.push(d._type); @@ -74,7 +91,7 @@ define('pgadmin.node.{{node_type}}', [ if (_.indexOf(parents, 'catalog') > -1) { return false; } else { - return true; + return !is_immediate_parent_table_partitioned; } }, diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/templates/constraints/js/constraints.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/templates/constraints/js/constraints.js index a7346f1e5..e372b3ccb 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/templates/constraints/js/constraints.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/templates/constraints/js/constraints.js @@ -9,16 +9,18 @@ define('pgadmin.node.constraints', [ node: 'constraints', label: gettext('Constraints'), type: 'coll-constraints', + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, columns: ['name', 'comment'] }); }; if (!pgBrowser.Nodes['constraints']) { pgAdmin.Browser.Nodes['constraints'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, type: 'constraints', label: gettext('Constraints'), collection_type: 'coll-constraints', - parent_type: 
['table'], + parent_type: ['table','partition'], Init: function() { /* Avoid mulitple registration of menus */ if (this.initialized) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py index e52c15ba1..13f9d8648 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py @@ -72,12 +72,28 @@ class IndexesModule(CollectionNodeModule): if super(IndexesModule, self).BackendSupported(manager, **kwargs): conn = manager.connection(did=kwargs['did']) + # In case of partitioned table return false. + if 'tid' in kwargs and manager.version >= 100000: + partition_path = 'partition/sql/#{0}#'.format(manager.version) + SQL = render_template( + "/".join([partition_path, 'backend_support.sql']), + tid=kwargs['tid'] + ) + status, res = conn.execute_scalar(SQL) + + # check if any errors + if not status: + return internal_server_error(errormsg=res) + return not res + if 'vid' not in kwargs: return True template_path = 'index/sql/#{0}#'.format(manager.version) - SQL = render_template("/".join( - [template_path, 'backend_support.sql']), vid=kwargs['vid']) + SQL = render_template( + "/".join([template_path, 'backend_support.sql']), + vid=kwargs['vid'] + ) status, res = conn.execute_scalar(SQL) # check if any errors @@ -239,9 +255,10 @@ class IndexesView(PGChildNodeView): # We need parent's name eg table name and schema name # when we create new index in update we can fetch it using # property sql - SQL = render_template("/".join([self.template_path, - 'get_parent.sql']), - tid=kwargs['tid']) + SQL = render_template( + "/".join([self.template_path, 'get_parent.sql']), + tid=kwargs['tid'] + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=rset) @@ -262,8 +279,9 @@ class 
IndexesView(PGChildNodeView): """ res = [{'label': '', 'value': ''}] try: - SQL = render_template("/".join([self.template_path, - 'get_collations.sql'])) + SQL = render_template( + "/".join([self.template_path, 'get_collations.sql']) + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) @@ -289,8 +307,7 @@ class IndexesView(PGChildNodeView): """ res = [{'label': '', 'value': ''}] try: - SQL = render_template("/".join([self.template_path, - 'get_am.sql'])) + SQL = render_template("/".join([self.template_path, 'get_am.sql'])) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) @@ -317,17 +334,17 @@ class IndexesView(PGChildNodeView): res = dict() try: # Fetching all the access methods - SQL = render_template("/".join([self.template_path, - 'get_am.sql'])) + SQL = render_template("/".join([self.template_path, 'get_am.sql'])) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) for row in rset['rows']: # Fetching all the op_classes for each access method - SQL = render_template("/".join([self.template_path, - 'get_op_class.sql']), - oid=row['oid']) + SQL = render_template( + "/".join([self.template_path, 'get_op_class.sql']), + oid=row['oid'] + ) status, result = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) @@ -365,8 +382,9 @@ class IndexesView(PGChildNodeView): JSON of available schema nodes """ - SQL = render_template("/".join([self.template_path, - 'nodes.sql']), tid=tid) + SQL = render_template( + "/".join([self.template_path, 'nodes.sql']), tid=tid + ) status, res = self.conn.execute_dict(SQL) if not status: @@ -393,10 +411,10 @@ class IndexesView(PGChildNodeView): Returns: JSON of available schema child nodes """ - SQL = render_template("/".join([self.template_path, - 'nodes.sql']), - tid=tid, - idx=idx) + SQL = render_template( + "/".join([self.template_path, 
'nodes.sql']), + tid=tid, idx=idx + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=rset) @@ -433,8 +451,9 @@ class IndexesView(PGChildNodeView): JSON of available schema child nodes """ res = [] - SQL = render_template("/".join([self.template_path, - 'nodes.sql']), tid=tid) + SQL = render_template( + "/".join([self.template_path, 'nodes.sql']), tid=tid + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=rset) @@ -465,8 +484,9 @@ class IndexesView(PGChildNodeView): Updated properties data with column details """ - SQL = render_template("/".join([self.template_path, - 'column_details.sql']), idx=idx) + SQL = render_template( + "/".join([self.template_path, 'column_details.sql']), idx=idx + ) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=rset) @@ -521,10 +541,10 @@ class IndexesView(PGChildNodeView): JSON of selected schema node """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, tid=tid, idx=idx, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.template_path, 'properties.sql']), + did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) @@ -595,9 +615,10 @@ class IndexesView(PGChildNodeView): try: # Start transaction. self.conn.execute_scalar("BEGIN;") - SQL = render_template("/".join([self.template_path, - 'create.sql']), - data=data, conn=self.conn, mode='create') + SQL = render_template( + "/".join([self.template_path, 'create.sql']), + data=data, conn=self.conn, mode='create' + ) status, res = self.conn.execute_scalar(SQL) if not status: # End transaction. 
@@ -606,9 +627,10 @@ class IndexesView(PGChildNodeView): # If user chooses concurrent index then we cannot run it along # with other alter statements so we will separate alter index part - SQL = render_template("/".join([self.template_path, - 'alter.sql']), - data=data, conn=self.conn) + SQL = render_template( + "/".join([self.template_path, 'alter.sql']), + data=data, conn=self.conn + ) SQL = SQL.strip('\n').strip(' ') if SQL != '': status, res = self.conn.execute_scalar(SQL) @@ -618,9 +640,10 @@ class IndexesView(PGChildNodeView): return internal_server_error(errormsg=res) # we need oid to to add object in tree at browser - SQL = render_template("/".join([self.template_path, - 'get_oid.sql']), - tid=tid, data=data) + SQL = render_template( + "/".join([self.template_path, 'get_oid.sql']), + tid=tid, data=data + ) status, idx = self.conn.execute_scalar(SQL) if not status: # End transaction. @@ -665,10 +688,10 @@ class IndexesView(PGChildNodeView): try: # We will first fetch the index name for current request # so that we create template for dropping index - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, tid=tid, idx=idx, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.template_path, 'properties.sql']), + did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: @@ -687,9 +710,10 @@ class IndexesView(PGChildNodeView): data = dict(res['rows'][0]) - SQL = render_template("/".join([self.template_path, - 'delete.sql']), - data=data, conn=self.conn, cascade=cascade) + SQL = render_template( + "/".join([self.template_path, 'delete.sql']), + data=data, conn=self.conn, cascade=cascade + ) status, res = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=res) @@ -787,10 +811,10 @@ class IndexesView(PGChildNodeView): This function will genrate sql from model data """ if idx is not None: - SQL = 
render_template("/".join([self.template_path, - 'properties.sql']), - did=did, tid=tid, idx=idx, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.template_path, 'properties.sql']), + did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: @@ -826,11 +850,15 @@ class IndexesView(PGChildNodeView): return gettext('-- definition incomplete') # If the request for new object which do not have did - SQL = render_template("/".join([self.template_path, 'create.sql']), - data=data, conn=self.conn, mode=mode) + SQL = render_template( + "/".join([self.template_path, 'create.sql']), + data=data, conn=self.conn, mode=mode + ) SQL += "\n" - SQL += render_template("/".join([self.template_path, 'alter.sql']), - data=data, conn=self.conn) + SQL += render_template( + "/".join([self.template_path, 'alter.sql']), + data=data, conn=self.conn + ) return SQL, data['name'] if 'name' in data else old_data['name'] @@ -848,10 +876,10 @@ class IndexesView(PGChildNodeView): idx: Index ID """ - SQL = render_template("/".join([self.template_path, - 'properties.sql']), - did=did, tid=tid, idx=idx, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.template_path, 'properties.sql']), + did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: @@ -872,9 +900,10 @@ class IndexesView(PGChildNodeView): return SQL sql_header = u"-- Index: {0}\n\n-- ".format(data['name']) - sql_header += render_template("/".join([self.template_path, - 'delete.sql']), - data=data, conn=self.conn) + sql_header += render_template( + "/".join([self.template_path, 'delete.sql']), + data=data, conn=self.conn + ) SQL = sql_header + '\n\n' + SQL @@ -959,10 +988,11 @@ class IndexesView(PGChildNodeView): if is_pgstattuple: # Fetch index details only if extended stats available - SQL = render_template("/".join([self.template_path, - 'properties.sql']), 
- did=did, tid=tid, idx=idx, - datlastsysoid=self.datlastsysoid) + SQL = render_template( + "/".join([self.template_path, 'properties.sql']), + did=did, tid=tid, idx=idx, + datlastsysoid=self.datlastsysoid + ) status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/templates/index/js/index.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/templates/index/js/index.js index 24f37f0ed..4fc1d54a6 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/templates/index/js/index.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/templates/index/js/index.js @@ -10,6 +10,7 @@ define('pgadmin.node.index', [ node: 'index', label: gettext('Indexes'), type: 'coll-index', + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, sqlAlterHelp: 'sql-alterindex.html', sqlCreateHelp: 'sql-createindex.html', dialogHelp: url_for('help.static', {'filename': 'index_dialog.html'}), @@ -210,8 +211,9 @@ define('pgadmin.node.index', [ }); if (!pgBrowser.Nodes['index']) { - pgAdmin.Browser.Nodes['index'] = pgAdmin.Browser.Node.extend({ - parent_type: ['table', 'view', 'mview'], + pgAdmin.Browser.Nodes['index'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, + parent_type: ['table', 'view', 'mview', 'partition'], collection_type: ['coll-table', 'coll-view'], sqlAlterHelp: 'sql-alterindex.html', sqlCreateHelp: 'sql-createindex.html', @@ -246,6 +248,12 @@ define('pgadmin.node.index', [ category: 'create', priority: 4, label: gettext('Index...'), icon: 'wcTabIcon icon-index', data: {action: 'create', check: true}, enable: 'canCreate' + },{ + name: 'create_index_onPartition', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'show_obj_properties', + category: 'create', priority: 4, label: 
gettext('Index...'), + icon: 'wcTabIcon icon-index', data: {action: 'create', check: true}, + enable: 'canCreate' },{ name: 'create_index_onMatView', node: 'mview', module: this, applies: ['object', 'context'], callback: 'show_obj_properties', @@ -472,12 +480,23 @@ define('pgadmin.node.index', [ if (data && data.check == false) return true; - var t = pgBrowser.tree, i = item, d = itemData, parents = []; + var t = pgBrowser.tree, i = item, d = itemData, parents = [], + immediate_parent_table_found = false, + is_immediate_parent_table_partitioned = false; // To iterate over tree to check parent node while (i) { - // If it is schema then allow user to c reate table + // Do not allow creating index on partitioned tables. + if (!immediate_parent_table_found && + _.indexOf(['table', 'partition'], d._type) > -1) { + immediate_parent_table_found = true; + if ('is_partitioned' in d && d.is_partitioned) { + is_immediate_parent_table_partitioned = true; + } + } + + // If it is schema then allow user to create index if (_.indexOf(['schema'], d._type) > -1) - return true; + return !is_immediate_parent_table_partitioned; parents.push(d._type); i = t.hasParent(i) ? t.parent(i) : null; d = i ? 
t.itemData(i) : null; @@ -486,7 +505,7 @@ define('pgadmin.node.index', [ if (_.indexOf(parents, 'catalog') > -1) { return false; } else { - return true; + return !is_immediate_parent_table_partitioned; } } }); diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py new file mode 100644 index 000000000..a1c010f8b --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py @@ -0,0 +1,583 @@ +########################################################################## +# +# pgAdmin 4 - PostgreSQL Tools +# +# Copyright (C) 2013 - 2017, The pgAdmin Development Team +# This software is released under the PostgreSQL Licence +# +########################################################################## + +""" Implements Partitions Node """ + +import re +import simplejson as json +import pgadmin.browser.server_groups.servers.databases.schemas as schema +from flask import render_template, request +from flask_babel import gettext +from pgadmin.browser.server_groups.servers.databases.schemas.utils \ + import DataTypeReader, VacuumSettings +from pgadmin.utils.ajax import internal_server_error, \ + make_response as ajax_response, gone +from pgadmin.browser.server_groups.servers.databases.schemas.tables.utils \ + import BaseTableView +from pgadmin.browser.collection import CollectionNodeModule +from pgadmin.utils.ajax import make_json_response, precondition_required +from config import PG_DEFAULT_DRIVER +from pgadmin.browser.utils import PGChildModule + + +class PartitionsModule(CollectionNodeModule): + """ + class PartitionsModule(CollectionNodeModule) + + A module class for Partition node derived from CollectionNodeModule. + + Methods: + ------- + * __init__(*args, **kwargs) + - Method is used to initialize the Partition and it's base module. 
+ + * get_nodes(gid, sid, did, scid, tid) + - Method is used to generate the browser collection node. + + * node_inode() + - Method is overridden from its base class to make the node as leaf node. + + * script_load() + - Load the module script for schema, when any of the server node is + initialized. + """ + + NODE_TYPE = 'partition' + COLLECTION_LABEL = gettext("Partitions") + + def __init__(self, *args, **kwargs): + """ + Method is used to initialize the PartitionsModule and it's base module. + + Args: + *args: + **kwargs: + """ + super(PartitionsModule, self).__init__(*args, **kwargs) + self.min_ver = 100000 + self.max_ver = None + + def get_nodes(self, gid, sid, did, scid, **kwargs): + """ + Generate the collection node + """ + yield self.generate_browser_collection_node(kwargs['tid']) + + @property + def script_load(self): + """ + Load the module script for server, when any of the server-group node is + initialized. + """ + return schema.SchemaModule.NODE_TYPE + + @property + def node_inode(self): + """ + Load the module node as a leaf node + """ + return True + + def BackendSupported(self, manager, **kwargs): + """ + Load this module if it is a partition table + """ + if 'tid' in kwargs and CollectionNodeModule.BackendSupported(self, manager, **kwargs): + conn = manager.connection(did=kwargs['did']) + + template_path = 'partition/sql/#{0}#'.format(manager.version) + SQL = render_template("/".join( + [template_path, 'backend_support.sql']), tid=kwargs['tid']) + status, res = conn.execute_scalar(SQL) + + # check if any errors + if not status: + return internal_server_error(errormsg=res) + + return res + + def register(self, app, options, first_registration=False): + """ + Override the default register function to automatically register + sub-modules of table node under partition table node. 
+ """ + + if first_registration: + self.submodules = list(app.find_submodules(self.import_name)) + + super(CollectionNodeModule, self).register(app, options, first_registration) + + for module in self.submodules: + if first_registration: + module.parentmodules.append(self) + app.register_blueprint(module) + + # Now add sub modules of table node to partition table node. + if first_registration: + # Exclude 'partition' module for now to avoid cyclic import issue. + modules_to_skip = ['partition', 'column'] + for parent in self.parentmodules: + if parent.NODE_TYPE == 'table': + self.submodules += [ + submodule for submodule in parent.submodules + if submodule.NODE_TYPE not in modules_to_skip + ] + + @property + def module_use_template_javascript(self): + """ + Returns whether Jinja2 template is used for generating the javascript + module. + """ + return False + + +blueprint = PartitionsModule(__name__) + + +class PartitionsView(BaseTableView, DataTypeReader, VacuumSettings): + """ + This class is responsible for generating routes for Partition node + + Methods: + ------- + + * list() + - This function is used to list all the Partition nodes within that + collection. + + * nodes() + - This function will used to create all the child node within that + collection, Here it will create all the Partition node. 
+ + * properties(gid, sid, did, scid, tid, ptid) + - This function will show the properties of the selected Partition node + + """ + + node_type = blueprint.node_type + + parent_ids = [ + {'type': 'int', 'id': 'gid'}, + {'type': 'int', 'id': 'sid'}, + {'type': 'int', 'id': 'did'}, + {'type': 'int', 'id': 'scid'}, + {'type': 'int', 'id': 'tid'} + ] + ids = [ + {'type': 'int', 'id': 'ptid'} + ] + + operations = dict({ + 'obj': [ + {'get': 'properties', 'delete': 'delete', 'put': 'update'}, + {'get': 'list', 'post': 'create'} + ], + 'nodes': [{'get': 'nodes'}, {'get': 'nodes'}], + 'children': [{'get': 'children'}], + 'sql': [{'get': 'sql'}], + 'msql': [{'get': 'msql'}, {}], + 'detach': [{'put': 'detach'}], + 'truncate': [{'put': 'truncate'}] + + }) + + def children(self, **kwargs): + """Build a list of treeview nodes from the child nodes.""" + + if 'sid' not in kwargs: + return precondition_required( + gettext('Required properties are missing.') + ) + + from pgadmin.utils.driver import get_driver + manager = get_driver(PG_DEFAULT_DRIVER).connection_manager( + sid=kwargs['sid'] + ) + + did = None + if 'did' in kwargs: + did = kwargs['did'] + + conn = manager.connection(did=did) + + if not conn.connected(): + return precondition_required( + gettext( + "Connection to the server has been lost." + ) + ) + + nodes = [] + for module in self.blueprint.submodules: + if isinstance(module, PGChildModule): + if manager is not None and \ + module.BackendSupported(manager, **kwargs): + # treat partition table as normal table. + # replace tid with ptid and pop ptid from kwargs + if 'ptid' in kwargs: + ptid = kwargs.pop('ptid') + kwargs['tid'] = ptid + nodes.extend(module.get_nodes(**kwargs)) + else: + nodes.extend(module.get_nodes(**kwargs)) + + # Explicitly include 'partition' module as we had excluded it during + # registration. 
+ nodes.extend(self.blueprint.get_nodes(**kwargs)) + + # Return sorted nodes based on label + return make_json_response( + data=sorted( + nodes, key=lambda c: c['label'] + ) + ) + + @BaseTableView.check_precondition + def list(self, gid, sid, did, scid, tid): + """ + This function is used to list all the table nodes within that + collection. + + Args: + gid: Server group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + + Returns: + JSON of available table nodes + """ + SQL = render_template("/".join([self.partition_template_path, + 'properties.sql']), + did=did, scid=scid, tid=tid, + datlastsysoid=self.datlastsysoid) + status, res = self.conn.execute_dict(SQL) + + if not status: + return internal_server_error(errormsg=res) + return ajax_response( + response=res['rows'], + status=200 + ) + + @BaseTableView.check_precondition + def nodes(self, gid, sid, did, scid, tid, ptid=None): + """ + This function is used to list all the table nodes within that + collection. + + Args: + gid: Server group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Parent Table ID + ptid: Partition Table ID + + Returns: + JSON of available table nodes + """ + SQL = render_template( + "/".join([self.partition_template_path, 'nodes.sql']), + scid=scid, tid=tid + ) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + def browser_node(row): + return self.blueprint.generate_browser_node( + row['oid'], + tid, + row['name'], + icon="icon-partition", + tigger_count=row['triggercount'], + has_enable_triggers=row['has_enable_triggers'], + is_partitioned=row['is_partitioned'], + parent_schema_id=scid, + schema_id=row['schema_id'], + schema_name=row['schema_name'] + ) + + if ptid is not None: + if len(rset['rows']) == 0: + return gone(gettext( + "The specified partitioned table could not be found." 
+ )) + + return make_json_response( + data=browser_node(rset['rows'][0]), status=200 + ) + + res = [] + for row in rset['rows']: + res.append(browser_node(row)) + + return make_json_response( + data=res, + status=200 + ) + + @BaseTableView.check_precondition + def properties(self, gid, sid, did, scid, tid, ptid): + """ + This function will show the properties of the selected table node. + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + scid: Schema ID + tid: Table ID + ptid: Partition Table ID + + Returns: + JSON of selected table node + """ + + SQL = render_template("/".join([self.partition_template_path, + 'properties.sql']), + did=did, scid=scid, tid=tid, + ptid=ptid, datlastsysoid=self.datlastsysoid) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + if len(res['rows']) == 0: + return gone(gettext( + "The specified partitioned table could not be found.")) + + return super(PartitionsView, self).properties( + gid, sid, did, scid, ptid, res) + + @BaseTableView.check_precondition + def sql(self, gid, sid, did, scid, tid, ptid): + """ + This function will creates reverse engineered sql for + the table object + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + ptid: Partition Table ID + """ + main_sql = [] + + SQL = render_template("/".join([self.partition_template_path, + 'properties.sql']), + did=did, scid=scid, tid=tid, + ptid=ptid, datlastsysoid=self.datlastsysoid) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + if len(res['rows']) == 0: + return gone(gettext( + "The specified partitioned table could not be found.")) + + data = res['rows'][0] + + return BaseTableView.get_reverse_engineered_sql(self, did, scid, ptid, + main_sql, data) + + @BaseTableView.check_precondition + def detach(self, gid, sid, did, scid, tid, ptid): + """ + This function will reset 
statistics of table + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + ptid: Partition Table ID + """ + # Fetch schema name + status, parent_schema = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_schema.sql']), + conn=self.conn, scid=scid + ) + ) + if not status: + return internal_server_error(errormsg=parent_schema) + + # Fetch Parent Table name + status, partitioned_table_name = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_table.sql']), + conn=self.conn, scid=scid, tid=tid + ) + ) + if not status: + return internal_server_error(errormsg=partitioned_table_name) + + # Get schema oid of partition + status, pscid = self.conn.execute_scalar( + render_template("/".join([self.table_template_path, + 'get_schema_oid.sql']), tid=ptid)) + if not status: + return internal_server_error(errormsg=scid) + + # Fetch schema name + status, partition_schema = self.conn.execute_scalar( + render_template("/".join([self.table_template_path, + 'get_schema.sql']), conn=self.conn, + scid=pscid) + ) + if not status: + return internal_server_error(errormsg=partition_schema) + + # Fetch Partition Table name + status, partition_name = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_table.sql']), + conn=self.conn, scid=pscid, tid=ptid + ) + ) + if not status: + return internal_server_error(errormsg=partition_name) + + try: + temp_data = dict() + temp_data['parent_schema'] = parent_schema + temp_data['partitioned_table_name'] = partitioned_table_name + temp_data['schema'] = partition_schema + temp_data['name'] = partition_name + + SQL = render_template("/".join( + [self.partition_template_path, 'detach.sql']), + data=temp_data, conn=self.conn) + + status, res = self.conn.execute_scalar(SQL) + if not status: + return internal_server_error(errormsg=res) + + return make_json_response( + success=1, + 
info=gettext("Partition detached."), + data={ + 'id': ptid, + 'scid': scid + } + ) + except Exception as e: + return internal_server_error(errormsg=str(e)) + + @BaseTableView.check_precondition + def msql(self, gid, sid, did, scid, tid, ptid=None): + """ + This function will create modified sql for table object + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + """ + data = dict() + for k, v in request.args.items(): + try: + data[k] = json.loads(v, encoding='utf-8') + except (ValueError, TypeError, KeyError): + data[k] = v + + if ptid is not None: + SQL = render_template("/".join([self.partition_template_path, + 'properties.sql']), + did=did, scid=scid, tid=tid, + ptid=ptid, datlastsysoid=self.datlastsysoid) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + SQL, name = self.get_sql(did, scid, ptid, data, res) + SQL = re.sub('\n{2,}', '\n\n', SQL) + SQL = SQL.strip('\n') + if SQL == '': + SQL = "--modified SQL" + return make_json_response( + data=SQL, + status=200 + ) + + @BaseTableView.check_precondition + def update(self, gid, sid, did, scid, tid, ptid): + """ + This function will update an existing table object + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + ptid: Partition Table ID + """ + data = request.form if request.form else json.loads( + request.data, encoding='utf-8' + ) + + for k, v in data.items(): + try: + data[k] = json.loads(v, encoding='utf-8') + except (ValueError, TypeError, KeyError): + data[k] = v + + try: + SQL = render_template("/".join([self.partition_template_path, + 'properties.sql']), + did=did, scid=scid, tid=tid, + ptid=ptid, datlastsysoid=self.datlastsysoid) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + return super(PartitionsView, self).update( + gid, sid, did, scid, ptid, data, res, parent_id=tid) + 
except Exception as e: + return internal_server_error(errormsg=str(e)) + + @BaseTableView.check_precondition + def truncate(self, gid, sid, did, scid, tid, ptid): + """ + This function will truncate the table object + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + """ + + try: + SQL = render_template("/".join([self.partition_template_path, + 'properties.sql']), + did=did, scid=scid, tid=tid, + ptid=ptid, datlastsysoid=self.datlastsysoid) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + return super(PartitionsView, self).truncate(gid, sid, did, scid, ptid, res) + + except Exception as e: + return internal_server_error(errormsg=str(e)) + + +PartitionsView.register_node_view(blueprint) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/coll-partition.png b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/coll-partition.png new file mode 100644 index 0000000000000000000000000000000000000000..8536c66afb1099e1898f3c6163a709e3e5375564 GIT binary patch literal 1433 zcmeAS@N?(olHy`uVBq!ia0vp^0wB!61|;P_|4#%`k|nMYCBgY=CFO}lsSJ)O`AMk? 
zp1FzXsX?iUDV2pMQ*9U+m@6_vB1$5BeXNr6bM+EIYV;~{3xK*A7;Nk-3KEmEQ%e+* zQqwc@Y?a>c-mj#PnPRIHZt82`Ti~3Uk?B!Ylp0*+7m{3+ootz+WN)WnQ(*-(AUCxn zQK2F?C$HG5!d3}vt`(3C64qBz04piUwpD^SD#ABF!8yMuRl!uxOgGuk*h0bFQqR!T z(!$6@N5ROz&`jUJQs2--*TB%qz|zXVPyq^*fVLH-q*(>IxIyg#@@$ndN=gc>^!3Zj z%k|2Q_413-^$jg8E%gnI^o@*kfhu&1EAvVcD|GXUm0>2hq!uR^WfqiV=I1GZOiWD5 zFD#PU%0_}#n6BP2AO_EVu8M)o`HUDF34YC)x{-2sR(CaRb3oXS&*t9 zlvWQiR1f?-$y8V26cBo;ukpW z=+RYGK7G-gzl?l(e4Fal=E%q_nKMVmMN`v0tN7NF1u8|8pD6Kdd7GMhZQtXWmE{{H zj~M%Yu3h%L@|p2@p3iOH%O^{U`B*nhQJ8EZ(aRtEZesQK;D2!vJ}aKMd}lkrZTQQg z`{=361Cu0b1FGg^aZGV#Z~Ad8;^L7;pIfsXUQ4Z>W&6XJH@wmKcN&MBj)b5 z)v^{(Ue9nkczc-mj#PnPRIHZt82`Ti~3Uk?B!Ylp0*+7m{3+ootz+WN)WnQ(*-(AUCxn zQK2F?C$HG5!d3}vt`(3C64qBz04piUwpD^SD#ABF!8yMuRl!uxOgGuk*h0bFQqR!T z(!$6@N5ROz&`jUJQs2--*TB%qz|zXVPyq^*fVLH-q*(>IxIyg#@@$ndN=gc>^!3Zj z%k|2Q_413-^$jg8E%gnI^o@*kfhu&1EAvVcD|GXUm0>2hq!uR^WfqiV=I1GZOiWD5 zFD$Tv3bSNU;+l1ennz|zM-B0$V)JVzP|XC=H|jx7ncO3BHWAB;NpiyW)Z+ZoqGVvir744~DzI`cN=+=uFAB-e&w+(vKt_H^esM;Afr7I$DAddqG<*}2 zGxI=#nqXbNzE+-j#U+V($*G<$wn{*A^fEJ3tSsFO-He>fEln(q4Gj%l%`IKbot#Xa zOx+BPU7XC#++ccL@{>z*Q}aq-dQ%X39dYUfC5YStpv^9+MVV!(DQ-pixe8#9TV>*Q zi#bm7pn6kqyTt;hUVWfr^g+>!6x}c(U>X83;fWW>fhYgeJYbqH0w(S^3o~jM7#QO` zT^vIyZmkKn^>#KCu{HNx{X&7O>4DOVM_O7I8cHDoRzI2laO*W);8VKNfAC=aL;ek1 zcSWpru3Wu(^5pJ-TTiMceb3^4bSeGNUX|x}izijT?>;m4-rc&&m1WEVBAFhaPHDba z;TY+3=vPIIt9|UXW!+ECpW;n8)^U7-UZ35$Lepxdh_;_SW(Trkrmt8$BkOH=tc_%w z+iq*c+9A$^on3a-1W{y!a6O7W<9@pyjtsn z2#3gyjQwu5NAKOLQIZzYdHe7sul4F~k*yr}(wQvdBIBEuXDo=_7XKhAFh*xOo5ZT< zRnqU)Rz>)&R}{@_DE1e=+VY&U&n}{QnV3+3?velfB3Va+3MT%o*ztRH@9g?5h1Dy1 zqpz?15-*yeeCgog=l<)oZ(e*Iv59G>Wrj%=xAxa{{PyQ}u9v(2rl57{`=Xjpe}BG- bZeV0EdB614s(jTpP#NUu>gTe~DWM4f22aJ% literal 0 HcmV?d00001 diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/js/partition.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/js/partition.js new file mode 100644 index 000000000..63adb5ec5 --- /dev/null +++ 
b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/js/partition.js @@ -0,0 +1,1276 @@ +define([ + 'sources/gettext', 'sources/url_for', 'jquery', 'underscore', + 'underscore.string', 'pgadmin', 'pgadmin.browser', 'backform', 'alertify', + 'pgadmin.browser.collection', 'pgadmin.browser.table.partition.utils' +], +function(gettext, url_for, $, _, S, pgAdmin, pgBrowser, Backform, alertify) { + + if (!pgBrowser.Nodes['coll-partition']) { + var databases = pgAdmin.Browser.Nodes['coll-partition'] = + pgAdmin.Browser.Collection.extend({ + node: 'partition', + label: gettext('Partitions'), + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, + type: 'coll-partition', + columns: [ + 'name', 'schema', 'partition_value', 'is_partitioned', 'description' + ], + hasStatistics: true + }); + }; + + if (!pgBrowser.Nodes['partition']) { + pgAdmin.Browser.Nodes['partition'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, + parent_type: 'table', + collection_type: 'coll-partition', + type: 'partition', + label: gettext('Partition'), + hasSQL: true, + hasDepends: true, + hasStatistics: true, + statsPrettifyFields: ['Size', 'Indexes size', 'Table size', + 'Toast table size', 'Tuple length', + 'Dead tuple length', 'Free space'], + sqlAlterHelp: 'sql-altertable.html', + sqlCreateHelp: 'sql-createtable.html', + dialogHelp: url_for('help.static', {'filename': 'table_dialog.html'}), + hasScriptTypes: ['create'], + height: '95%', + width: '85%', + Init: function() { + /* Avoid mulitple registration of menus */ + if (this.initialized) + return; + + this.initialized = true; + + pgBrowser.add_menus([{ + name: 'truncate_table', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'truncate_table', + category: 'Truncate', priority: 3, label: gettext('Truncate'), + icon: 'fa fa-eraser', enable : 'canCreate' + },{ + name: 'truncate_table_cascade', node: 'partition', module: this, + applies: 
['object', 'context'], callback: 'truncate_table_cascade', + category: 'Truncate', priority: 3, label: gettext('Truncate Cascade'), + icon: 'fa fa-eraser', enable : 'canCreate' + },{ + // To enable/disable all triggers for the table + name: 'enable_all_triggers', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'enable_triggers_on_table', + category: 'Trigger(s)', priority: 4, label: gettext('Enable All'), + icon: 'fa fa-check', enable : 'canCreate_with_trigger_enable' + },{ + name: 'disable_all_triggers', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'disable_triggers_on_table', + category: 'Trigger(s)', priority: 4, label: gettext('Disable All'), + icon: 'fa fa-times', enable : 'canCreate_with_trigger_disable' + },{ + name: 'reset_table_stats', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'reset_table_stats', + category: 'Reset', priority: 4, label: gettext('Reset Statistics'), + icon: 'fa fa-bar-chart', enable : 'canCreate' + },{ + name: 'detach_partition', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'detach_partition', + priority: 2, label: gettext('Detach Partition'), + icon: 'fa fa-remove' + } + ]); + }, + getTreeNodeHierarchy: function(i) { + var idx = 0, + res = {}, + t = pgBrowser.tree; + + do { + d = t.itemData(i); + if ( + d._type in pgBrowser.Nodes && pgBrowser.Nodes[d._type].hasId + ) { + if (d._type == 'partition' && 'partition' in res) { + if (!('table' in res)) { + res['table'] = _.extend({}, d, {'priority': idx}); + idx -= 1; + } + } else if (d._type == 'table') { + if (!('table' in res)) { + res['table'] = _.extend({}, d, {'priority': idx}); + idx -= 1; + } + } else { + res[d._type] = _.extend({}, d, {'priority': idx}); + idx -= 1; + } + } + i = t.hasParent(i) ? 
t.parent(i) : null; + } while (i); + + return res; + }, + generate_url: function(item, type, d, with_id, info) { + if (_.indexOf([ + 'stats', 'statistics', 'dependency', 'dependent', 'reset', + 'get_relations', 'get_oftype', 'get_attach_tables' + ], type) == -1) { + return pgBrowser.Node.generate_url.apply(this, arguments); + } + + if (type == 'statistics') { + type = 'stats'; + } + + info = (_.isUndefined(item) || _.isNull(item)) ? + info || {} : this.getTreeNodeHierarchy(item); + + return S('table/%s/%s/%s/%s/%s/%s').sprintf( + encodeURIComponent(type), encodeURIComponent(info['server-group']._id), + encodeURIComponent(info['server']._id), + encodeURIComponent(info['database']._id), + encodeURIComponent(info['partition'].schema_id), + encodeURIComponent(info['partition']._id) + ).value(); + }, + canDrop: pgBrowser.Nodes['schema'].canChildDrop, + canDropCascade: pgBrowser.Nodes['schema'].canChildDrop, + callbacks: { + /* Enable trigger(s) on table */ + enable_triggers_on_table: function(args) { + var params = {'enable': true }; + this.callbacks.set_triggers.apply(this, [args, params]); + }, + /* Disable trigger(s) on table */ + disable_triggers_on_table: function(args) { + var params = {'enable': false }; + this.callbacks.set_triggers.apply(this, [args, params]); + }, + set_triggers: function(args, params) { + // This function will send request to enable or + // disable triggers on table level + var input = args || {}; + obj = this, + t = pgBrowser.tree, + i = input.item || t.selected(), + d = i && i.length == 1 ? 
t.itemData(i) : undefined; + if (!d) + return false; + + $.ajax({ + url: obj.generate_url(i, 'set_trigger' , d, true), + type:'PUT', + data: params, + dataType: "json", + success: function(res) { + if (res.success == 1) { + alertify.success(res.info); + t.unload(i); + t.setInode(i); + t.deselect(i); + setTimeout(function() { + t.select(i); + }, 10); + } + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + alertify.error(err.errormsg); + } + } catch (e) {} + t.unload(i); + } + }); + }, + /* Truncate table */ + truncate_table: function(args) { + var params = {'cascade': false }; + this.callbacks.truncate.apply(this, [args, params]); + }, + /* Truncate table with cascade */ + truncate_table_cascade: function(args) { + var params = {'cascade': true }; + this.callbacks.truncate.apply(this, [args, params]); + }, + truncate: function(args, params) { + var input = args || {}; + obj = this, + t = pgBrowser.tree, + i = input.item || t.selected(), + d = i && i.length == 1 ? 
t.itemData(i) : undefined; + + if (!d) + return false; + + alertify.confirm( + gettext('Truncate Table'), + S(gettext('Are you sure you want to truncate table %s?')).sprintf(d.label).value(), + function (e) { + if (e) { + var data = d; + $.ajax({ + url: obj.generate_url(i, 'truncate' , d, true), + type:'PUT', + data: params, + dataType: "json", + success: function(res) { + if (res.success == 1) { + alertify.success(res.info); + t.removeIcon(i); + data.icon = 'icon-table'; + t.addIcon(i, {icon: data.icon}); + t.unload(i); + t.setInode(i); + t.deselect(i); + // Fetch updated data from server + setTimeout(function() { + t.select(i); + }, 10); + } + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + alertify.error(err.errormsg); + } + } catch (e) {} + t.unload(i); + } + }); + }}, + function() {} + ); + }, + reset_table_stats: function(args) { + var input = args || {}, + obj = this, + t = pgBrowser.tree, + i = input.item || t.selected(), + d = i && i.length == 1 ? 
t.itemData(i) : undefined; + + if (!d) + return false; + + alertify.confirm( + gettext('Reset statistics'), + S(gettext('Are you sure you want to reset the statistics for table %s?')).sprintf(d._label).value(), + function (e) { + if (e) { + var data = d; + $.ajax({ + url: obj.generate_url(i, 'reset' , d, true), + type:'DELETE', + success: function(res) { + if (res.success == 1) { + alertify.success(res.info); + t.removeIcon(i); + data.icon = 'icon-table'; + t.addIcon(i, {icon: data.icon}); + t.unload(i); + t.setInode(i); + t.deselect(i); + // Fetch updated data from server + setTimeout(function() { + t.select(i); + }, 10); + } + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + alertify.error(err.errormsg); + } + } catch (e) {} + t.unload(i); + } + }); + } + }, + function() {} + ); + }, + detach_partition: function(args) { + var input = args || {}, + obj = this, + t = pgBrowser.tree, + i = input.item || t.selected(), + d = i && i.length == 1 ? 
t.itemData(i) : undefined; + + if (!d) + return false; + + alertify.confirm( + gettext('Detach Partition'), + S(gettext('Are you sure you want to detach the partition %s?')).sprintf(d._label).value(), + function (e) { + if (e) { + var data = d; + $.ajax({ + url: obj.generate_url(i, 'detach' , d, true), + type:'PUT', + success: function(res) { + if (res.success == 1) { + alertify.success(res.info); + var n = t.next(i); + if (!n || !n.length) { + n = t.prev(i); + if (!n || !n.length) { + n = t.parent(i); + t.setInode(n, true); + } + } + t.remove(i); + if (n.length) { + t.select(n); + } + } + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + alertify.error(err.errormsg); + } + } catch (e) {} + } + }); + } + }, + function() {} + ); + } + }, + model: pgBrowser.Node.Model.extend({ + defaults: { + name: undefined, + oid: undefined, + spcoid: undefined, + spcname: undefined, + relowner: undefined, + relacl: undefined, + relhasoids: undefined, + relhassubclass: undefined, + reltuples: undefined, + description: undefined, + conname: undefined, + conkey: undefined, + isrepl: undefined, + triggercount: undefined, + relpersistence: undefined, + fillfactor: undefined, + reloftype: undefined, + typname: undefined, + labels: undefined, + providers: undefined, + is_sys_table: undefined, + coll_inherits: [], + hastoasttable: true, + toast_autovacuum_enabled: false, + autovacuum_enabled: false, + primary_key: [], + partitions: [], + partition_type: 'range', + is_partitioned: false, + partition_value: undefined + }, + // Default values! 
+ initialize: function(attrs, args) { + var self = this; + + if (_.size(attrs) === 0) { + var userInfo = pgBrowser.serverInfo[args.node_info.server._id].user, + schemaInfo = args.node_info.schema; + + this.set({ + 'relowner': userInfo.name, 'schema': schemaInfo._label + }, {silent: true}); + } + pgBrowser.Node.Model.prototype.initialize.apply(this, arguments); + + }, + schema: [{ + id: 'name', label: gettext('Name'), type: 'text', + mode: ['properties', 'create', 'edit'], disabled: 'inSchema' + },{ + id: 'oid', label: gettext('OID'), type: 'text', mode: ['properties'] + },{ + id: 'relowner', label: gettext('Owner'), type: 'text', node: 'role', + mode: ['properties', 'create', 'edit'], select2: {allowClear: false}, + disabled: 'inSchema', control: 'node-list-by-name' + },{ + id: 'schema', label: gettext('Schema'), type: 'text', node: 'schema', + control: 'node-list-by-name', mode: ['create', 'edit', 'properties'], + disabled: 'inSchema', filter: function(d) { + // If schema name start with pg_* then we need to exclude them + if(d && d.label.match(/^pg_/)) + { + return false; + } + return true; + }, cache_node: 'database', cache_level: 'database' + },{ + id: 'spcname', label: gettext('Tablespace'), node: 'tablespace', + type: 'text', control: 'node-list-by-name', disabled: 'inSchema', + mode: ['properties', 'create', 'edit'], + filter: function(d) { + // If tablespace name is not "pg_global" then we need to exclude them + return (!(d && d.label.match(/pg_global/))) + } + },{ + id: 'partition', type: 'group', label: gettext('Partition'), + mode: ['edit', 'create'], min_version: 100000, + visible: function(m) { + // Always show in case of create mode + if (m.isNew() || m.get('is_partitioned')) + return true; + return false; + } + },{ + id: 'is_partitioned', label:gettext('Partitioned Table?'), cell: 'switch', + type: 'switch', mode: ['properties', 'create', 'edit'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && 
!_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew()) + return true; + return false; + } + },{ + id: 'description', label: gettext('Comment'), type: 'multiline', + mode: ['properties', 'create', 'edit'], disabled: 'inSchema' + }, + { + id: 'partition_value', label:gettext('Partition Scheme'), + type: 'text', visible: false + },{ + id: 'coll_inherits', label: gettext('Inherited from table(s)'), + type: 'text', group: gettext('Advanced'), mode: ['properties'] + },{ + id: 'Columns', type: 'group', label: gettext('Columns'), + mode: ['edit', 'create'], min_version: 100000, + visible: function(m) { + // Always hide in case of partition table. + return false; + } + },{ + // Tab control for columns + id: 'columns', label: gettext('Columns'), type: 'collection', + group: gettext('Columns'), + model: pgBrowser.Nodes['column'].model, + subnode: pgBrowser.Nodes['column'].model, + mode: ['create', 'edit'], + disabled: function(m) { + // In case of partitioned table remove inherited columns + if (m.isNew() && m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('columns'); + coll.remove(coll.filter(function(model) { + if (_.isUndefined(model.get('inheritedfrom'))) + return false; + return true; + })); + }, 10); + } + + if(this.node_info && 'catalog' in this.node_info) + { + return true; + } + return false; + }, + deps: ['typname', 'is_partitioned'], + canAdd: 'check_grid_add_condition', + canEdit: true, canDelete: true, + // For each row edit/delete button enable/disable + canEditRow: 'check_grid_row_edit_delete', + canDeleteRow: 'check_grid_row_edit_delete', + uniqueCol : ['name'], + columns : ['name' , 'cltype', 'attlen', 'attprecision', 'attnotnull', 'is_primary_key'], + control: Backform.UniqueColCollectionControl.extend({ + initialize: function() { + Backform.UniqueColCollectionControl.prototype.initialize.apply(this, arguments); + var self = 
this, + collection = self.model.get(self.field.get('name')); + + collection.on("change:is_primary_key", function(m) { + var primary_key_coll = self.model.get('primary_key'), + column_name = m.get('name'), + primary_key; + + if(m.get('is_primary_key')) { + // Add column to primary key. + if (primary_key_coll.length < 1) { + primary_key = new (primary_key_coll.model)({}, { + top: self.model, + collection: primary_key_coll, + handler: primary_key_coll + }); + primary_key_coll.add(primary_key); + } else { + primary_key = primary_key_coll.first(); + } + // Do not alter existing primary key columns. + if (_.isUndefined(primary_key.get('oid'))) { + var primary_key_column_coll = primary_key.get('columns'), + primary_key_column_exist = primary_key_column_coll.where({column:column_name}); + + if (primary_key_column_exist.length == 0) { + var primary_key_column = new (primary_key_column_coll.model)( + {column: column_name}, { silent: true, + top: self.model, + collection: primary_key_coll, + handler: primary_key_coll + }); + + primary_key_column_coll.add(primary_key_column); + } + + primary_key_column_coll.trigger('pgadmin:multicolumn:updated', primary_key_column_coll); + } + + } else { + // remove column from primary key. + if (primary_key_coll.length > 0) { + var primary_key = primary_key_coll.first(); + // Do not alter existing primary key columns. + if (!_.isUndefined(primary_key.get('oid'))) { + return; + } + + var primary_key_column_coll = primary_key.get('columns'), + removedCols = primary_key_column_coll.where({column:column_name}); + if (removedCols.length > 0) { + primary_key_column_coll.remove(removedCols); + _.each(removedCols, function(m) { + m.destroy(); + }) + if (primary_key_column_coll.length == 0) { + setTimeout(function () { + // There will be only on primary key so remove the first one. + primary_key_coll.remove(primary_key_coll.first()); + /* Ideally above line of code should be "primary_key_coll.reset()". 
+ * But our custom DataCollection (extended from Backbone collection in datamodel.js) + * does not respond to reset event, it only supports add, remove, change events. + * And hence no custom event listeners/validators get called for reset event. + */ + }, 10); + } + } + primary_key_column_coll.trigger('pgadmin:multicolumn:updated', primary_key_column_coll); + } + } + }) + }, + remove: function() { + var collection = this.model.get(this.field.get('name')); + if (collection) { + collection.off("change:is_primary_key"); + } + + Backform.UniqueColCollectionControl.prototype.remove.apply(this, arguments); + } + }), + allowMultipleEmptyRow: false + },{ + id: 'inherited_tables_cnt', label: gettext('Inherited tables count'), + type: 'text', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + // Here we will create tab control for constraints + type: 'nested', control: 'tab', group: gettext('Constraints'), + mode: ['edit', 'create'], + schema: [{ + id: 'primary_key', label: gettext('Primary key'), + model: pgBrowser.Nodes['primary_key'].model, + subnode: pgBrowser.Nodes['primary_key'].model, + editable: false, type: 'collection', + group: gettext('Primary Key'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, deps:['is_partitioned'], + control: 'unique-col-collection', + columns : ['name', 'columns'], + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('primary_key'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, + canAddRow: function(m) { + // User can only add one primary key + var columns = m.get('columns'); + + return (m.get('primary_key') && + m.get('primary_key').length < 1 && + _.some(columns.pluck('name'))); + } + },{ + id: 'foreign_key', label: gettext('Foreign key'), + model: pgBrowser.Nodes['foreign_key'].model, + subnode: pgBrowser.Nodes['foreign_key'].model, + editable: false, type: 'collection', + 
group: gettext('Foreign Key'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, deps:['is_partitioned'], + control: 'unique-col-collection', + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('foreign_key'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, + columns : ['name', 'columns'], + canAddRow: function(m) { + // User can only add if there is at least one column with name. + var columns = m.get('columns'); + return _.some(columns.pluck('name')); + } + },{ + id: 'check_constraint', label: gettext('Check constraint'), + model: pgBrowser.Nodes['check_constraints'].model, + subnode: pgBrowser.Nodes['check_constraints'].model, + editable: false, type: 'collection', + group: gettext('Check'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, deps:['is_partitioned'], + control: 'unique-col-collection', + canAdd: true, + columns : ['name', 'consrc'] + },{ + id: 'unique_constraint', label: gettext('Unique Constraint'), + model: pgBrowser.Nodes['unique_constraint'].model, + subnode: pgBrowser.Nodes['unique_constraint'].model, + editable: false, type: 'collection', + group: gettext('Unique'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, deps:['is_partitioned'], + control: 'unique-col-collection', + columns : ['name', 'columns'], + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('unique_constraint'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, + canAddRow: function(m) { + // User can only add if there is at least one column with name. 
+ var columns = m.get('columns'); + return _.some(columns.pluck('name')); + } + },{ + id: 'exclude_constraint', label: gettext('Exclude constraint'), + model: pgBrowser.Nodes['exclusion_constraint'].model, + subnode: pgBrowser.Nodes['exclusion_constraint'].model, + editable: false, type: 'collection', + group: gettext('Exclude'), mode: ['edit', 'create'], + canEdit: true, canDelete: true, deps:['is_partitioned'], + control: 'unique-col-collection', + columns : ['name', 'columns', 'constraint'], + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('exclude_constraint'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, + canAddRow: function(m) { + // User can only add if there is at least one column with name. + var columns = m.get('columns'); + return _.some(columns.pluck('name')); + } + }] + },{ + id: 'typname', label: gettext('Of type'), type: 'text', + control: 'node-ajax-options', mode: ['properties', 'create', 'edit'], + disabled: 'checkOfType', url: 'get_oftype', group: gettext('Advanced'), + deps: ['coll_inherits', 'is_partitioned'], transform: function(data, cell) { + var control = cell || this, + m = control.model; + m.of_types_tables = data; + return data; + }, + control: Backform.NodeAjaxOptionsControl.extend({ + // When of_types changes we need to clear columns collection + onChange: function() { + Backform.NodeAjaxOptionsControl.prototype.onChange.apply(this, arguments); + var self = this, + tbl_oid = undefined, + tbl_name = self.model.get('typname'), + data = undefined, + arg = undefined, + column_collection = self.model.get('columns'); + + if (!_.isUndefined(tbl_name) && + tbl_name !== '' && column_collection.length !== 0) { + var msg = gettext('Changing of type table will clear columns collection'); + alertify.confirm(msg, function (e) { + if (e) { + // User clicks Ok, lets clear columns collection + column_collection.reset(); + } 
else { + return this; + } + }); + } else if (!_.isUndefined(tbl_name) && tbl_name === '') { + column_collection.reset(); + } + + // Run Ajax now to fetch columns + if (!_.isUndefined(tbl_name) && tbl_name !== '') { + arg = { 'tname': tbl_name } + data = self.model.fetch_columns_ajax.apply(self, [arg]); + // Add into column collection + column_collection.set(data, { merge:false,remove:false }); + } + } + }) + },{ + id: 'fillfactor', label: gettext('Fill factor'), type: 'int', + mode: ['create', 'edit'], min: 10, max: 100, + disabled: 'inSchema',group: gettext('Advanced') + },{ + id: 'relhasoids', label: gettext('Has OIDs?'), cell: 'switch', + type: 'switch', mode: ['properties', 'create', 'edit'], + disabled: 'inSchema', group: gettext('Advanced') + },{ + id: 'relpersistence', label: gettext('Unlogged?'), cell: 'switch', + type: 'switch', mode: ['properties', 'create', 'edit'], + disabled: 'inSchemaWithModelCheck', + group: gettext('Advanced') + },{ + id: 'conname', label: gettext('Primary key'), cell: 'string', + type: 'text', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + id: 'reltuples', label: gettext('Rows (estimated)'), cell: 'string', + type: 'text', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + id: 'rows_cnt', label: gettext('Rows (counted)'), cell: 'string', + type: 'text', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + id: 'relhassubclass', label: gettext('Inherits tables?'), cell: 'switch', + type: 'switch', mode: ['properties'], group: gettext('Advanced'), + disabled: 'inSchema' + },{ + id: 'is_sys_table', label: gettext('System table?'), cell: 'switch', + type: 'switch', mode: ['properties'], + disabled: 'inSchema' + },{ + type: 'nested', control: 'fieldset', label: gettext('Like'), + group: gettext('Advanced'), + schema:[{ + id: 'like_relation', label: gettext('Relation'), + type: 'text', mode: ['create', 'edit'], deps: ['typname'], + control: 
'node-ajax-options', url: 'get_relations', + disabled: 'isLikeDisable', group: gettext('Like') + },{ + id: 'like_default_value', label: gettext('With default values?'), + type: 'switch', mode: ['create', 'edit'], deps: ['typname'], + disabled: 'isLikeDisable', group: gettext('Like') + },{ + id: 'like_constraints', label: gettext('With constraints?'), + type: 'switch', mode: ['create', 'edit'], deps: ['typname'], + disabled: 'isLikeDisable', group: gettext('Like') + },{ + id: 'like_indexes', label: gettext('With indexes?'), + type: 'switch', mode: ['create', 'edit'], deps: ['typname'], + disabled: 'isLikeDisable', group: gettext('Like') + },{ + id: 'like_storage', label: gettext('With storage?'), + type: 'switch', mode: ['create', 'edit'], deps: ['typname'], + disabled: 'isLikeDisable', group: gettext('Like') + },{ + id: 'like_comments', label: gettext('With comments?'), + type: 'switch', mode: ['create', 'edit'], deps: ['typname'], + disabled: 'isLikeDisable', group: gettext('Like') + }] + },{ + id: 'partition_type', label:gettext('Partition Type'), + editable: false, type: 'select2', select2: {allowClear: false}, + group: 'partition', deps: ['is_partitioned'], + options:[{ + label: 'Range', value: 'range' + },{ + label: 'List', value: 'list' + }], + mode:['create'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew() || !m.get('is_partitioned')) + return true; + return false; + } + },{ + id: 'partition_keys', label:gettext('Partition Keys'), + model: Backform.PartitionKeyModel, + subnode: Backform.PartitionKeyModel, + editable: true, type: 'collection', + group: 'partition', mode: ['create'], + deps: ['is_partitioned', 'partition_type'], + canEdit: false, canDelete: true, + control: 'sub-node-collection', + canAdd: function(m) { + if (m.isNew() && 
m.get('is_partitioned')) + return true; + return false; + }, + canAddRow: function(m) { + var columns = m.get('columns'); + var max_row_count = 1000; + + if (m.get('partition_type') && m.get('partition_type') == 'list') + max_row_count = 1; + + return (m.get('partition_keys') && + m.get('partition_keys').length < max_row_count && + _.some(columns.pluck('name')) + ); + }, + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (m.get('partition_keys') && m.get('partition_keys').models.length > 0) { + setTimeout(function () { + var coll = m.get('partition_keys'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + } + } + },{ + id: 'partition_scheme', label: gettext('Partition Scheme'), + type: 'note', group: 'partition', mode: ['edit'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew()) { + this.text = m.get('partition_scheme'); + } + } + },{ + id: 'partitions', label:gettext('Partitions'), + model: Backform.PartitionsModel, + subnode: Backform.PartitionsModel, + editable: true, type: 'collection', + group: 'partition', mode: ['edit', 'create'], + deps: ['is_partitioned', 'partition_type'], + canEdit: false, canDelete: true, + customDeleteTitle: gettext('Detach Partition'), + customDeleteMsg: gettext('Are you sure you wish to detach this partition?'), + columns:['is_attach', 'partition_name', 'values_from', 'values_to', 'values_in'], + control: Backform.SubNodeCollectionControl.extend({ + row: Backgrid.PartitionRow, + initialize: function() { + Backform.SubNodeCollectionControl.prototype.initialize.apply(this, arguments); + var 
self = this; + if (!this.model.isNew()) { + var node = this.field.get('schema_node'), + node_info = this.field.get('node_info'); + + // Make ajax call to get the tables to be attached + $.ajax({ + url: node.generate_url.apply( + node, [ + null, 'get_attach_tables', this.field.get('node_data'), + true, node_info + ]), + + type: 'GET', + async: false, + success: function(res) { + if (res.success == 1) { + self.model.table_options = res.data; + } + else { + alertify.alert( + 'Error fetching tables to be attached', res.data.result + ); + } + }, + error: function(e) { + var errmsg = $.parseJSON(e.responseText); + alertify.alert('Error fetching tables to be attached.', errmsg.errormsg); + } + }); + } + } + } + ), + canAdd: function(m) { + if (m.get('is_partitioned')) + return true; + return false; + }, + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (m.isNew() && m.get('partitions') && m.get('partitions').models.length > 0) { + setTimeout(function () { + var coll = m.get('partitions'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + } + } + },{ + id: 'partition_note', label: gettext('Partition'), + type: 'note', group: 'partition', + text: gettext('Above control will be used to Create/Attach/Detach partitions.
' + + '
  • Create Mode: User will be able to create N number of partitions. Mode switch control is disabled in this scenario.
  • ' + + '
  • Edit Mode: User will be able to create/attach/detach N number of partitions.' + + 'In attach mode there will be list of suitable tables to be attached.
'), + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + } + },{ + // Here - we will create tab control for storage parameters + // (auto vacuum). + type: 'nested', control: 'tab', group: gettext('Parameter'), + mode: ['edit', 'create'], deps: ['is_partitioned'], + schema: Backform.VacuumSettingsSchema + },{ + id: 'relacl_str', label: gettext('Privileges'), disabled: 'inSchema', + type: 'text', mode: ['properties'], group: gettext('Security') + }, pgBrowser.SecurityGroupUnderSchema,{ + id: 'relacl', label: gettext('Privileges'), type: 'collection', + group: 'security', control: 'unique-col-collection', + model: pgBrowser.Node.PrivilegeRoleModel.extend({ + privileges: ['a','r','w','d','D','x','t']}), + mode: ['edit', 'create'], canAdd: true, canDelete: true, + uniqueCol : ['grantee'] + },{ + id: 'seclabels', label: gettext('Security labels'), canEdit: false, + model: pgBrowser.SecLabelModel, editable: false, canAdd: true, + type: 'collection', min_version: 90100, mode: ['edit', 'create'], + group: 'security', canDelete: true, control: 'unique-col-collection' + },{ + id: 'vacuum_settings_str', label: gettext('Storage settings'), + type: 'multiline', group: gettext('Advanced'), mode: ['properties'] + }], + validate: function(keys) { + var err = {}, + changedAttrs = this.changed, + msg = undefined, + name = this.get('name'), + schema = this.get('schema'), + relowner = this.get('relowner'), + is_partitioned = this.get('is_partitioned'), + partition_keys = this.get('partition_keys'); + + // If nothing to validate or VacuumSetting keys then + // return from here + if ( keys && (keys.length == 0 + || _.indexOf(keys, 'autovacuum_enabled') != -1 + || _.indexOf(keys, 'toast_autovacuum_enabled') != -1) ) { + return null; + } + + // Have to clear existing validation before initiating current state validation 
only + this.errorModel.clear(); + + if (_.isUndefined(name) || _.isNull(name) || + String(name).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Table name cannot be empty.'); + this.errorModel.set('name', msg); + return msg; + } else if (_.isUndefined(schema) || _.isNull(schema) || + String(schema).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Table schema cannot be empty.'); + this.errorModel.set('schema', msg); + return msg; + } else if (_.isUndefined(relowner) || _.isNull(relowner) || + String(relowner).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Table owner cannot be empty.'); + this.errorModel.set('relowner', msg); + return msg; + } else if (is_partitioned && this.isNew() && + !_.isNull(partition_keys) && partition_keys.length <= 0) + { + msg = gettext('Please specify at least one key for partitioned table.'); + this.errorModel.set('partition_keys', msg); + return msg; + } + return null; + }, + // We will disable everything if we are under catalog node + inSchema: function() { + if(this.node_info && 'catalog' in this.node_info) + { + return true; + } + return false; + }, + isInheritedTable: function(m) { + if(!m.inSchema.apply(this, [m])) { + if( + (!_.isUndefined(m.get('coll_inherits')) && m.get('coll_inherits').length != 0) + || + (!_.isUndefined(m.get('typname')) && String(m.get('typname')).replace(/^\s+|\s+$/g, '') !== '') + ) { + // Either of_types or coll_inherits has value + return false; + } else { + return true; + } + } + return false; + }, + // Oftype is defined? 
+ checkInheritance: function(m) { + // Disabled if it is partitioned table + if (m.get('is_partitioned')) { + setTimeout( function() { + m.set('coll_inherits', []); + }, 10); + return true; + } + + // coll_inherits || typname + if(!m.inSchema.apply(this, [m]) && + ( _.isUndefined(m.get('typname')) || + _.isNull(m.get('typname')) || + String(m.get('typname')).replace(/^\s+|\s+$/g, '') == '')) { + return false; + } + return true; + }, + // We will disable Like if ofType is defined + isLikeDisable: function(m) { + if(!m.inSchemaWithModelCheck.apply(this, [m]) && + ( _.isUndefined(m.get('typname')) || + _.isNull(m.get('typname')) || + String(m.get('typname')).replace(/^\s+|\s+$/g, '') == '')) { + return false; + } + return true; + }, + // Check for column grid when to Add + check_grid_add_condition: function(m) { + var enable_flag = true; + if(!m.inSchema.apply(this, [m])) { + // if of_type then disable add in grid + if (!_.isUndefined(m.get('typname')) && + !_.isNull(m.get('typname')) && + m.get('typname') !== '') { + enable_flag = false; + } + } + return enable_flag; + }, + // Check for column grid when to edit/delete (for each row) + check_grid_row_edit_delete: function(m) { + var flag = true; + if(!_.isUndefined(m.get('inheritedfrom')) && + !_.isNull(m.get('inheritedfrom')) && + String(m.get('inheritedfrom')).replace(/^\s+|\s+$/g, '') !== '') { + flag = false; + } + return flag; + }, + // We will disable it if Inheritance is defined + checkOfType: function(m) { + // Disabled if it is partitioned table + if (m.get('is_partitioned')) { + setTimeout( function() { + m.set('typname', undefined); + }, 10); + return true; + } + + //coll_inherits || typname + if(!m.inSchemaWithModelCheck.apply(this, [m]) && + (_.isUndefined(m.get('coll_inherits')) || + _.isNull(m.get('coll_inherits')) || + String(m.get('coll_inherits')).replace(/^\s+|\s+$/g, '') == '')) { + return false; + } + return true; + }, + // We will check if we are under schema node & in 'create' mode + 
inSchemaWithModelCheck: function(m) { + if(this.node_info && 'schema' in this.node_info) + { + // We will disable control if it's in 'edit' mode + if (m.isNew()) { + return false; + } else { + return true; + } + } + return true; + }, + isTableAutoVacuumEnable: function(m) { + // We need to check additional condition to toggle enable/disable + // for table auto-vacuum + if(!m.inSchema.apply(this, [m]) && + m.get('autovacuum_enabled') === true) { + return false; + } + return true; + }, + isToastTableAutoVacuumEnable: function(m) { + // We need to check additional condition to toggle enable/disable + // for toast table auto-vacuum + if(!m.inSchemaWithModelCheck.apply(this, [m]) && + m.get('toast_autovacuum_enabled') == true) { + return false; + } + return true; + }, + fetch_columns_ajax: function(arg) { + var self = this, + url = 'get_columns', + m = self.model.top || self.model, + old_columns = _.clone(m.get('columns')), + data = undefined, + node = this.field.get('schema_node'), + node_info = this.field.get('node_info'), + full_url = node.generate_url.apply( + node, [ + null, url, this.field.get('node_data'), + this.field.get('url_with_id') || false, node_info + ] + ), + cache_level = this.field.get('cache_level') || node.type, + cache_node = this.field.get('cache_node'); + + cache_node = (cache_node && pgBrowser.Nodes[cache_node]) || node; + + m.trigger('pgadmin:view:fetching', m, self.field); + // Fetching Columns data for the selected table.
+ $.ajax({ + async: false, + url: full_url, + data: arg, + success: function(res) { + data = cache_node.cache(url, node_info, cache_level, res.data); + }, + error: function() { + m.trigger('pgadmin:view:fetch:error', m, self.field); + } + }); + m.trigger('pgadmin:view:fetched', m, self.field); + data = (data && data.data) || []; + return data; + } + }), + canCreate: function(itemData, item, data) { + //If check is false then, we will allow create menu + if (data && data.check == false) + return true; + + var t = pgBrowser.tree, i = item, d = itemData; + // To iterate over tree to check parent node + while (i) { + // If it is schema then allow user to create table + if (_.indexOf(['schema'], d._type) > -1) + return true; + + if ('coll-table' == d._type) { + //Check if we are not child of catalog + var prev_i = t.hasParent(i) ? t.parent(i) : null; + var prev_d = prev_i ? t.itemData(prev_i) : null; + if( prev_d._type == 'catalog') { + return false; + } else { + return true; + } + } + i = t.hasParent(i) ? t.parent(i) : null; + d = i ?
t.itemData(i) : null; + } + // by default we do not want to allow create menu + return true; + }, + // Check to whether table has disable trigger(s) + canCreate_with_trigger_enable: function(itemData, item, data) { + if(this.canCreate.apply(this, [itemData, item, data])) { + // We are here means we can create menu, now let's check condition + if(itemData.tigger_count > 0) { + return true; + } else { + return false; + } + } + }, + // Check to whether table has enable trigger(s) + canCreate_with_trigger_disable: function(itemData, item, data) { + if(this.canCreate.apply(this, [itemData, item, data])) { + // We are here means we can create menu, now let's check condition + if(itemData.tigger_count > 0 && itemData.has_enable_triggers > 0) { + return true; + } else { + return false; + } + } + } + }); + } + + return pgBrowser.Nodes['partition']; +}); diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/rules/templates/rules/js/rules.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/rules/templates/rules/js/rules.js index 25fe96305..db91af32b 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/rules/templates/rules/js/rules.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/rules/templates/rules/js/rules.js @@ -16,6 +16,7 @@ define('pgadmin.node.rule', [ node: 'rule', label: gettext('Rules'), type: 'coll-rule', + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, columns: ["name", "owner", "comment"] }); } @@ -33,8 +34,9 @@ define('pgadmin.node.rule', [ rule option in the context menu */ if (!pgBrowser.Nodes['rule']) { - pgAdmin.Browser.Nodes['rule'] = pgAdmin.Browser.Node.extend({ - parent_type: ['table','view'], + pgAdmin.Browser.Nodes['rule'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, + parent_type: ['table','view', 'partition'], type: 'rule', sqlAlterHelp: 'sql-alterrule.html', sqlCreateHelp: 
'sql-createrule.html', @@ -98,6 +100,12 @@ define('pgadmin.node.rule', [ category: 'create', priority: 4, label: gettext('Rule...'), icon: 'wcTabIcon icon-rule', data: {action: 'create', check: true}, enable: 'canCreate' + },{ + name: 'create_rule', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'show_obj_properties', + category: 'create', priority: 4, label: gettext('Rule...'), + icon: 'wcTabIcon icon-rule', data: {action: 'create', check: true}, + enable: 'canCreate' } ]); }, diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/static/js/partition.utils.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/static/js/partition.utils.js new file mode 100644 index 000000000..6fe46bae2 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/static/js/partition.utils.js @@ -0,0 +1,323 @@ +define( + ['sources/gettext', 'jquery', 'underscore', 'pgadmin.browser', + 'backform','backgrid', 'pgadmin.browser.collection'], +function(gettext, $, _, pgBrowser, Backform, Backgrid) { + + Backgrid.PartitionRow = Backgrid.Row.extend({ + modelDuplicateColor: "lightYellow", + + modelUniqueColor: "#fff", + + initialize: function () { + Backgrid.Row.prototype.initialize.apply(this, arguments); + var self = this; + self.model.on("change:is_attach", function() { + setTimeout(function() { + self.columns.each(function(col) { + if (col.get('name') == 'partition_name') { + var idx = self.columns.indexOf(col), + cf = col.get("cellFunction"), + cell = new (cf.apply(col, [self.model]))({ + column: col, + model: self.model + }), + oldCell = self.cells[idx]; + oldCell.remove(); + self.cells[idx] = cell; + self.render(); + } + }); + }, 10); + }); + self.listenTo(self.model, 'pgadmin-session:model:duplicate', self.modelDuplicate); + self.listenTo(self.model, 'pgadmin-session:model:unique', self.modelUnique); + }, + modelDuplicate: function() { + $(this.el).removeClass("new"); + 
this.el.style.backgroundColor = this.modelDuplicateColor; + }, + modelUnique: function() { + this.el.style.backgroundColor = this.modelUniqueColor; + } + }); + + var getPartitionCell = function(model) { + var is_attach = model.get("is_attach"); + if (is_attach) { + var options = []; + model.set({'partition_name': undefined}, {silent:true}); + _.each(model.top.table_options, function(t) { + options.push([t.label, t.value]); + }); + return Backgrid.Extension.Select2Cell.extend({optionValues: options}); + + } else { + return Backgrid.StringCell; + } + }; + + Backform.PartitionKeyModel = pgBrowser.Node.Model.extend({ + defaults: { + key_type: 'column', + pt_column: undefined, + expression: undefined + }, + keys:['pt_column'], + schema: [{ + id: 'key_type', label:'Key type', type:'select2', editable: true, + cell:'select2', cellHeaderClasses: 'width_percent_25', + select2: {allowClear: false}, + options:[{ + label: 'Column', value: 'column' + },{ + label: 'Expression', value: 'expression' + }] + },{ + id: 'pt_column', label: gettext('Column'), type:'text', + cell: Backgrid.Extension.Select2DepCell.extend({ + keyPathAccessor: function(obj, path) { + var res = obj; + if(_.isArray(res)) { + return _.map(res, function(o) { return o['pt_column'] + }); + } + path = path.split('.'); + for (var i = 0; i < path.length; i++) { + if (_.isNull(res)) return null; + if (_.isEmpty(path[i])) continue; + if (!_.isUndefined(res[path[i]])) res = res[path[i]]; + } + return _.isObject(res) && !_.isArray(res) ? null : res; + }, + initialize: function() { + // Here we will decide if we need to call URL + // Or fetch the data from parent columns collection + var self = this; + if(this.model.handler) { + Backgrid.Extension.Select2DepCell.prototype.initialize.apply(this, arguments); + // Do not listen for any event(s) for existing constraint. 
+ if (_.isUndefined(self.model.get('oid'))) { + var tableCols = self.model.top.get('columns'); + self.listenTo(tableCols, 'remove' , self.resetColOptions); + self.listenTo(tableCols, 'change:name', self.resetColOptions); + } + + self.custom_options(); + } + }, + resetColOptions: function(m) { + var self = this; + + setTimeout(function () { + self.custom_options(); + self.render.apply(self); + }, 50); + }, + custom_options: function() { + // We will add all the columns entered by user in table model + var columns = this.model.top.get('columns'), + added_columns_from_tables = []; + + if (columns.length > 0) { + _.each(columns.models, function(m) { + var col = m.get('name'); + if(!_.isUndefined(col) && !_.isNull(col)) { + added_columns_from_tables.push( + {label: col, value: col, image:'icon-column'} + ); + } + }); + } + // Set the values in to options so that user can select + this.column.set('options', added_columns_from_tables); + }, + remove: function() { + if(this.model.handler) { + var self = this, + tableCols = self.model.top.get('columns'); + self.stopListening(tableCols, 'remove' , self.resetColOptions); + self.stopListening(tableCols, 'change:name' , self.resetColOptions); + Backgrid.Extension.Select2DepCell.prototype.remove.apply(this, arguments); + } + } + }), + deps: ['key_type'], + cellHeaderClasses: 'width_percent_30', + transform : function(data){ + var res = []; + if (data && _.isArray(data)) { + _.each(data, function(d) { + res.push({label: d.label, value: d.label, image:'icon-column'}); + }) + } + return res; + }, + select2:{allowClear:false}, + editable: function(m) { + if (m.get('key_type') == 'expression') { + setTimeout( function() { + m.set('pt_column', undefined); + }, 10); + return false; + } + return true; + } + },{ + id: 'expression', label:'Expression', type:'text', + cell:Backgrid.Extension.StringDepCell, + cellHeaderClasses: 'width_percent_45', + deps: ['key_type'], + editable: function(m) { + if (m.get('key_type') == 'column') { + 
setTimeout( function() { + m.set('expression', undefined); + }, 10); + return false; + } + return true; + } + } + ], + validate: function(keys) { + var msg, col_type = this.get('key_type'), + pt_column = this.get('pt_column'), + expression = this.get('expression'); + + // Have to clear existing validation before initiating current state + // validation only + this.errorModel.clear(); + + if (_.isUndefined(col_type) || _.isNull(col_type) || + String(col_type).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Partition key type cannot be empty.'); + this.errorModel.set('key_type', msg); + return msg; + } + else if (col_type == 'column' && + (_.isUndefined(pt_column) || _.isNull(pt_column) || + String(pt_column).replace(/^\s+|\s+$/g, '') == '')) { + msg = gettext('Partition key column cannot be empty.'); + this.errorModel.set('pt_column', msg); + return msg; + } + else if (col_type == 'expression' && + (_.isUndefined(expression) || _.isNull(expression) || + String(expression).replace(/^\s+|\s+$/g, '') == '')) { + msg = gettext('Partition key expression cannot be empty.'); + this.errorModel.set('expression', msg); + return msg; + } + + return null; + } + }); + + Backform.PartitionsModel = pgBrowser.Node.Model.extend({ + defaults: { + oid: undefined, + is_attach: false, + partition_name: undefined, + values_from: undefined, + values_to: undefined, + values_in: undefined + }, + keys:['partition_name'], + schema: [{ + id: 'oid', label: gettext('OID'), type: 'text' + },{ + id: 'is_attach', label:gettext('Operation'), cell: 'switch', + type: 'switch', options: { 'onText': 'Attach', 'offText': 'Create'}, + cellHeaderClasses: 'width_percent_5', + editable: function(m) { + if (m instanceof Backbone.Model && m.isNew() && !m.top.isNew()) + return true; + return false; + } + },{ + id: 'partition_name', label: gettext('Name'), type: 'text', cell:'string', + cellHeaderClasses: 'width_percent_25', + editable: function(m) { + if (m instanceof Backbone.Model && m.isNew()) + return true; +
return false; + }, cellFunction: getPartitionCell + },{ + id: 'values_from', label:'From', type:'text', + cell:Backgrid.Extension.StringDepCell, + cellHeaderClasses: 'width_percent_20', + editable: function(m) { + if(m.handler && m.handler.top && + m.handler.top.attributes && + m.handler.top.attributes.partition_type == 'range' && + m instanceof Backbone.Model && m.isNew()) + return true; + return false; + } + },{ + id: 'values_to', label:'To', type:'text', + cell:Backgrid.Extension.StringDepCell, + cellHeaderClasses: 'width_percent_20', + editable: function(m) { + if(m.handler && m.handler.top && + m.handler.top.attributes && + m.handler.top.attributes.partition_type == 'range' && + m instanceof Backbone.Model && m.isNew()) + return true; + return false; + } + },{ + id: 'values_in', label:'In', type:'text', + cell:Backgrid.Extension.StringDepCell, + cellHeaderClasses: 'width_percent_25', + editable: function(m) { + if(m.handler && m.handler.top && + m.handler.top.attributes && + m.handler.top.attributes.partition_type == 'list' && + m instanceof Backbone.Model && m.isNew()) + return true; + return false; + } + }], + validate: function(keys) { + var msg, partition_name = this.get('partition_name'), + values_from = this.get('values_from'), + values_to = this.get('values_to'), + values_in = this.get('values_in'); + + // Have to clear existing validation before initiating current state + // validation only + this.errorModel.clear(); + + if (_.isUndefined(partition_name) || _.isNull(partition_name) || + String(partition_name).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('Partition name cannot be empty.'); + this.errorModel.set('partition_name', msg); + return msg; + } + + if (this.top.get('partition_type') == 'range') { + if (_.isUndefined(values_from) || _.isNull(values_from) || + String(values_from).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('For range partition From field cannot be empty.'); + this.errorModel.set('values_from', msg); + return msg; + } else
if (_.isUndefined(values_to) || _.isNull(values_to) || + String(values_to).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('For range partition To field cannot be empty.'); + this.errorModel.set('values_to', msg); + return msg; + } + } else if (this.top.get('partition_type') == 'list') { + if (_.isUndefined(values_in) || _.isNull(values_in) || + String(values_in).replace(/^\s+|\s+$/g, '') == '') { + msg = gettext('For list partition In field cannot be empty.'); + this.errorModel.set('values_in', msg); + return msg; + } + } + + return null; + } + }); + +}); diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/static/js/table.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/static/js/table.js index dbd171e31..7f6aa0c81 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/static/js/table.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/static/js/table.js @@ -3,8 +3,10 @@ define('pgadmin.node.table', [ 'underscore.string', 'pgadmin', 'pgadmin.browser', 'alertify', 'sources/alerts/alertify_wrapper', 'pgadmin.browser.collection', 'pgadmin.node.column', - 'pgadmin.node.constraints' -], function(gettext, url_for, $, _, S, pgAdmin, pgBrowser, alertify, AlertifyWrapper) { + 'pgadmin.node.constraints', 'pgadmin.browser.table.partition.utils' +], function( + gettext, url_for, $, _, S, pgAdmin, pgBrowser, alertify, AlertifyWrapper +) { if (!pgBrowser.Nodes['coll-table']) { var databases = pgBrowser.Nodes['coll-table'] = @@ -12,7 +14,7 @@ define('pgadmin.node.table', [ node: 'table', label: gettext('Tables'), type: 'coll-table', - columns: ['name', 'relowner', 'description'], + columns: ['name', 'relowner', 'is_partitioned', 'description'], hasStatistics: true, statsPrettifyFields: ['Size', 'Indexes size', 'Table size', 'Toast table size', 'Tuple length', @@ -22,6 +24,7 @@ define('pgadmin.node.table', [ if (!pgBrowser.Nodes['table']) { pgBrowser.Nodes['table'] = 
pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, type: 'table', label: gettext('Table'), collection_type: 'coll-table', @@ -39,7 +42,7 @@ define('pgadmin.node.table', [ height: '95%', width: '85%', Init: function() { - /* Avoid mulitple registration of menus */ + /* Avoid multiple registration of menus */ if (this.initialized) return; @@ -91,6 +94,9 @@ define('pgadmin.node.table', [ icon: 'fa fa-bar-chart', enable : 'canCreate' } ]); + pgBrowser.Events.on( + 'pgadmin:browser:node:table:updated', this.onTableUpdated, this + ); }, canDrop: pgBrowser.Nodes['schema'].canChildDrop, canDropCascade: pgBrowser.Nodes['schema'].canChildDrop, @@ -284,7 +290,10 @@ define('pgadmin.node.table', [ hastoasttable: true, toast_autovacuum_enabled: false, autovacuum_enabled: false, - primary_key: [] + primary_key: [], + partitions: [], + partition_type: 'range', + is_partitioned: false }, // Default values! initialize: function(attrs, args) { @@ -329,13 +338,38 @@ define('pgadmin.node.table', [ // If tablespace name is not "pg_global" then we need to exclude them return (!(d && d.label.match(/pg_global/))) } + },{ + id: 'partition', type: 'group', label: gettext('Partition'), + mode: ['edit', 'create'], min_version: 100000, + visible: function(m) { + // Always show in case of create mode + if (m.isNew() || m.get('is_partitioned')) + return true; + return false; + } + },{ + id: 'is_partitioned', label:gettext('Partitioned Table?'), cell: 'switch', + type: 'switch', mode: ['properties', 'create', 'edit'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew()) + return true; + return false; + } },{ id: 'description', label: gettext('Comment'), type: 'multiline', mode: ['properties', 'create', 'edit'], disabled: 'inSchema' },{ id: 
'coll_inherits', label: gettext('Inherited from table(s)'), url: 'get_inherits', type: 'array', group: gettext('Columns'), - disabled: 'checkInheritance', deps: ['typname'], + disabled: 'checkInheritance', deps: ['typname', 'is_partitioned'], mode: ['create', 'edit'], select2: { multiple: true, allowClear: true, placeholder: gettext('Select to inherit from...')}, @@ -432,7 +466,26 @@ define('pgadmin.node.table', [ model: pgBrowser.Nodes['column'].model, subnode: pgBrowser.Nodes['column'].model, mode: ['create', 'edit'], - disabled: 'inSchema', deps: ['typname'], + disabled: function(m) { + // In case of partitioned table remove inherited columns + if (m.isNew() && m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('columns'); + coll.remove(coll.filter(function(model) { + if (_.isUndefined(model.get('inheritedfrom'))) + return false; + return true; + })); + }, 10); + } + + if(this.node_info && 'catalog' in this.node_info) + { + return true; + } + return false; + }, + deps: ['typname', 'is_partitioned'], canAdd: 'check_grid_add_condition', canEdit: true, canDelete: true, // For each row edit/delete button enable/disable @@ -535,10 +588,22 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['primary_key'].model, editable: false, type: 'collection', group: gettext('Primary Key'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', columns : ['name', 'columns'], - canAdd: true, + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('primary_key'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, canAddRow: function(m) { // User can only add one primary key var columns = m.get('columns'); @@ -553,9 +618,21 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['foreign_key'].model, editable: false, type: 'collection', group: 
gettext('Foreign Key'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', - canAdd: true, + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('foreign_key'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, columns : ['name', 'columns'], canAddRow: function(m) { // User can only add if there is at least one column with name. @@ -568,7 +645,7 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['check_constraints'].model, editable: false, type: 'collection', group: gettext('Check'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', canAdd: true, columns : ['name', 'consrc'] @@ -578,10 +655,22 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['unique_constraint'].model, editable: false, type: 'collection', group: gettext('Unique'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', columns : ['name', 'columns'], - canAdd: true, + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('unique_constraint'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, canAddRow: function(m) { // User can only add if there is at least one column with name. 
var columns = m.get('columns'); @@ -593,10 +682,22 @@ define('pgadmin.node.table', [ subnode: pgBrowser.Nodes['exclusion_constraint'].model, editable: false, type: 'collection', group: gettext('Exclude'), mode: ['edit', 'create'], - canEdit: true, canDelete: true, + canEdit: true, canDelete: true, deps:['is_partitioned'], control: 'unique-col-collection', columns : ['name', 'columns', 'constraint'], - canAdd: true, + canAdd: function(m) { + if (m.get('is_partitioned')) { + setTimeout(function() { + var coll = m.get('exclude_constraint'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + return false; + } + + return true; + }, canAddRow: function(m) { // User can only add if there is at least one column with name. var columns = m.get('columns'); @@ -607,7 +708,7 @@ define('pgadmin.node.table', [ id: 'typname', label: gettext('Of type'), type: 'text', control: 'node-ajax-options', mode: ['properties', 'create', 'edit'], disabled: 'checkOfType', url: 'get_oftype', group: gettext('Advanced'), - deps: ['coll_inherits'], transform: function(data, cell) { + deps: ['coll_inherits', 'is_partitioned'], transform: function(data, cell) { var control = cell || this, m = control.model; m.of_types_tables = data; @@ -710,11 +811,217 @@ define('pgadmin.node.table', [ type: 'switch', mode: ['create', 'edit'], deps: ['typname'], disabled: 'isLikeDisable', group: gettext('Like') }] + },{ + id: 'partition_type', label:gettext('Partition Type'), + editable: false, type: 'select2', select2: {allowClear: false}, + group: 'partition', deps: ['is_partitioned'], + options:[{ + label: 'Range', value: 'range' + },{ + label: 'List', value: 'list' + }], + mode:['create'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew() || !m.get('is_partitioned')) + 
return true; + return false; + } + },{ + id: 'partition_keys', label:gettext('Partition Keys'), + model: Backform.PartitionKeyModel, + subnode: Backform.PartitionKeyModel, + editable: true, type: 'collection', + group: 'partition', mode: ['create'], + deps: ['is_partitioned', 'partition_type'], + canEdit: false, canDelete: true, + control: 'sub-node-collection', + canAdd: function(m) { + if (m.isNew() && m.get('is_partitioned')) + return true; + return false; + }, + canAddRow: function(m) { + var columns = m.get('columns'); + var max_row_count = 1000; + + if (m.get('partition_type') && m.get('partition_type') == 'list') + max_row_count = 1; + + return (m.get('partition_keys') && + m.get('partition_keys').length < max_row_count && + _.some(columns.pluck('name')) + ); + }, + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (m.get('partition_keys') && m.get('partition_keys').models.length > 0) { + setTimeout(function () { + var coll = m.get('partition_keys'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + } + } + },{ + id: 'partition_scheme', label: gettext('Partition Scheme'), + type: 'note', group: 'partition', mode: ['edit'], + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: function(m) { + if (!m.isNew()) { + this.text = m.get('partition_scheme'); + } + } + },{ + id: 'partition_key_note', label: gettext('Partition Keys'), + type: 'note', group: 'partition', mode: ['create'], + text: [ + '
  ', + gettext('Partition table supports two types of keys:'), + '
  • ', + gettext('Column: User can select any column from the list of available columns.'), + '
  • ', + gettext('Expression: User can specify expression to create partition key.'), + '

    ', + gettext('Example'), + ':', + gettext("Let's say, we want to create a partition table based per year for the column 'saledate', having datatype 'date/timestamp', then we need to specify the expression as 'extract(YEAR from saledate)' as partition key."), + '

' + ].join(''), + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + } + }, { + id: 'partitions', label:gettext('Partitions'), + model: Backform.PartitionsModel, + subnode: Backform.PartitionsModel, + editable: true, type: 'collection', + group: 'partition', mode: ['edit', 'create'], + deps: ['is_partitioned', 'partition_type'], + canEdit: false, canDelete: true, + customDeleteTitle: gettext('Detach Partition'), + customDeleteMsg: gettext('Are you sure you wish to detach this partition?'), + columns:['is_attach', 'partition_name', 'values_from', 'values_to', 'values_in'], + control: Backform.SubNodeCollectionControl.extend({ + row: Backgrid.PartitionRow, + initialize: function() { + Backform.SubNodeCollectionControl.prototype.initialize.apply(this, arguments); + var self = this; + if (!this.model.isNew()) { + var node = this.field.get('schema_node'), + node_info = this.field.get('node_info'); + + // Make ajax call to get the tables to be attached + $.ajax({ + url: node.generate_url.apply( + node, [ + null, 'get_attach_tables', this.field.get('node_data'), + true, node_info + ]), + + type: 'GET', + async: false, + success: function(res) { + if (res.success == 1) { + self.model.table_options = res.data; + } + else { + alertify.alert( + 'Error fetching tables to be attached', res.data.result + ); + } + }, + error: function(e) { + var errmsg = $.parseJSON(e.responseText); + alertify.alert('Error fetching tables to be attached.', errmsg.errormsg); + } + }); + } + } + } + ), + canAdd: function(m) { + if (m.get('is_partitioned')) + return true; + return false; + }, + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + }, + disabled: 
function(m) { + if (m.isNew() && m.get('partitions') && m.get('partitions').models.length > 0) { + setTimeout(function () { + var coll = m.get('partitions'); + coll.remove(coll.filter(function(model) { + return true; + })); + }, 10); + } + } + },{ + id: 'partition_note', label: gettext('Partitions'), + type: 'note', group: 'partition', + text: [ + '
    ', + '
  • ', + gettext('Create a table: User can create multiple partitions while creating new partitioned table. Operation switch is disabled in this scenario.'), + '
  • ', + gettext('Edit existing table: User can create/attach/detach multiple partitions. In attach operation user can select table from the list of suitable tables to be attached.'), + '
  • ', + gettext('From/To/In input: Values for these fields must be quoted with single quote. For more than one partition key values must be comma(,) separated.'), + '
    ', + gettext('Example'), + ':
    • ', + gettext("From/To: Enabled for range partition. Consider partitioned table with multiple keys of type Integer, then values should be specified like '100','200'."), + '
    • ', + gettext('In: Enabled for list partition. Values must be comma(,) separated and quoted with single quote.'), + '
' + ].join(''), + visible: function(m) { + if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server) + && !_.isUndefined(m.node_info.server.version) && + m.node_info.server.version >= 100000) + return true; + + return false; + } },{ // Here - we will create tab control for storage parameters // (auto vacuum). type: 'nested', control: 'tab', group: gettext('Parameter'), - mode: ['edit', 'create'], + mode: ['edit', 'create'], deps: ['is_partitioned'], schema: Backform.VacuumSettingsSchema },{ id: 'relacl_str', label: gettext('Privileges'), disabled: 'inSchema', @@ -741,35 +1048,46 @@ define('pgadmin.node.table', [ msg = undefined, name = this.get('name'), schema = this.get('schema'), - relowner = this.get('relowner'); + relowner = this.get('relowner'), + is_partitioned = this.get('is_partitioned'), + partition_keys = this.get('partition_keys'); - // If nothing to validate or VacuumSetting keys then - // return from here - if ( keys && (keys.length == 0 - || _.indexOf(keys, 'autovacuum_enabled') != -1 - || _.indexOf(keys, 'toast_autovacuum_enabled') != -1) ) { - return null; - } - - // Have to clear existing validation before initiating current state validation only - this.errorModel.clear(); - - if (_.isUndefined(name) || _.isNull(name) || - String(name).replace(/^\s+|\s+$/g, '') == '') { + if ( + _.isUndefined(name) || _.isNull(name) || + String(name).replace(/^\s+|\s+$/g, '') == '' + ) { msg = gettext('Table name cannot be empty.'); this.errorModel.set('name', msg); return msg; - } else if (_.isUndefined(schema) || _.isNull(schema) || - String(schema).replace(/^\s+|\s+$/g, '') == '') { + } + this.errorModel.unset('name'); + if ( + _.isUndefined(schema) || _.isNull(schema) || + String(schema).replace(/^\s+|\s+$/g, '') == '' + ) { msg = gettext('Table schema cannot be empty.'); this.errorModel.set('schema', msg); return msg; - } else if (_.isUndefined(relowner) || _.isNull(relowner) || - String(relowner).replace(/^\s+|\s+$/g, '') == '') { + } + 
this.errorModel.unset('schema'); + if ( + _.isUndefined(relowner) || _.isNull(relowner) || + String(relowner).replace(/^\s+|\s+$/g, '') == '' + ) { msg = gettext('Table owner cannot be empty.'); this.errorModel.set('relowner', msg); return msg; } + this.errorModel.unset('relowner'); + if ( + is_partitioned && this.isNew() && + !_.isNull(partition_keys) && partition_keys.length <= 0 + ) { + msg = gettext('Please specify at least one key for partitioned table.'); + this.errorModel.set('partition_keys', msg); + return msg; + } + this.errorModel.unset('partition_keys'); return null; }, // We will disable everything if we are under catalog node @@ -797,7 +1115,15 @@ define('pgadmin.node.table', [ }, // Oftype is defined? checkInheritance: function(m) { - // coll_inherits || typname + // Disabled if it is partitioned table + if (m.get('is_partitioned')) { + setTimeout( function() { + m.set('coll_inherits', []); + }, 10); + return true; + } + + // coll_inherits || typname if(!m.inSchema.apply(this, [m]) && ( _.isUndefined(m.get('typname')) || _.isNull(m.get('typname')) || @@ -841,7 +1167,15 @@ define('pgadmin.node.table', [ }, // We will disable it if Inheritance is defined checkOfType: function(m) { - //coll_inherits || typname + // Disabled if it is partitioned table + if (m.get('is_partitioned')) { + setTimeout( function() { + m.set('typname', undefined); + }, 10); + return true; + } + + //coll_inherits || typname if(!m.inSchemaWithModelCheck.apply(this, [m]) && (_.isUndefined(m.get('coll_inherits')) || _.isNull(m.get('coll_inherits')) || @@ -967,6 +1301,116 @@ define('pgadmin.node.table', [ return false; } } + }, + onTableUpdated: function(_node, _oldNodeData, _newNodeData) { + if ( + _newNodeData.is_partitioned && 'affected_partitions' in _newNodeData + ) { + var partitions = _newNodeData.affected_partitions, + idx, node_info, self = this, + newPartitionsIDs = [], + insertChildTreeNodes = [], + insertChildrenNodes = function() { + if (!insertChildTreeNodes.length) + 
return; + var option = insertChildTreeNodes.pop(); + pgBrowser.addChildTreeNodes( + option.treeHierarchy, option.parent, option.type, + option.childrenIDs, insertChildrenNodes + ); + }; + + if ('detached' in partitions && partitions.detached.length > 0) { + // Remove it from the partition collections node first + pgBrowser.removeChildTreeNodesById( + _node, 'coll-partition', _.map( + partitions.detached, function(_d) { return parseInt(_d.oid); } + ) + ); + + var schemaNode = pgBrowser.findParentTreeNodeByType( + _node, 'schema' + ), + detachedBySchema = _.groupBy( + partitions.detached, + function(_d) { return parseInt(_d.schema_id); } + ), childIDs; + + for (var key in detachedBySchema) { + schemaNode = pgBrowser.findSiblingTreeNode(schemaNode, key); + + if (schemaNode) { + childIDs = _.map( + detachedBySchema[key], + function(_d) { return parseInt(_d.oid); } + ); + + var tablesCollNode = pgBrowser.findChildCollectionTreeNode( + schemaNode, 'coll-table' + ); + + if (tablesCollNode) { + insertChildTreeNodes.push({ + 'parent': tablesCollNode, + 'type': 'table', + 'treeHierarchy': pgAdmin.Browser.Nodes.schema.getTreeNodeHierarchy(schemaNode), + 'childrenIDs': _.clone(childIDs) + }); + } + } + } + } + + if ('attached' in partitions && partitions.attached.length > 0) { + var schemaNode = pgBrowser.findParentTreeNodeByType( + _node, 'schema' + ), + attachedBySchema = _.groupBy( + partitions.attached, + function(_d) { return parseInt(_d.schema_id); } + ), childIDs; + + for (var key in attachedBySchema) { + schemaNode = pgBrowser.findSiblingTreeNode(schemaNode, key); + + if (schemaNode) { + childIDs = _.map( + attachedBySchema[key], + function(_d) { return parseInt(_d.oid); } + ); + // Remove it from the table collections node first + pgBrowser.removeChildTreeNodesById( + schemaNode, 'coll-table', childIDs + ); + } + newPartitionsIDs = newPartitionsIDs.concat(childIDs); + } + } + + if ('created' in partitions && partitions.created.length > 0) { + 
_.each(partitions.created, function(_data) { + newPartitionsIDs.push(_data.oid); + }); + } + + if (newPartitionsIDs.length) { + node_info = self.getTreeNodeHierarchy(_node); + + var partitionsCollNode = pgBrowser.findChildCollectionTreeNode( + _node, 'coll-partition' + ); + + if (partitionsCollNode) { + insertChildTreeNodes.push({ + 'parent': partitionsCollNode, + 'type': 'partition', + 'treeHierarchy': self.getTreeNodeHierarchy(_node), + 'childrenIDs': newPartitionsIDs + }); + } + } + insertChildrenNodes(); + } } }); } diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/attach.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/attach.sql new file mode 100644 index 000000000..0661d9cfe --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/attach.sql @@ -0,0 +1,2 @@ +ALTER TABLE {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}} ATTACH PARTITION {{conn|qtIdent(data.schema, data.name)}} + {{ data.partition_value }}; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/backend_support.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/backend_support.sql new file mode 100644 index 000000000..5fa1d7e5b --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/backend_support.sql @@ -0,0 +1,9 @@ +{#=============Checks if it is partitioned table========#} +{% if tid %} +SELECT + CASE WHEN c.relkind = 'p' THEN True ELSE False END As ptable +FROM + pg_class c +WHERE + c.oid = {{ tid }}::oid +{% endif %} diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/create.sql 
b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/create.sql new file mode 100644 index 000000000..b5e9404e4 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/create.sql @@ -0,0 +1,30 @@ +{% import 'table/sql/macros/constraints.macro' as CONSTRAINTS %} +{#===========================================#} +{#====== MAIN TABLE TEMPLATE STARTS HERE ======#} +{#===========================================#} +{### CREATE TABLE STATEMENT FOR partitions ###} +CREATE {% if data.relpersistence %}UNLOGGED {% endif %}TABLE {{conn|qtIdent(data.schema, data.name)}}{% if data.relispartition is defined and data.relispartition %} PARTITION OF {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}}{% endif %} + +{# Macro to render for constraints #} +{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %} +( {% endif %} +{% if data.primary_key|length > 0 %}{{CONSTRAINTS.PRIMARY_KEY(conn, data.primary_key[0])}}{% endif %}{% if data.unique_constraint|length > 0 %}{% if data.primary_key|length > 0 %},{% endif %} +{{CONSTRAINTS.UNIQUE(conn, data.unique_constraint)}}{% endif %}{% if data.foreign_key|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 %},{% endif %} +{{CONSTRAINTS.FOREIGN_KEY(conn, data.foreign_key)}}{% endif %}{% if data.check_constraint|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 %},{% endif %} +{{CONSTRAINTS.CHECK(conn, data.check_constraint)}}{% endif %}{% if data.exclude_constraint|length > 0 %}{% if data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 %},{% endif %} +{{CONSTRAINTS.EXCLUDE(conn, data.exclude_constraint)}}{% endif %} +{% if 
data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %} + +) +{% endif %} + {{ data.partition_value }}{% if data.is_partitioned is defined and data.is_partitioned %} + + PARTITION BY {{ data.partition_scheme }}{% endif %}; + + +{### Alter SQL for Owner ###} +{% if data.relowner %} + +ALTER TABLE {{conn|qtIdent(data.schema, data.name)}} + OWNER to {{conn|qtIdent(data.relowner)}}; +{% endif %} diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/detach.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/detach.sql new file mode 100644 index 000000000..4b413db1a --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/detach.sql @@ -0,0 +1 @@ +ALTER TABLE {{conn|qtIdent(data.parent_schema, data.partitioned_table_name)}} DETACH PARTITION {{conn|qtIdent(data.schema, data.name)}}; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/get_attach_tables.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/get_attach_tables.sql new file mode 100644 index 000000000..476dc3fc6 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/get_attach_tables.sql @@ -0,0 +1,23 @@ +SELECT oid, quote_ident(nspname)||'.'||quote_ident(relname) AS table_name FROM +(SELECT + r.oid, r.relname, n.nspname, array_agg(a.attname) attnames, array_agg(a.atttypid) atttypes +FROM + (SELECT oid, relname, relnamespace FROM pg_catalog.pg_class + WHERE relkind in ('r', 'p') AND NOT relispartition) r + JOIN (SELECT oid AS nspoid, nspname FROM + pg_catalog.pg_namespace WHERE nspname NOT LIKE E'pg\_%') n + ON (r.relnamespace = n.nspoid) + JOIN (SELECT 
attrelid, attname, atttypid FROM + pg_catalog.pg_attribute WHERE attnum > 0 ORDER BY attrelid, attnum) a + ON (r.oid = a.attrelid) +GROUP BY r.oid, r.relname, r.relnamespace, n.nspname) all_tables +JOIN +(SELECT + attrelid, array_agg(attname) attnames, array_agg(atttypid) atttypes +FROM + (SELECT * FROM pg_catalog.pg_attribute + WHERE attrelid = {{ tid }} AND attnum > 0 + ORDER BY attrelid, attnum) attributes +GROUP BY attrelid) current_table ON current_table.attrelid != all_tables.oid + AND current_table.attnames = all_tables.attnames + AND current_table.atttypes = all_tables.atttypes \ No newline at end of file diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/nodes.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/nodes.sql new file mode 100644 index 000000000..bd828cd7d --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/nodes.sql @@ -0,0 +1,15 @@ +SELECT rel.oid, rel.relname AS name, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE AND tgenabled = 'O') AS has_enable_triggers, + pg_get_expr(rel.relpartbound, rel.oid) AS partition_value, + rel.relnamespace AS schema_id, + nsp.nspname AS schema_name, + (CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned, + (CASE WHEN rel.relkind = 'p' THEN pg_get_partkeydef(rel.oid::oid) ELSE '' END) AS partition_scheme +FROM + (SELECT * FROM pg_inherits WHERE inhparent = {{ tid }}::oid) inh + LEFT JOIN pg_class rel ON inh.inhrelid = rel.oid + LEFT JOIN pg_namespace nsp ON rel.relnamespace = nsp.oid + WHERE rel.relispartition + {% if ptid %} AND rel.oid = {{ ptid }}::OID {% endif %} + ORDER BY rel.relname; diff --git 
a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/properties.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/properties.sql new file mode 100644 index 000000000..85f4476eb --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/partition/sql/10_plus/properties.sql @@ -0,0 +1,82 @@ +SELECT rel.oid, rel.relname AS name, rel.reltablespace AS spcoid,rel.relacl AS relacl_str, + (CASE WHEN length(spc.spcname) > 0 THEN spc.spcname ELSE + (SELECT sp.spcname FROM pg_database dtb + JOIN pg_tablespace sp ON dtb.dattablespace=sp.oid + WHERE dtb.oid = {{ did }}::oid) + END) as spcname, + (select nspname FROM pg_namespace WHERE oid = {{scid}}::oid ) as parent_schema, + nsp.nspname as schema, + pg_get_userbyid(rel.relowner) AS relowner, rel.relhasoids, rel.relispartition, + rel.relhassubclass, rel.reltuples, des.description, con.conname, con.conkey, + EXISTS(select 1 FROM pg_trigger + JOIN pg_proc pt ON pt.oid=tgfoid AND pt.proname='logtrigger' + JOIN pg_proc pc ON pc.pronamespace=pt.pronamespace AND pc.proname='slonyversion' + WHERE tgrelid=rel.oid) AS isrepl, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount, + (SELECT ARRAY(SELECT CASE WHEN (nspname NOT LIKE E'pg\_%') THEN + quote_ident(nspname)||'.'||quote_ident(c.relname) + ELSE quote_ident(c.relname) END AS inherited_tables + FROM pg_inherits i + JOIN pg_class c ON c.oid = i.inhparent + JOIN pg_namespace n ON n.oid=c.relnamespace + WHERE i.inhrelid = rel.oid ORDER BY inhseqno)) AS coll_inherits, + (SELECT count(*) + FROM pg_inherits i + JOIN pg_class c ON c.oid = i.inhparent + JOIN pg_namespace n ON n.oid=c.relnamespace + WHERE i.inhrelid = rel.oid) AS inherited_tables_cnt, + (CASE WHEN rel.relpersistence = 'u' THEN true ELSE false END) AS relpersistence, + substring(array_to_string(rel.reloptions, ',') FROM 
'fillfactor=([0-9]*)') AS fillfactor, + (CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true') + THEN true ELSE false END) AS autovacuum_enabled, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS autovacuum_vacuum_threshold, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_vacuum_scale_factor, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS autovacuum_analyze_threshold, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_analyze_scale_factor, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS autovacuum_vacuum_cost_delay, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS autovacuum_vacuum_cost_limit, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS autovacuum_freeze_min_age, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS autovacuum_freeze_max_age, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS autovacuum_freeze_table_age, + (CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true') + THEN true ELSE false END) AS toast_autovacuum_enabled, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS toast_autovacuum_vacuum_threshold, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_vacuum_scale_factor, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS toast_autovacuum_analyze_threshold, + substring(array_to_string(tst.reloptions, ',') FROM 
'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_analyze_scale_factor, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS toast_autovacuum_vacuum_cost_delay, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS toast_autovacuum_vacuum_cost_limit, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS toast_autovacuum_freeze_min_age, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS toast_autovacuum_freeze_max_age, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS toast_autovacuum_freeze_table_age, + array_to_string(rel.reloptions, ',') AS table_vacuum_settings_str, + array_to_string(tst.reloptions, ',') AS toast_table_vacuum_settings_str, + rel.reloptions AS reloptions, tst.reloptions AS toast_reloptions, rel.reloftype, typ.typname, + (CASE WHEN rel.reltoastrelid = 0 THEN false ELSE true END) AS hastoasttable, + -- Added for pgAdmin4 + (CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean THEN true ELSE false END) AS autovacuum_custom, + (CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean AND rel.reltoastrelid != 0 THEN true ELSE false END) AS toast_autovacuum, + + (SELECT array_agg(provider || '=' || label) FROM pg_seclabels sl1 WHERE sl1.objoid=rel.oid AND sl1.objsubid=0) AS seclabels, + (CASE WHEN rel.oid <= {{ datlastsysoid}}::oid THEN true ElSE false END) AS is_sys_table, + -- Added for partition table + (CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned, + (CASE WHEN rel.relkind = 'p' THEN pg_get_partkeydef(rel.oid::oid) ELSE '' END) AS partition_scheme, + {% if ptid %} + (CASE WHEN rel.relispartition THEN pg_get_expr(rel.relpartbound, {{ ptid }}::oid) ELSE '' END) AS partition_value, 
+ (SELECT relname FROM pg_class WHERE oid = {{ tid }}::oid) AS partitioned_table_name + {% else %} + pg_get_expr(rel.relpartbound, rel.oid) AS partition_value + {% endif %} + +FROM pg_class rel + LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace + LEFT OUTER JOIN pg_description des ON (des.objoid=rel.oid AND des.objsubid=0 AND des.classoid='pg_class'::regclass) + LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p' + LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid + LEFT JOIN pg_type typ ON rel.reloftype=typ.oid + LEFT JOIN pg_inherits inh ON inh.inhrelid = rel.oid + LEFT JOIN pg_namespace nsp ON rel.relnamespace = nsp.oid +WHERE rel.relispartition AND inh.inhparent = {{ tid }}::oid +{% if ptid %} AND rel.oid = {{ ptid }}::oid {% endif %} +ORDER BY rel.relname; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/acl.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/acl.sql new file mode 100644 index 000000000..eadf9d58a --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/acl.sql @@ -0,0 +1,46 @@ +{### SQL to fetch privileges for tablespace ###} +SELECT 'relacl' as deftype, COALESCE(gt.rolname, 'PUBLIC') grantee, g.rolname grantor, + array_agg(privilege_type) as privileges, array_agg(is_grantable) as grantable +FROM + (SELECT + d.grantee, d.grantor, d.is_grantable, + CASE d.privilege_type + WHEN 'CONNECT' THEN 'c' + WHEN 'CREATE' THEN 'C' + WHEN 'DELETE' THEN 'd' + WHEN 'EXECUTE' THEN 'X' + WHEN 'INSERT' THEN 'a' + WHEN 'REFERENCES' THEN 'x' + WHEN 'SELECT' THEN 'r' + WHEN 'TEMPORARY' THEN 'T' + WHEN 'TRIGGER' THEN 't' + WHEN 'TRUNCATE' THEN 'D' + WHEN 'UPDATE' THEN 'w' + WHEN 'USAGE' THEN 'U' + ELSE 'UNKNOWN' + END AS privilege_type + FROM + (SELECT rel.relacl + FROM pg_class rel + LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace + 
LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p' + LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid + LEFT JOIN pg_type typ ON rel.reloftype=typ.oid + WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid + AND rel.oid = {{ tid }}::oid + ) acl, + (SELECT (d).grantee AS grantee, (d).grantor AS grantor, (d).is_grantable + AS is_grantable, (d).privilege_type AS privilege_type FROM (SELECT + aclexplode(rel.relacl) as d + FROM pg_class rel + LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace + LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p' + LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid + LEFT JOIN pg_type typ ON rel.reloftype=typ.oid + WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid + AND rel.oid = {{ tid }}::oid + ) a) d + ) d + LEFT JOIN pg_catalog.pg_roles g ON (d.grantor = g.oid) + LEFT JOIN pg_catalog.pg_roles gt ON (d.grantee = gt.oid) +GROUP BY g.rolname, gt.rolname diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_inherits.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_inherits.sql new file mode 100644 index 000000000..f28f29882 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_inherits.sql @@ -0,0 +1,17 @@ +{% import 'table/sql/macros/db_catalogs.macro' as CATALOG %} +SELECT c.oid, c.relname , nspname, +CASE WHEN nspname NOT LIKE E'pg\_%' THEN + quote_ident(nspname)||'.'||quote_ident(c.relname) +ELSE quote_ident(c.relname) +END AS inherits +FROM pg_class c +JOIN pg_namespace n +ON n.oid=c.relnamespace +WHERE relkind='r' AND NOT relispartition +{% if not show_system_objects %} +{{ CATALOG.VALID_CATALOGS(server_type) }} +{% endif %} +{% if tid %} +AND c.oid != tid +{% endif %} +ORDER BY relnamespace, c.relname \ No newline at 
end of file diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_oid.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_oid.sql new file mode 100644 index 000000000..f63069814 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_oid.sql @@ -0,0 +1,5 @@ +SELECT rel.oid as tid +FROM pg_class rel +WHERE rel.relkind IN ('r','s','t','p') +AND rel.relnamespace = {{ scid }}::oid +AND rel.relname = {{data.name|qtLiteral}} \ No newline at end of file diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_table.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_table.sql new file mode 100644 index 000000000..6952c8d85 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/get_table.sql @@ -0,0 +1,8 @@ +SELECT + rel.relname AS name +FROM + pg_class rel +WHERE + rel.relkind IN ('r','s','t','p') + AND rel.relnamespace = {{ scid }}::oid + AND rel.oid = {{ tid }}::oid; \ No newline at end of file diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/nodes.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/nodes.sql new file mode 100644 index 000000000..fbf9c42ca --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/nodes.sql @@ -0,0 +1,9 @@ +SELECT rel.oid, rel.relname AS name, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE AND tgenabled = 'O') AS has_enable_triggers, + (CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned 
+FROM pg_class rel + WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid + AND NOT rel.relispartition + {% if tid %} AND rel.oid = {{tid}}::OID {% endif %} + ORDER BY rel.relname; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/properties.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/properties.sql new file mode 100644 index 000000000..1d589df62 --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/10_plus/properties.sql @@ -0,0 +1,73 @@ +SELECT rel.oid, rel.relname AS name, rel.reltablespace AS spcoid,rel.relacl AS relacl_str, + (CASE WHEN length(spc.spcname) > 0 THEN spc.spcname ELSE + (SELECT sp.spcname FROM pg_database dtb + JOIN pg_tablespace sp ON dtb.dattablespace=sp.oid + WHERE dtb.oid = {{ did }}::oid) + END) as spcname, + (select nspname FROM pg_namespace WHERE oid = {{scid}}::oid ) as schema, + pg_get_userbyid(rel.relowner) AS relowner, rel.relhasoids, rel.relkind, + (CASE WHEN rel.relkind = 'p' THEN true ELSE false END) AS is_partitioned, + rel.relhassubclass, rel.reltuples, des.description, con.conname, con.conkey, + EXISTS(select 1 FROM pg_trigger + JOIN pg_proc pt ON pt.oid=tgfoid AND pt.proname='logtrigger' + JOIN pg_proc pc ON pc.pronamespace=pt.pronamespace AND pc.proname='slonyversion' + WHERE tgrelid=rel.oid) AS isrepl, + (SELECT count(*) FROM pg_trigger WHERE tgrelid=rel.oid AND tgisinternal = FALSE) AS triggercount, + (SELECT ARRAY(SELECT CASE WHEN (nspname NOT LIKE E'pg\_%') THEN + quote_ident(nspname)||'.'||quote_ident(c.relname) + ELSE quote_ident(c.relname) END AS inherited_tables + FROM pg_inherits i + JOIN pg_class c ON c.oid = i.inhparent + JOIN pg_namespace n ON n.oid=c.relnamespace + WHERE i.inhrelid = rel.oid ORDER BY inhseqno)) AS coll_inherits, + (SELECT count(*) + FROM pg_inherits i + JOIN pg_class c ON c.oid = i.inhparent + JOIN 
pg_namespace n ON n.oid=c.relnamespace + WHERE i.inhrelid = rel.oid) AS inherited_tables_cnt, + (CASE WHEN rel.relpersistence = 'u' THEN true ELSE false END) AS relpersistence, + substring(array_to_string(rel.reloptions, ',') FROM 'fillfactor=([0-9]*)') AS fillfactor, + (CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true') + THEN true ELSE false END) AS autovacuum_enabled, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS autovacuum_vacuum_threshold, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_vacuum_scale_factor, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS autovacuum_analyze_threshold, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS autovacuum_analyze_scale_factor, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS autovacuum_vacuum_cost_delay, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS autovacuum_vacuum_cost_limit, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS autovacuum_freeze_min_age, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS autovacuum_freeze_max_age, + substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS autovacuum_freeze_table_age, + (CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)') = 'true') + THEN true ELSE false END) AS toast_autovacuum_enabled, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_threshold=([0-9]*)') AS toast_autovacuum_vacuum_threshold, + substring(array_to_string(tst.reloptions, ',') FROM 
'autovacuum_vacuum_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_vacuum_scale_factor, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_threshold=([0-9]*)') AS toast_autovacuum_analyze_threshold, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_analyze_scale_factor=([0-9]*[.][0-9]*)') AS toast_autovacuum_analyze_scale_factor, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_delay=([0-9]*)') AS toast_autovacuum_vacuum_cost_delay, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_vacuum_cost_limit=([0-9]*)') AS toast_autovacuum_vacuum_cost_limit, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_min_age=([0-9]*)') AS toast_autovacuum_freeze_min_age, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_max_age=([0-9]*)') AS toast_autovacuum_freeze_max_age, + substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_freeze_table_age=([0-9]*)') AS toast_autovacuum_freeze_table_age, + array_to_string(rel.reloptions, ',') AS table_vacuum_settings_str, + array_to_string(tst.reloptions, ',') AS toast_table_vacuum_settings_str, + rel.reloptions AS reloptions, tst.reloptions AS toast_reloptions, rel.reloftype, typ.typname, + (CASE WHEN rel.reltoastrelid = 0 THEN false ELSE true END) AS hastoasttable, + -- Added for pgAdmin4 + (CASE WHEN (substring(array_to_string(rel.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean THEN true ELSE false END) AS autovacuum_custom, + (CASE WHEN (substring(array_to_string(tst.reloptions, ',') FROM 'autovacuum_enabled=([a-z|0-9]*)'))::boolean AND rel.reltoastrelid != 0 THEN true ELSE false END) AS toast_autovacuum, + + (SELECT array_agg(provider || '=' || label) FROM pg_seclabels sl1 WHERE sl1.objoid=rel.oid AND sl1.objsubid=0) AS seclabels, + (CASE WHEN rel.oid <= {{ datlastsysoid}}::oid THEN true ElSE false END) AS is_sys_table + -- Added for partition table + {% if tid %}, (CASE WHEN 
rel.relkind = 'p' THEN pg_get_partkeydef({{ tid }}::oid) ELSE '' END) AS partition_scheme {% endif %} +FROM pg_class rel + LEFT OUTER JOIN pg_tablespace spc on spc.oid=rel.reltablespace + LEFT OUTER JOIN pg_description des ON (des.objoid=rel.oid AND des.objsubid=0 AND des.classoid='pg_class'::regclass) + LEFT OUTER JOIN pg_constraint con ON con.conrelid=rel.oid AND con.contype='p' + LEFT OUTER JOIN pg_class tst ON tst.oid = rel.reltoastrelid + LEFT JOIN pg_type typ ON rel.reloftype=typ.oid +WHERE rel.relkind IN ('r','s','t','p') AND rel.relnamespace = {{ scid }}::oid +AND NOT rel.relispartition +{% if tid %} AND rel.oid = {{ tid }}::oid {% endif %} +ORDER BY rel.relname; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/create.sql b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/create.sql index f8e858fbe..5ec8ffc2c 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/create.sql +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/create.sql @@ -57,7 +57,8 @@ CREATE {% if data.relpersistence %}UNLOGGED {% endif %}TABLE {{conn|qtIdent(data {{CONSTRAINTS.EXCLUDE(conn, data.exclude_constraint)}}{% endif %} {% if data.like_relation or data.coll_inherits or data.columns|length > 0 or data.primary_key|length > 0 or data.unique_constraint|length > 0 or data.foreign_key|length > 0 or data.check_constraint|length > 0 or data.exclude_constraint|length > 0 %} -) +){% if data.relkind is defined and data.relkind == 'p' %} PARTITION BY {{ data.partition_scheme }} {% endif %} + {% endif %} {### If we are inheriting it from another table(s) ###} {% if data.coll_inherits %} diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/get_schema_oid.sql 
b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/get_schema_oid.sql index 4d329d213..8df84b578 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/get_schema_oid.sql +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/templates/table/sql/default/get_schema_oid.sql @@ -1,8 +1,9 @@ {# ===== fetch new assigned schema oid ===== #} SELECT - c.relnamespace as scid + c.relnamespace as scid, nsp.nspname as nspname FROM pg_class c +LEFT JOIN pg_namespace nsp ON nsp.oid = c.relnamespace WHERE {% if tid %} c.oid = {{tid}}::oid; diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_add.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_add.py index 0703cba3f..9e4846c52 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_add.py +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_add.py @@ -14,6 +14,7 @@ from pgadmin.browser.server_groups.servers.databases.schemas.tests import \ utils as schema_utils from pgadmin.browser.server_groups.servers.databases.tests import utils as \ database_utils +from pgadmin.browser.server_groups.servers.tests import utils as server_utils from pgadmin.utils.route import BaseTestGenerator from regression import parent_node_dict from regression.python_test_utils import test_utils as utils @@ -23,7 +24,19 @@ class TableAddTestCase(BaseTestGenerator): """ This class will add new collation under schema node. """ scenarios = [ # Fetching default URL for table node. 
- ('Fetch table Node URL', dict(url='/browser/table/obj/')) + ('Create Table', dict(url='/browser/table/obj/')), + ('Create Range partitioned table with 2 partitions', + dict(url='/browser/table/obj/', + server_min_version=100000, + partition_type='range' + ) + ), + ('Create List partitioned table with 2 partitions', + dict(url='/browser/table/obj/', + server_min_version=100000, + partition_type='list' + ) + ) ] def setUp(self): @@ -43,6 +56,19 @@ class TableAddTestCase(BaseTestGenerator): if not schema_response: raise Exception("Could not find the schema to add a table.") + self.is_partition = False + if hasattr(self, 'server_min_version'): + server_con = server_utils.connect_server(self, self.server_id) + if not server_con["info"] == "Server connected.": + raise Exception("Could not connect to server to add " + "partitioned table.") + if server_con["data"]["version"] < self.server_min_version: + message = "Partitioned tables are not supported by " \ + "PPAS/PG versions below 10.0." + self.skipTest(message) + else: + self.is_partition = True + def runTest(self): """ This function will add table under schema node.
""" db_user = self.server["username"] @@ -68,7 +94,7 @@ class TableAddTestCase(BaseTestGenerator): "seclabels": [] }, {"name": "DOJ", - "cltype": "date[]", + "cltype": "date", "attacl": [], "is_primary_key": False, "attoptions": [], @@ -76,7 +102,7 @@ class TableAddTestCase(BaseTestGenerator): } ], "exclude_constraint": [], - "fillfactor": "11", + "fillfactor": "", "hastoasttable": True, "like_constraints": True, "like_default_value": True, @@ -166,6 +192,35 @@ class TableAddTestCase(BaseTestGenerator): } ] } + + if self.is_partition: + data['partition_type'] = self.partition_type + data['is_partitioned'] = True + if self.partition_type == 'range': + data['partitions'] = \ + [{'values_from': "'2010-01-01'", + 'values_to': "'2010-12-31'", + 'is_attach': False, + 'partition_name': 'emp_2010' + }, + {'values_from': "'2011-01-01'", + 'values_to': "'2011-12-31'", + 'is_attach': False, + 'partition_name': 'emp_2011' + }] + else: + data['partitions'] = \ + [{'values_in': "'2012-01-01', '2012-12-31'", + 'is_attach': False, + 'partition_name': 'emp_2012' + }, + {'values_in': "'2013-01-01', '2013-12-31'", + 'is_attach': False, + 'partition_name': 'emp_2013' + }] + data['partition_keys'] = \ + [{'key_type': 'column', 'pt_column': 'DOJ'}] + # Add table response = self.tester.post( self.url + str(utils.SERVER_GROUP) + '/' + diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_delete.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_delete.py index 1b2f9f55a..9749fed6f 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_delete.py +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_delete.py @@ -23,7 +23,7 @@ class TableDeleteTestCase(BaseTestGenerator): """This class will delete new table under schema node.""" scenarios = [ # Fetching default URL for table node. 
- ('Fetch table Node URL', dict(url='/browser/table/obj/')) + ('Delete Table', dict(url='/browser/table/obj/')) ] def setUp(self): diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_put.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_put.py index bfbf884f0..ef476d7d0 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_put.py +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_put.py @@ -14,6 +14,7 @@ from pgadmin.browser.server_groups.servers.databases.schemas.tests import \ utils as schema_utils from pgadmin.browser.server_groups.servers.databases.tests import utils as \ database_utils +from pgadmin.browser.server_groups.servers.tests import utils as server_utils from pgadmin.utils.route import BaseTestGenerator from regression import parent_node_dict from regression.python_test_utils import test_utils as utils @@ -24,7 +25,49 @@ class TableUpdateTestCase(BaseTestGenerator): """This class will add new collation under schema node.""" scenarios = [ # Fetching default URL for table node. 
- ('Fetch table Node URL', dict(url='/browser/table/obj/')) + ('Update Table', dict(url='/browser/table/obj/')), + ('Create partitions of existing range partitioned table', + dict(url='/browser/table/obj/', + server_min_version=100000, + partition_type='range', + mode='create' + ) + ), + ('Create partitions of existing list partitioned table', + dict(url='/browser/table/obj/', + server_min_version=100000, + partition_type='list', + mode='create' + ) + ), + ('Detach partition from existing range partitioned table', + dict(url='/browser/table/obj/', + server_min_version=100000, + partition_type='range', + mode='detach' + ) + ), + ('Detach partition from existing list partitioned table', + dict(url='/browser/table/obj/', + server_min_version=100000, + partition_type='list', + mode='detach' + ) + ), + ('Attach partition to existing range partitioned table', + dict(url='/browser/table/obj/', + server_min_version=100000, + partition_type='range', + mode='attach' + ) + ), + ('Attach partition to existing list partitioned table', + dict(url='/browser/table/obj/', + server_min_version=100000, + partition_type='list', + mode='attach' + ) + ) ] def setUp(self): @@ -44,9 +87,31 @@ class TableUpdateTestCase(BaseTestGenerator): if not schema_response: raise Exception("Could not find the schema to add a table.") self.table_name = "test_table_put_%s" % (str(uuid.uuid4())[1:6]) - self.table_id = tables_utils.create_table(self.server, self.db_name, - self.schema_name, - self.table_name) + + self.is_partition = False + if hasattr(self, 'server_min_version'): + server_con = server_utils.connect_server(self, self.server_id) + if not server_con["info"] == "Server connected.": + raise Exception("Could not connect to server to add " + "partitioned table.") + if server_con["data"]["version"] < self.server_min_version: + message = "Partitioned table are not supported by " \ + "PPAS/PG 10.0 and below." 
+ self.skipTest(message) + else: + self.is_partition = True + + self.table_id = tables_utils.create_table_for_partition( + self.server, + self.db_name, + self.schema_name, + self.table_name, + 'partitioned', + self.partition_type) + else: + self.table_id = tables_utils.create_table(self.server, self.db_name, + self.schema_name, + self.table_name) def runTest(self): """This function will fetch added table under schema node.""" @@ -54,10 +119,18 @@ class TableUpdateTestCase(BaseTestGenerator): self.table_id) if not table_response: raise Exception("Could not find the table to update.") - data = { - "description": "This is test comment for table", - "id": self.table_id - } + + if self.is_partition: + data = {"id": self.table_id} + tables_utils.set_partition_data( + self.server, self.db_name, self.schema_name, self.table_name, + self.partition_type, data, self.mode) + else: + data = { + "description": "This is test comment for table", + "id": self.table_id + } + response = self.tester.put( self.url + str(utils.SERVER_GROUP) + '/' + str(self.server_id) + '/' + str(self.db_id) + '/' + diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/utils.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/utils.py index 456f0202e..0c7aede27 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/utils.py +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/utils.py @@ -85,3 +85,153 @@ def verify_table(server, db_name, table_id): except Exception: traceback.print_exc(file=sys.stderr) raise + + +def create_table_for_partition(server, db_name, schema_name, table_name, + table_type, partition_type, partition_name=None): + """ + This function creates partitioned/partition/regular table + under provided schema. 
+ + :param server: server details + :param db_name: database name + :param schema_name: schema name + :param table_name: table name + :param table_type: regular/partitioned/partition + :param partition_type: partition table type (range/list) + :param partition_name: Partition Name + :return table_id: table id + """ + try: + connection = utils.get_db_connection(db_name, + server['username'], + server['db_password'], + server['host'], + server['port']) + old_isolation_level = connection.isolation_level + connection.set_isolation_level(0) + pg_cursor = connection.cursor() + + query = '' + if table_type == 'partitioned': + if partition_type == 'range': + query = "CREATE TABLE %s.%s(country text, sales bigint, " \ + "saledate date) PARTITION BY RANGE(saledate)" % \ + (schema_name, table_name) + else: + query = "CREATE TABLE %s.%s(country text, sales bigint, " \ + "saledate date) PARTITION BY LIST(saledate)" % \ + (schema_name, table_name) + elif table_type == 'partition': + if partition_type == 'range': + query = "CREATE TABLE %s.%s PARTITION OF %s.%s " \ + "FOR VALUES FROM ('2012-01-01') TO ('2012-12-31')" % \ + (schema_name, partition_name, schema_name, table_name) + else: + query = "CREATE TABLE %s.%s PARTITION OF %s.%s " \ + "FOR VALUES IN ('2013-01-01')" % \ + (schema_name, partition_name, schema_name, table_name) + + # To fetch OID table name is actually partition name + table_name = partition_name + elif table_type == 'regular': + query = "CREATE TABLE %s.%s(country text, sales bigint," \ + "saledate date NOT NULL)" % (schema_name, table_name) + + pg_cursor.execute(query) + connection.set_isolation_level(old_isolation_level) + connection.commit() + # Get 'oid' from newly created table + pg_cursor.execute("select oid from pg_class where relname='%s'" % + table_name) + table = pg_cursor.fetchone() + table_id = '' + if table: + table_id = table[0] + connection.close() + return table_id + except Exception: + traceback.print_exc(file=sys.stderr) + raise + + +def 
set_partition_data(server, db_name, schema_name, table_name, + partition_type, data, mode): + """ + This function is used to set the partitions data on the basis of + partition type and action. + + :param server: server details + :param db_name: Database Name + :param schema_name: Schema Name + :param table_name: Table Name + :param partition_type: range or list + :param data: Data + :param mode: create/detach + :return: + """ + + data['partitions'] = dict() + if partition_type == 'range' and mode == 'create': + data['partitions'].update( + {'added': [{'values_from': "'2014-01-01'", + 'values_to': "'2014-12-31'", + 'is_attach': False, + 'partition_name': 'sale_2014'}, + {'values_from': "'2015-01-01'", + 'values_to': "'2015-12-31'", + 'is_attach': False, + 'partition_name': 'sale_2015' + }] + } + ) + elif partition_type == 'list' and mode == 'create': + data['partitions'].update( + {'added': [{'values_in': "'2016-01-01', '2016-12-31'", + 'is_attach': False, + 'partition_name': 'sale_2016'}, + {'values_in': "'2017-01-01', '2017-12-31'", + 'is_attach': False, + 'partition_name': 'sale_2017' + }] + } + ) + elif partition_type == 'range' and mode == 'detach': + partition_id = create_table_for_partition(server, db_name, schema_name, + table_name, 'partition', + partition_type, 'sale_2012') + data['partitions'].update( + {'deleted': [{'oid': partition_id}] + } + ) + elif partition_type == 'list' and mode == 'detach': + partition_id = create_table_for_partition(server, db_name, schema_name, + table_name, 'partition', + partition_type, 'sale_2013') + data['partitions'].update( + {'deleted': [{'oid': partition_id}] + } + ) + elif partition_type == 'range' and mode == 'attach': + partition_id = create_table_for_partition(server, db_name, schema_name, + 'attach_sale_2010', 'regular', + partition_type) + data['partitions'].update( + {'added': [{'values_from': "'2010-01-01'", + 'values_to': "'2010-12-31'", + 'is_attach': True, + 'partition_name': partition_id + }] + } + ) + elif 
partition_type == 'list' and mode == 'attach': + partition_id = create_table_for_partition(server, db_name, schema_name, + 'attach_sale_2011', 'regular', + partition_type) + data['partitions'].update( + {'added': [{'values_in': "'2011-01-01'", + 'is_attach': True, + 'partition_name': partition_id + }] + } + ) diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/templates/trigger/js/trigger.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/templates/trigger/js/trigger.js index 1623fe547..87f1eb330 100644 --- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/templates/trigger/js/trigger.js +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/triggers/templates/trigger/js/trigger.js @@ -30,13 +30,15 @@ define('pgadmin.node.trigger', [ node: 'trigger', label: gettext('Triggers'), type: 'coll-trigger', + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, columns: ['name', 'description'] }); }; if (!pgBrowser.Nodes['trigger']) { - pgAdmin.Browser.Nodes['trigger'] = pgAdmin.Browser.Node.extend({ - parent_type: ['table', 'view'], + pgAdmin.Browser.Nodes['trigger'] = pgBrowser.Node.extend({ + getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy, + parent_type: ['table', 'view', 'partition'], collection_type: ['coll-table', 'coll-view'], type: 'trigger', label: gettext('Trigger'), @@ -71,6 +73,12 @@ define('pgadmin.node.trigger', [ category: 'create', priority: 4, label: gettext('Trigger...'), icon: 'wcTabIcon icon-trigger', data: {action: 'create', check: true}, enable: 'canCreate' + },{ + name: 'create_trigger_onPartition', node: 'partition', module: this, + applies: ['object', 'context'], callback: 'show_obj_properties', + category: 'create', priority: 4, label: gettext('Trigger...'), + icon: 'wcTabIcon icon-trigger', data: {action: 'create', check: true}, + enable: 'canCreate' },{ name: 'enable_trigger', node: 'trigger', module: this, 
applies: ['object', 'context'], callback: 'enable_trigger', @@ -206,6 +214,17 @@ define('pgadmin.node.trigger', [ mode: ['create','edit', 'properties'], deps: ['is_constraint_trigger'], disabled: function(m) { + // Disabled if table is a partitioned table. + if (_.has(m, 'node_info') && _.has(m.node_info, 'table') && + _.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned) + { + setTimeout(function(){ + m.set('is_row_trigger', false); + },10); + + return true; + } + // If constraint trigger is set to True then row trigger will // automatically set to True and becomes disable var is_constraint_trigger = m.get('is_constraint_trigger'); @@ -232,7 +251,19 @@ define('pgadmin.node.trigger', [ id: 'is_constraint_trigger', label: gettext('Constraint trigger?'), type: 'switch', disabled: 'inSchemaWithModelCheck', mode: ['create','edit', 'properties'], - group: gettext('Definition') + group: gettext('Definition'), + disabled: function(m) { + // Disabled if table is a partitioned table. 
+ if (_.has(m, 'node_info') && _.has(m.node_info, 'table') && + _.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned) + { + setTimeout(function(){ + m.set('is_constraint_trigger', false); + },10); + + return true; + } + } },{ id: 'tgdeferrable', label: gettext('Deferrable?'), type: 'switch', group: gettext('Definition'), diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/utils.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/utils.py new file mode 100644 index 000000000..9a93d755c --- /dev/null +++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/utils.py @@ -0,0 +1,2178 @@ +########################################################################## +# +# pgAdmin 4 - PostgreSQL Tools +# +# Copyright (C) 2013 - 2017, The pgAdmin Development Team +# This software is released under the PostgreSQL Licence +# +########################################################################## + +""" Implements Utility class for Table and Partitioned Table. """ + +import re +from functools import wraps +import simplejson as json +from flask import render_template, jsonify, request +from flask_babel import gettext +from pgadmin.utils.ajax import make_json_response, internal_server_error, \ + make_response as ajax_response +from pgadmin.browser.server_groups.servers.databases.schemas.utils \ + import DataTypeReader, trigger_definition, parse_rule_definition +from pgadmin.browser.server_groups.servers.utils import parse_priv_from_db, \ + parse_priv_to_db +from pgadmin.browser.utils import PGChildNodeView +from pgadmin.utils import IS_PY2 +from pgadmin.utils.driver import get_driver +from config import PG_DEFAULT_DRIVER + + +class BaseTableView(PGChildNodeView): + """ + This class is base class for tables and partitioned tables. 
+ + Methods: + ------- + * check_precondition() + - This function will behave as a decorator which will checks + database connection before running view, it will also attaches + manager,conn & template_path properties to self + + * _formatter(data, tid) + - It will return formatted output of query result + as per client model format + + * _columns_formatter(tid, data): + - It will return formatted output of query result + as per client model format for column node + + * _index_constraints_formatter(self, did, tid, data): + - It will return formatted output of query result + as per client model format for index constraint node + + * _cltype_formatter(type): (staticmethod) + - We need to remove [] from type and append it + after length/precision so we will send flag for + sql template. + + * get_table_dependents(self, tid): + - This function get the dependents and return ajax response + for the table node. + + * get_table_dependencies(self, tid): + - This function get the dependencies and return ajax response + for the table node. + + * get_table_statistics(self, tid): + - Returns the statistics for a particular table if tid is specified, + otherwise it will return statistics for all the tables in that + schema. + * get_reverse_engineered_sql(self, did, scid, tid, main_sql, data): + - This function will creates reverse engineered sql for + the table object. + + * reset_statistics(self, scid, tid): + - This function will reset statistics of table. 
+ """ + @staticmethod + def check_precondition(f): + """ + This function will behave as a decorator which will checks + database connection before running view, it will also attaches + manager,conn & template_path properties to self + """ + + @wraps(f) + def wrap(*args, **kwargs): + # Here args[0] will hold self & kwargs will hold gid,sid,did + self = args[0] + driver = get_driver(PG_DEFAULT_DRIVER) + did = kwargs['did'] + self.manager = driver.connection_manager(kwargs['sid']) + self.conn = self.manager.connection(did=kwargs['did']) + self.qtIdent = driver.qtIdent + self.qtTypeIdent = driver.qtTypeIdent + # We need datlastsysoid to check if current table is system table + self.datlastsysoid = self.manager.db_info[ + did + ]['datlastsysoid'] if self.manager.db_info is not None and \ + did in self.manager.db_info else 0 + + ver = self.manager.version + # Set the template path for the SQL scripts + self.table_template_path = 'table/sql/#{0}#'.format(ver) + self.partition_template_path = 'partition/sql/#{0}#'.format(ver) + + # Template for Column ,check constraint and exclusion + # constraint node + self.column_template_path = 'column/sql/#{0}#'.format(ver) + self.check_constraint_template_path = \ + 'check_constraint/sql/#{0}#'.format(ver) + self.exclusion_constraint_template_path = \ + 'exclusion_constraint/sql/#{0}#'.format(ver) + + # Template for PK & Unique constraint node + self.index_constraint_template_path = 'index_constraint/sql' + + # Template for foreign key constraint node + self.foreign_key_template_path = 'foreign_key/sql/#{0}#'.format(ver) + + # Template for index node + self.index_template_path = 'index/sql/#{0}#'.format(ver) + + # Template for trigger node + self.trigger_template_path = 'trigger/sql/#{0}#'.format(ver) + + # Template for rules node + self.rules_template_path = 'rules/sql' + + # Supported ACL for table + self.acl = ['a', 'r', 'w', 'd', 'D', 'x', 't'] + + # Supported ACL for columns + self.column_acl = ['a', 'r', 'w', 'x'] + + return 
f(*args, **kwargs) + + return wrap + + def _columns_formatter(self, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for column node + """ + for column in data['columns']: + + # We need to format variables according to client js collection + if 'attoptions' in column and column['attoptions'] is not None: + spcoptions = [] + for spcoption in column['attoptions']: + k, v = spcoption.split('=') + spcoptions.append({'name': k, 'value': v}) + + column['attoptions'] = spcoptions + + # Need to format security labels according to client js collection + if 'seclabels' in column and column['seclabels'] is not None: + seclabels = [] + for seclbls in column['seclabels']: + k, v = seclbls.split('=') + seclabels.append({'provider': k, 'label': v}) + + column['seclabels'] = seclabels + + if 'attnum' in column and column['attnum'] is not None \ + and column['attnum'] > 0: + # We need to parse & convert ACL coming from database to + # json format + SQL = render_template("/".join( + [self.column_template_path, 'acl.sql']), + tid=tid, clid=column['attnum'] + ) + status, acl = self.conn.execute_dict(SQL) + + if not status: + return internal_server_error(errormsg=acl) + + # We will set get privileges from acl sql so we don't need + # it from properties sql + column['attacl'] = [] + + for row in acl['rows']: + priv = parse_priv_from_db(row) + column.setdefault(row['deftype'], []).append(priv) + + # we are receiving request when in edit mode + # we will send filtered types related to current type + present_type = column['cltype'] + + type_id = column['atttypid'] + + fulltype = self.get_full_type( + column['typnspname'], column['typname'], + column['isdup'], column['attndims'], column['atttypmod'] + ) + + length = False + precision = False + if 'elemoid' in column: + length, precision, typeval = \ + self.get_length_precision(column['elemoid']) + + # Set length and precision 
to None + column['attlen'] = None + column['attprecision'] = None + + # If we have length & precision both + if length and precision: + matchObj = re.search(r'(\d+),(\d+)', fulltype) + if matchObj: + column['attlen'] = matchObj.group(1) + column['attprecision'] = matchObj.group(2) + elif length: + # If we have length only + matchObj = re.search(r'(\d+)', fulltype) + if matchObj: + column['attlen'] = matchObj.group(1) + column['attprecision'] = None + + + SQL = render_template("/".join([self.column_template_path, + 'is_referenced.sql']), + tid=tid, clid=column['attnum']) + + status, is_reference = self.conn.execute_scalar(SQL) + + edit_types_list = list() + # We will need present type in edit mode + + if column['typnspname'] == "pg_catalog" \ + or column['typnspname'] == "public": + edit_types_list.append(present_type) + else: + t = self.qtTypeIdent(self.conn, column['typnspname'], + present_type) + edit_types_list.append(t) + column['cltype'] = t + + if int(is_reference) == 0: + SQL = render_template("/".join([self.column_template_path, + 'edit_mode_types.sql']), + type_id=type_id) + status, rset = self.conn.execute_2darray(SQL) + + for row in rset['rows']: + edit_types_list.append(row['typname']) + else: + edit_types_list.append(present_type) + + column['edit_types'] = edit_types_list + column['cltype'] = DataTypeReader.parse_type_name(column['cltype']) + + if 'indkey' in column: + # Current column + attnum = str(column['attnum']) + + # Single/List of primary key column(s) + indkey = str(column['indkey']) + + # We will check if column is in primary column(s) + if attnum in indkey.split(" "): + column['is_primary_key'] = True + else: + column['is_primary_key'] = False + + return data + + def _index_constraints_formatter(self, did, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for index constraint node + """ + + # We will fetch all the index 
constraints for the table + index_constraints = { + 'p': 'primary_key', 'u': 'unique_constraint' + } + + for ctype in index_constraints.keys(): + data[index_constraints[ctype]] = [] + + sql = render_template("/".join([self.index_constraint_template_path, + 'properties.sql']), + did=did, tid=tid, + constraint_type=ctype) + status, res = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=res) + + for row in res['rows']: + result = row + sql = render_template( + "/".join([self.index_constraint_template_path, + 'get_constraint_cols.sql']), + cid=row['oid'], + colcnt=row['indnatts']) + status, res = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=res) + + columns = [] + for r in res['rows']: + columns.append({"column": r['column'].strip('"')}) + + result['columns'] = columns + + # If not exists then create list and/or append into + # existing list [ Adding into main data dict] + data.setdefault(index_constraints[ctype], []).append(result) + + return data + + def _foreign_key_formatter(self, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for foreign key constraint node + """ + + # We will fetch all the index constraints for the table + sql = render_template("/".join([self.foreign_key_template_path, + 'properties.sql']), + tid=tid) + + status, result = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=result) + + for fk in result['rows']: + + sql = render_template("/".join([self.foreign_key_template_path, + 'get_constraint_cols.sql']), + tid=tid, + keys=zip(fk['confkey'], fk['conkey']), + confrelid=fk['confrelid']) + + status, res = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=res) + + columns = [] + cols = [] + for row in res['rows']: + columns.append({"local_column": row['conattname'], + "references": 
fk['confrelid'], + "referenced": row['confattname']}) + cols.append(row['conattname']) + + fk['columns'] = columns + + SQL = render_template("/".join([self.foreign_key_template_path, + 'get_parent.sql']), + tid=fk['columns'][0]['references']) + + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + fk['remote_schema'] = rset['rows'][0]['schema'] + fk['remote_table'] = rset['rows'][0]['table'] + + coveringindex = self.search_coveringindex(tid, cols) + + fk['coveringindex'] = coveringindex + if coveringindex: + fk['autoindex'] = True + fk['hasindex'] = True + else: + fk['autoindex'] = False + fk['hasindex'] = False + # If not exists then create list and/or append into + # existing list [ Adding into main data dict] + data.setdefault('foreign_key', []).append(fk) + + return data + + def _check_constraint_formatter(self, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for check constraint node + """ + + # We will fetch all the index constraints for the table + SQL = render_template("/".join([self.check_constraint_template_path, + 'properties.sql']), + tid=tid) + + status, res = self.conn.execute_dict(SQL) + + if not status: + return internal_server_error(errormsg=res) + # If not exists then create list and/or append into + # existing list [ Adding into main data dict] + + data['check_constraint'] = res['rows'] + + return data + + def _exclusion_constraint_formatter(self, did, tid, data): + """ + Args: + tid: Table OID + data: dict of query result + + Returns: + It will return formatted output of query result + as per client model format for exclusion constraint node + """ + + # We will fetch all the index constraints for the table + sql = render_template("/".join([self.exclusion_constraint_template_path, + 'properties.sql']), + did=did, tid=tid) + + status, result = self.conn.execute_dict(sql) + 
+ if not status: + return internal_server_error(errormsg=result) + + for ex in result['rows']: + + sql = render_template("/".join( + [self.exclusion_constraint_template_path, + 'get_constraint_cols.sql']), + cid=ex['oid'], + colcnt=ex['indnatts']) + + status, res = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=res) + + columns = [] + for row in res['rows']: + if row['options'] & 1: + order = False + nulls_order = True if (row['options'] & 2) else False + else: + order = True + nulls_order = True if (row['options'] & 2) else False + + columns.append({"column": row['coldef'].strip('"'), + "oper_class": row['opcname'], + "order": order, + "nulls_order": nulls_order, + "operator": row['oprname'], + "col_type": row['datatype'] + }) + + ex['columns'] = columns + # If not exists then create list and/or append into + # existing list [ Adding into main data dict] + data.setdefault('exclude_constraint', []).append(ex) + + return data + + def _formatter(self, did, scid, tid, data): + """ + Args: + data: dict of query result + scid: schema oid + tid: table oid + + Returns: + It will return formatted output of query result + as per client model format + """ + # Need to format security labels according to client js collection + if 'seclabels' in data and data['seclabels'] is not None: + seclabels = [] + for seclbls in data['seclabels']: + k, v = seclbls.split('=') + seclabels.append({'provider': k, 'label': v}) + + data['seclabels'] = seclabels + + # We need to parse & convert ACL coming from database to json format + SQL = render_template("/".join([self.table_template_path, 'acl.sql']), + tid=tid, scid=scid) + status, acl = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=acl) + + # We will set get privileges from acl sql so we don't need + # it from properties sql + for row in acl['rows']: + priv = parse_priv_from_db(row) + if row['deftype'] in data: + data[row['deftype']].append(priv) + else: + 
data[row['deftype']] = [priv] + + # We will add Auto vacuum defaults with out result for grid + data['vacuum_table'] = self.parse_vacuum_data(self.conn, data, 'table') + data['vacuum_toast'] = self.parse_vacuum_data(self.conn, data, 'toast') + + # Fetch columns for the table logic + # + # 1) Check if of_type and inherited tables are present? + # 2) If yes then Fetch all the columns for of_type and inherited tables + # 3) Add columns in columns collection + # 4) Find all the columns for tables and filter out columns which are + # not inherited from any table & format them one by one + + # Get of_type table columns and add it into columns dict + if data['typname']: + SQL = render_template("/".join([self.table_template_path, + 'get_columns_for_table.sql']), + tname=data['typname']) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + data['columns'] = res['rows'] + + # Get inherited table(s) columns and add it into columns dict + elif data['coll_inherits'] and len(data['coll_inherits']) > 0: + columns = [] + # Return all tables which can be inherited & do not show + # system columns + SQL = render_template("/".join([self.table_template_path, + 'get_inherits.sql']), + show_system_objects=False + ) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + if row['inherits'] in data['coll_inherits']: + # Fetch columns using inherited table OID + SQL = render_template("/".join( + [self.table_template_path, + 'get_columns_for_table.sql']), + tid=row['oid'] + ) + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + columns.extend(res['rows'][:]) + data['columns'] = columns + + # We will fetch all the columns for the table using + # columns properties.sql, so we need to set template path + SQL = render_template("/".join([self.column_template_path, + 'properties.sql']), + tid=tid, + 
show_sys_objects=False + ) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + all_columns = res['rows'] + + # Filter inherited columns from all columns + if 'columns' in data and len(data['columns']) > 0 \ + and len(all_columns) > 0: + for row in data['columns']: + for i, col in enumerate(all_columns): + # If both name are same then remove it + # as it is inherited from other table + if col['name'] == row['name']: + # Remove same column from all_columns as + # already have it columns collection + del all_columns[i] + + # If any column is added then update columns collection + if len(all_columns) > 0: + data['columns'] += all_columns + # If no inherited columns found then add all columns + elif len(all_columns) > 0: + data['columns'] = all_columns + + if 'columns' in data and len(data['columns']) > 0: + data = self._columns_formatter(tid, data) + + # Here we will add constraint in our output + data = self._index_constraints_formatter(did, tid, data) + data = self._foreign_key_formatter(tid, data) + data = self._check_constraint_formatter(tid, data) + data = self._exclusion_constraint_formatter(did, tid, data) + + return data + + @staticmethod + def _cltype_formatter(data_type): + """ + + Args: + data_type: Type string + + Returns: + We need to remove [] from type and append it + after length/precision so we will send flag for + sql template + """ + if '[]' in data_type: + return data_type[:-2], True + else: + return data_type, False + + def get_table_dependents(self, tid): + """ + This function get the dependents and return ajax response + for the table node. 
+ + Args: + tid: Table ID + """ + # Specific condition for column which we need to append + where = "WHERE dep.refobjid={0}::OID".format(tid) + + dependents_result = self.get_dependents( + self.conn, tid + ) + + # Specific sql to run againt column to fetch dependents + SQL = render_template("/".join([self.table_template_path, + 'depend.sql']), where=where) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + for row in res['rows']: + ref_name = row['refname'] + if ref_name is None: + continue + + dep_type = '' + dep_str = row['deptype'] + if dep_str == 'a': + dep_type = 'auto' + elif dep_str == 'n': + dep_type = 'normal' + elif dep_str == 'i': + dep_type = 'internal' + + dependents_result.append({'type': 'sequence', 'name': ref_name, + 'field': dep_type}) + + return ajax_response( + response=dependents_result, + status=200 + ) + + def get_table_dependencies(self, tid): + """ + This function get the dependencies and return ajax response + for the table node. + + Args: + tid: Table ID + + """ + dependencies_result = self.get_dependencies( + self.conn, tid + ) + + return ajax_response( + response=dependencies_result, + status=200 + ) + + def get_table_statistics(self, scid, tid): + """ + Statistics + + Args: + scid: Schema Id + tid: Table Id + + Returns the statistics for a particular table if tid is specified, + otherwise it will return statistics for all the tables in that + schema. 
+ """ + + # Fetch schema name + status, schema_name = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_schema.sql']), + conn=self.conn, scid=scid + ) + ) + if not status: + return internal_server_error(errormsg=schema_name) + + if tid is None: + status, res = self.conn.execute_dict( + render_template( + "/".join([self.table_template_path, + 'coll_table_stats.sql']), conn=self.conn, + schema_name=schema_name + ) + ) + else: + # For Individual table stats + + # Check if pgstattuple extension is already created? + # if created then only add extended stats + status, is_pgstattuple = self.conn.execute_scalar(""" + SELECT (count(extname) > 0) AS is_pgstattuple + FROM pg_extension + WHERE extname='pgstattuple' + """) + if not status: + return internal_server_error(errormsg=is_pgstattuple) + + # Fetch Table name + status, table_name = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_table.sql']), + conn=self.conn, scid=scid, tid=tid + ) + ) + if not status: + return internal_server_error(errormsg=table_name) + + status, res = self.conn.execute_dict( + render_template( + "/".join([self.table_template_path, 'stats.sql']), + conn=self.conn, schema_name=schema_name, + table_name=table_name, + is_pgstattuple=is_pgstattuple, tid=tid + ) + ) + + if not status: + return internal_server_error(errormsg=res) + + return make_json_response( + data=res, + status=200 + ) + + def get_reverse_engineered_sql(self, did, scid, tid, main_sql, data): + """ + This function will creates reverse engineered sql for + the table object + + Args: + did: Database ID + scid: Schema ID + tid: Table ID + main_sql: List contains all the reversed engineered sql + data: Table's Data + """ + """ + ##################################### + # 1) Reverse engineered sql for TABLE + ##################################### + """ + + # Table & Schema declaration so that we can use them in child nodes + schema = data['schema'] + table = 
data['name'] + + data = self._formatter(did, scid, tid, data) + + # Now we have all lis of columns which we need + # to include in our create definition, Let's format them + if 'columns' in data: + for c in data['columns']: + if 'attacl' in c: + c['attacl'] = parse_priv_to_db(c['attacl'], self.column_acl) + + # check type for '[]' in it + if 'cltype' in c: + c['cltype'], c['hasSqrBracket'] = \ + self._cltype_formatter(c['cltype']) + + sql_header = u"-- Table: {0}\n\n-- ".format( + self.qtIdent(self.conn, data['schema'], data['name'])) + + sql_header += render_template("/".join([self.table_template_path, + 'delete.sql']), + data=data, conn=self.conn) + + sql_header = sql_header.strip('\n') + sql_header += '\n' + + # Add into main sql + main_sql.append(sql_header) + + # Parse privilege data + if 'relacl' in data: + data['relacl'] = parse_priv_to_db(data['relacl'], self.acl) + + # if table is partitions then + if 'relispartition' in data and data['relispartition']: + table_sql = render_template("/".join([self.partition_template_path, + 'create.sql']), + data=data, conn=self.conn) + else: + table_sql = render_template("/".join([self.table_template_path, + 'create.sql']), + data=data, conn=self.conn) + + # Add into main sql + table_sql = re.sub('\n{2,}', '\n\n', table_sql) + main_sql.append(table_sql.strip('\n')) + + """ + ###################################### + # 2) Reverse engineered sql for INDEX + ###################################### + """ + + SQL = render_template("/".join([self.index_template_path, + 'nodes.sql']), tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + + SQL = render_template("/".join([self.index_template_path, + 'properties.sql']), + did=did, tid=tid, idx=row['oid'], + datlastsysoid=self.datlastsysoid) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + data = dict(res['rows'][0]) + # 
Adding parent into data dict, will be using it while creating sql + data['schema'] = schema + data['table'] = table + # We also need to fecth columns of index + SQL = render_template("/".join([self.index_template_path, + 'column_details.sql']), + idx=row['oid']) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + # 'attdef' comes with quotes from query so we need to strip them + # 'options' we need true/false to render switch + # ASC(false)/DESC(true) + columns = [] + cols = [] + for col_row in rset['rows']: + # We need all data as collection for ColumnsModel + cols_data = { + 'colname': col_row['attdef'].strip('"'), + 'collspcname': col_row['collnspname'], + 'op_class': col_row['opcname'], + } + if col_row['options'][0] == 'DESC': + cols_data['sort_order'] = True + columns.append(cols_data) + + # We need same data as string to display in properties window + # If multiple column then separate it by colon + cols_str = col_row['attdef'] + if col_row['collnspname']: + cols_str += ' COLLATE ' + col_row['collnspname'] + if col_row['opcname']: + cols_str += ' ' + col_row['opcname'] + if col_row['options'][0] == 'DESC': + cols_str += ' DESC' + cols.append(cols_str) + + # Push as collection + data['columns'] = columns + # Push as string + data['cols'] = ', '.join(cols) + + sql_header = u"\n-- Index: {0}\n\n-- ".format(data['name']) + + sql_header += render_template("/".join([self.index_template_path, + 'delete.sql']), + data=data, conn=self.conn) + + index_sql = render_template("/".join([self.index_template_path, + 'create.sql']), + data=data, conn=self.conn) + index_sql += "\n" + index_sql += render_template("/".join([self.index_template_path, + 'alter.sql']), + data=data, conn=self.conn) + + # Add into main sql + index_sql = re.sub('\n{2,}', '\n\n', index_sql) + main_sql.append(sql_header + '\n\n' + index_sql.strip('\n')) + + """ + ######################################## + # 3) Reverse engineered sql for 
TRIGGERS + ######################################## + """ + SQL = render_template("/".join([self.trigger_template_path, + 'nodes.sql']), tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + trigger_sql = '' + + SQL = render_template("/".join([self.trigger_template_path, + 'properties.sql']), + tid=tid, trid=row['oid'], + datlastsysoid=self.datlastsysoid) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + data = dict(res['rows'][0]) + # Adding parent into data dict, will be using it while creating sql + data['schema'] = schema + data['table'] = table + + if data['tgnargs'] > 1: + # We know that trigger has more than 1 arguments, + # let's join them + data['tgargs'] = ', '.join(data['tgargs']) + + if len(data['tgattr']) > 1: + columns = ', '.join(data['tgattr'].split(' ')) + + SQL = render_template("/".join([self.trigger_template_path, + 'get_columns.sql']), + tid=tid, clist=columns) + + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + # 'tgattr' contains list of columns from table used in trigger + columns = [] + + for col_row in rset['rows']: + columns.append({'column': col_row['name']}) + + data['columns'] = columns + + data = trigger_definition(data) + + sql_header = u"\n-- Trigger: {0}\n\n-- ".format(data['name']) + + sql_header += render_template("/".join([self.trigger_template_path, + 'delete.sql']), + data=data, conn=self.conn) + + # If the request for new object which do not have did + trigger_sql = render_template("/".join([self.trigger_template_path, + 'create.sql']), + data=data, conn=self.conn) + + trigger_sql = sql_header + '\n\n' + trigger_sql.strip('\n') + + # If trigger is disabled then add sql code for the same + if not data['is_enable_trigger']: + trigger_sql += '\n\n' + trigger_sql += render_template("/".join([ + 
self.trigger_template_path, + 'enable_disable_trigger.sql']), + data=data, conn=self.conn) + + # Add into main sql + trigger_sql = re.sub('\n{2,}', '\n\n', trigger_sql) + main_sql.append(trigger_sql) + + """ + ##################################### + # 4) Reverse engineered sql for RULES + ##################################### + """ + + SQL = render_template("/".join( + [self.rules_template_path, 'properties.sql']), tid=tid) + + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + rules_sql = '\n' + SQL = render_template("/".join( + [self.rules_template_path, 'properties.sql'] + ), rid=row['oid'], datlastsysoid=self.datlastsysoid) + + status, res = self.conn.execute_dict(SQL) + if not status: + return internal_server_error(errormsg=res) + + res_data = parse_rule_definition(res) + rules_sql += render_template("/".join( + [self.rules_template_path, 'create.sql']), + data=res_data, display_comments=True) + + # Add into main sql + rules_sql = re.sub('\n{2,}', '\n\n', rules_sql) + main_sql.append(rules_sql) + + """ + ########################################## + # 5) Reverse engineered sql for PARTITIONS + ########################################## + """ + if 'is_partitioned' in data and data['is_partitioned']: + SQL = render_template("/".join([self.partition_template_path, + 'nodes.sql']), + scid=scid, tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + sql_header = u"\n-- Partitions SQL" + partition_sql = '' + for row in rset['rows']: + part_data = dict() + part_data['partitioned_table_name'] = data['name'] + part_data['parent_schema'] = data['schema'] + part_data['schema'] = row['schema_name'] + part_data['relispartition'] = True + part_data['name'] = row['name'] + part_data['partition_value'] = row['partition_value'] + part_data['is_partitioned'] = row ['is_partitioned'] + part_data['partition_scheme'] = 
row['partition_scheme'] + + partition_sql += render_template("/".join( + [self.partition_template_path, 'create.sql']), + data=part_data, conn=self.conn) + + # Add into main sql + partition_sql = re.sub('\n{2,}', '\n\n', partition_sql) + main_sql.append(sql_header + '\n\n' + partition_sql.strip('\n')) + + sql = '\n'.join(main_sql) + + return ajax_response(response=sql.strip('\n')) + + def reset_statistics(self, scid, tid): + """ + This function will reset statistics of table + + Args: + scid: Schema ID + tid: Table ID + """ + try: + SQL = render_template("/".join([self.table_template_path, + 'reset_stats.sql']), + tid=tid) + status, res = self.conn.execute_scalar(SQL) + if not status: + return internal_server_error(errormsg=res) + + return make_json_response( + success=1, + info=gettext("Table statistics have been reset"), + data={ + 'id': tid, + 'scid': scid + } + ) + + except Exception as e: + return internal_server_error(errormsg=str(e)) + + def get_partition_scheme(self, data): + partition_scheme = None + if 'partition_type' in data \ + and data['partition_type'] == 'range': + partition_scheme = 'RANGE (' + elif 'partition_type' in data \ + and data['partition_type'] == 'list': + partition_scheme = 'LIST (' + + for row in data['partition_keys']: + if row['key_type'] == 'column': + partition_scheme += self.qtIdent( + self.conn, row['pt_column']) + ', ' + elif row['key_type'] == 'expression': + partition_scheme += row['expression'] + ', ' + + # Remove extra space and comma + if len(data['partition_keys']) > 0: + partition_scheme = partition_scheme[:-2] + partition_scheme += ')' + + return partition_scheme + + @staticmethod + def validate_constrains(key, data): + + if key == 'primary_key' or key == 'unique_constraint': + if 'columns' in data and len(data['columns']) > 0: + return True + else: + return False + elif key == 'foreign_key': + if 'oid' not in data: + for arg in ['columns']: + if arg not in data: + return False + elif isinstance(data[arg], list) and 
len(data[arg]) < 1: + return False + + if 'autoindex' in data and data['autoindex'] and \ + ('coveringindex' not in data or + data['coveringindex'] == ''): + return False + + return True + + elif key == 'check_constraint': + for arg in ['consrc']: + if arg not in data or data[arg] == '': + return False + return True + + elif key == 'exclude_constraint': + pass + + return True + + @staticmethod + def check_and_convert_name_to_string(data): + """ + This function will check and covert table to string incase + it is numeric + + Args: + data: data dict + + Returns: + Updated data dict + """ + # For Python2, it can be int, long, float + if IS_PY2 and hasattr(str, 'decode'): + if isinstance(data['name'], (int, long, float)): + data['name'] = str(data['name']) + else: + # For Python3, it can be int, float + if isinstance(data['name'], (int, float)): + data['name'] = str(data['name']) + return data + + def get_index_constraint_sql(self, did, tid, data): + """ + Args: + tid: Table ID + data: data dict coming from the client + + Returns: + This function will generate modified sql for index constraints + (Primary Key & Unique) + """ + sql = [] + # We will fetch all the index constraints for the table + index_constraints = { + 'p': 'primary_key', 'u': 'unique_constraint' + } + + for ctype in index_constraints.keys(): + # Check if constraint is in data + # If yes then we need to check for add/change/delete + if index_constraints[ctype] in data: + constraint = data[index_constraints[ctype]] + # If constraint(s) is/are deleted + if 'deleted' in constraint: + for c in constraint['deleted']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + # Sql for drop + sql.append( + render_template("/".join( + [self.index_constraint_template_path, + 'delete.sql']), + data=c, conn=self.conn).strip('\n') + ) + + if 'changed' in constraint: + for c in constraint['changed']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + properties_sql = render_template("/".join( + 
[self.index_constraint_template_path, 'properties.sql']), + did=did, tid=tid, cid=c['oid'], constraint_type=ctype) + status, res = self.conn.execute_dict(properties_sql) + if not status: + return internal_server_error(errormsg=res) + + old_data = res['rows'][0] + # Sql to update object + sql.append( + render_template("/".join([ + self.index_constraint_template_path, + 'update.sql']), data=c, o_data=old_data, + conn=self.conn).strip('\n') + ) + + if 'added' in constraint: + for c in constraint['added']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + # Sql to add object + if self.validate_constrains(index_constraints[ctype], c): + sql.append( + render_template( + "/".join([self.index_constraint_template_path, + 'create.sql']), + data=c, conn=self.conn, + constraint_name='PRIMARY KEY' + if ctype == 'p' else 'UNIQUE' + ).strip('\n') + ) + else: + sql.append( + gettext( + '-- definition incomplete for {0} constraint'.format(index_constraints[ctype]) + ) + ) + if len(sql) > 0: + # Join all the sql(s) as single string + return '\n\n'.join(sql) + else: + return None + + def get_foreign_key_sql(self, tid, data): + """ + Args: + tid: Table ID + data: data dict coming from the client + + Returns: + This function will generate modified sql for foreign key + """ + sql = [] + # Check if constraint is in data + # If yes then we need to check for add/change/delete + if 'foreign_key' in data: + constraint = data['foreign_key'] + # If constraint(s) is/are deleted + if 'deleted' in constraint: + for c in constraint['deleted']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + # Sql for drop + sql.append( + render_template("/".join( + [self.foreign_key_template_path, + 'delete.sql']), + data=c, conn=self.conn).strip('\n') + ) + + if 'changed' in constraint: + for c in constraint['changed']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + properties_sql = render_template("/".join( + [self.foreign_key_template_path, 'properties.sql']), + 
tid=tid, cid=c['oid']) + status, res = self.conn.execute_dict(properties_sql) + if not status: + return internal_server_error(errormsg=res) + + old_data = res['rows'][0] + # Sql to update object + sql.append( + render_template("/".join([ + self.foreign_key_template_path, + 'update.sql']), data=c, o_data=old_data, + conn=self.conn).strip('\n') + ) + + if not self.validate_constrains('foreign_key', c): + sql.append( + gettext( + '-- definition incomplete for foreign_key constraint' + ) + ) + return '\n\n'.join(sql) + + if 'columns' in c: + cols = [] + for col in c['columns']: + cols.append(col['local_column']) + + coveringindex = self.search_coveringindex(tid, cols) + + if coveringindex is None and 'autoindex' in c and c['autoindex'] and \ + ('coveringindex' in c and + c['coveringindex'] != ''): + sql.append(render_template( + "/".join([self.foreign_key_template_path, 'create_index.sql']), + data=c, conn=self.conn).strip('\n') + ) + + if 'added' in constraint: + for c in constraint['added']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + # Sql to add object + # Columns + + if not self.validate_constrains('foreign_key', c): + sql.append( + gettext( + '-- definition incomplete for foreign_key constraint' + ) + ) + return '\n\n'.join(sql) + + SQL = render_template("/".join([self.foreign_key_template_path, + 'get_parent.sql']), + tid=c['columns'][0]['references']) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + c['remote_schema'] = rset['rows'][0]['schema'] + c['remote_table'] = rset['rows'][0]['table'] + + sql.append( + render_template( + "/".join([self.foreign_key_template_path, + 'create.sql']), + data=c, conn=self.conn + ).strip('\n') + ) + + if c['autoindex']: + sql.append( + render_template( + "/".join([self.foreign_key_template_path, + 'create_index.sql']), + data=c, conn=self.conn).strip('\n') + ) + + if len(sql) > 0: + # Join all the sql(s) as single string + return 
'\n\n'.join(sql) + else: + return None + + def get_check_constraint_sql(self, tid, data): + """ + Args: + tid: Table ID + data: data dict coming from the client + + Returns: + This function will generate modified sql for check constraint + """ + sql = [] + # Check if constraint is in data + # If yes then we need to check for add/change/delete + if 'check_constraint' in data: + constraint = data['check_constraint'] + # If constraint(s) is/are deleted + if 'deleted' in constraint: + for c in constraint['deleted']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + # Sql for drop + sql.append( + render_template("/".join( + [self.check_constraint_template_path, + 'delete.sql']), + data=c, conn=self.conn).strip('\n') + ) + + if 'changed' in constraint: + for c in constraint['changed']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + properties_sql = render_template("/".join( + [self.check_constraint_template_path, 'properties.sql']), + tid=tid, cid=c['oid']) + status, res = self.conn.execute_dict(properties_sql) + if not status: + return internal_server_error(errormsg=res) + + old_data = res['rows'][0] + # Sql to update object + sql.append( + render_template("/".join([ + self.check_constraint_template_path, + 'update.sql']), data=c, o_data=old_data, + conn=self.conn).strip('\n') + ) + + if 'added' in constraint: + for c in constraint['added']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + if not self.validate_constrains('check_constraint', c): + sql.append( + gettext( + '-- definition incomplete for check_constraint' + ) + ) + return '\n\n'.join(sql) + + sql.append( + render_template( + "/".join([self.check_constraint_template_path, + 'create.sql']), + data=c, conn=self.conn + ).strip('\n') + ) + + if len(sql) > 0: + # Join all the sql(s) as single string + return '\n\n'.join(sql) + else: + return None + + def get_exclusion_constraint_sql(self, did, tid, data): + """ + Args: + tid: Table ID + data: data dict coming from the 
client + + Returns: + This function will generate modified sql for exclusion constraint + """ + sql = [] + # Check if constraint is in data + # If yes then we need to check for add/change/delete + if 'exclude_constraint' in data: + constraint = data['exclude_constraint'] + # If constraint(s) is/are deleted + if 'deleted' in constraint: + for c in constraint['deleted']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + # Sql for drop + sql.append( + render_template("/".join( + [self.exclusion_constraint_template_path, + 'delete.sql']), + data=c, conn=self.conn).strip('\n') + ) + + if 'changed' in constraint: + for c in constraint['changed']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + properties_sql = render_template("/".join( + [self.exclusion_constraint_template_path, 'properties.sql']), + did=did, tid=tid, cid=c['oid']) + status, res = self.conn.execute_dict(properties_sql) + if not status: + return internal_server_error(errormsg=res) + + old_data = res['rows'][0] + # Sql to update object + sql.append( + render_template("/".join([ + self.exclusion_constraint_template_path, + 'update.sql']), data=c, o_data=old_data, + conn=self.conn).strip('\n') + ) + + if 'added' in constraint: + for c in constraint['added']: + c['schema'] = data['schema'] + c['table'] = data['name'] + + if not self.validate_constrains('exclude_constraint', c): + sql.append( + gettext( + '-- definition incomplete for exclusion_constraint' + ) + ) + return '\n\n'.join(sql) + + sql.append( + render_template( + "/".join([self.exclusion_constraint_template_path, + 'create.sql']), + data=c, conn=self.conn + ).strip('\n') + ) + + if len(sql) > 0: + # Join all the sql(s) as single string + return u'\n\n'.join(sql) + else: + return None + + def get_sql(self, did, scid, tid, data, res): + """ + This function will generate create/update sql from model data + coming from client + """ + if tid is not None: + old_data = res['rows'][0] + old_data = self._formatter(did, scid, 
tid, old_data) + + # We will convert privileges coming from client required + if 'relacl' in data: + for mode in ['added', 'changed', 'deleted']: + if mode in data['relacl']: + data['relacl'][mode] = parse_priv_to_db( + data['relacl'][mode], self.acl + ) + + # If name is not present in request data + if 'name' not in data: + data['name'] = old_data['name'] + + data = BaseTableView.check_and_convert_name_to_string(data) + + # If name if not present + if 'schema' not in data: + data['schema'] = old_data['schema'] + + # Filter out new tables from list, we will send complete list + # and not newly added tables in the list from client + # so we will filter new tables here + if 'coll_inherits' in data: + p_len = len(old_data['coll_inherits']) + c_len = len(data['coll_inherits']) + # If table(s) added + if c_len > p_len: + data['coll_inherits_added'] = list( + set(data['coll_inherits']) - set(old_data['coll_inherits']) + ) + # If table(s)removed + elif c_len < p_len: + data['coll_inherits_removed'] = list( + set(old_data['coll_inherits']) - set(data['coll_inherits']) + ) + # Safe side verification,In case it happens.. 
+ # If user removes and adds same number of table + # eg removed one table and added one new table + elif c_len == p_len: + data['coll_inherits_added'] = list( + set(data['coll_inherits']) - set(old_data['coll_inherits']) + ) + data['coll_inherits_removed'] = list( + set(old_data['coll_inherits']) - set(data['coll_inherits']) + ) + + SQL = render_template("/".join([self.table_template_path, 'update.sql']), + o_data=old_data, data=data, conn=self.conn) + # Removes training new lines + SQL = SQL.strip('\n') + '\n\n' + + # Parse/Format columns & create sql + if 'columns' in data: + # Parse the data coming from client + data = self._parse_format_columns(data, mode='edit') + + columns = data['columns'] + column_sql = '\n' + + # If column(s) is/are deleted + if 'deleted' in columns: + for c in columns['deleted']: + c['schema'] = data['schema'] + c['table'] = data['name'] + # Sql for drop column + if 'inheritedfrom' not in c: + column_sql += render_template("/".join( + [self.column_template_path, 'delete.sql']), + data=c, conn=self.conn).strip('\n') + '\n\n' + + # If column(s) is/are changed + # Here we will be needing previous properties of column + # so that we can compare & update it + if 'changed' in columns: + for c in columns['changed']: + c['schema'] = data['schema'] + c['table'] = data['name'] + if 'attacl' in c: + c['attacl'] = parse_priv_to_db(c['attacl'], + self.column_acl) + + properties_sql = render_template("/".join([self.column_template_path, + 'properties.sql']), + tid=tid, + clid=c['attnum'], + show_sys_objects=self.blueprint.show_system_objects + ) + + status, res = self.conn.execute_dict(properties_sql) + if not status: + return internal_server_error(errormsg=res) + old_data = res['rows'][0] + + old_data['cltype'], old_data['hasSqrBracket'] = self._cltype_formatter(old_data['cltype']) + old_data = BaseTableView.convert_length_precision_to_string(old_data) + + fulltype = self.get_full_type( + old_data['typnspname'], old_data['typname'], + 
old_data['isdup'], old_data['attndims'], old_data['atttypmod'] + ) + + # If we have length & precision both + matchObj = re.search(r'(\d+),(\d+)', fulltype) + if matchObj: + old_data['attlen'] = int(matchObj.group(1)) + old_data['attprecision'] = int(matchObj.group(2)) + else: + # If we have length only + matchObj = re.search(r'(\d+)', fulltype) + if matchObj: + old_data['attlen'] = int(matchObj.group(1)) + old_data['attprecision'] = None + else: + old_data['attlen'] = None + old_data['attprecision'] = None + + old_data['cltype'] = DataTypeReader.parse_type_name(old_data['cltype']) + + # Sql for alter column + if 'inheritedfrom' not in c: + column_sql += render_template("/".join( + [self.column_template_path, 'update.sql']), + data=c, o_data=old_data, conn=self.conn).strip('\n') + '\n\n' + + # If column(s) is/are added + if 'added' in columns: + for c in columns['added']: + c['schema'] = data['schema'] + c['table'] = data['name'] + # Sql for create column + if 'attacl' in c: + c['attacl'] = parse_priv_to_db(c['attacl'], + self.column_acl) + + c = BaseTableView.convert_length_precision_to_string(c) + + if 'inheritedfrom' not in c: + column_sql += render_template("/".join( + [self.column_template_path, 'create.sql']), + data=c, conn=self.conn).strip('\n') + '\n\n' + + # Combine all the SQL together + SQL += column_sql.strip('\n') + + # Check for partitions + if 'partitions' in data: + partitions = data['partitions'] + partitions_sql = '\n' + + # If partition(s) is/are deleted + if 'deleted' in partitions: + for row in partitions['deleted']: + temp_data = dict() + schema_name, table_name = \ + self.get_schema_and_table_name(row['oid']) + + temp_data['parent_schema'] = data['schema'] + temp_data['partitioned_table_name'] = data['name'] + temp_data['schema'] = schema_name + temp_data['name'] = table_name + + # Sql for detach partition + partitions_sql += render_template("/".join( + [self.partition_template_path, 'detach.sql']), + data=temp_data, 
conn=self.conn).strip('\n') + '\n\n' + + # If partition(s) is/are added + if 'added' in partitions: + temp_data = dict() + temp_data['schema'] = data['schema'] + temp_data['name'] = data['name'] + # get the partition type + temp_data['partition_type'] = \ + old_data['partition_scheme'].split()[0].lower() + temp_data['partitions'] = partitions['added'] + + partitions_sql += \ + self.get_partitions_sql(temp_data).strip('\n') + '\n\n' + + # Combine all the SQL together + SQL += partitions_sql.strip('\n') + + # Check if index constraints are added/changed/deleted + index_constraint_sql = self.get_index_constraint_sql(did, tid, data) + # If we have index constraint sql then ad it in main sql + if index_constraint_sql is not None: + SQL += '\n' + index_constraint_sql + + # Check if foreign key(s) is/are added/changed/deleted + foreign_key_sql = self.get_foreign_key_sql(tid, data) + # If we have foreign key sql then ad it in main sql + if foreign_key_sql is not None: + SQL += '\n' + foreign_key_sql + + # Check if check constraint(s) is/are added/changed/deleted + check_constraint_sql = self.get_check_constraint_sql(tid, data) + # If we have check constraint sql then ad it in main sql + if check_constraint_sql is not None: + SQL += '\n' + check_constraint_sql + + # Check if exclusion constraint(s) is/are added/changed/deleted + exclusion_constraint_sql = self.get_exclusion_constraint_sql(did, tid, data) + # If we have check constraint sql then ad it in main sql + if exclusion_constraint_sql is not None: + SQL += '\n' + exclusion_constraint_sql + + else: + res = None + required_args = [ + 'name' + ] + + for arg in required_args: + if arg not in data: + return gettext('-- definition incomplete') + + # validate constraint data. 
+ for key in ['primary_key', 'unique_constraint', + 'foreign_key', 'check_constraint', + 'exclude_constraint']: + if key in data and len(data[key]) > 0: + for constraint in data[key]: + if not self.validate_constrains(key, constraint): + return gettext( + '-- definition incomplete for {0}'.format(key) + ) + + # We will convert privileges coming from client required + # in server side format + if 'relacl' in data: + data['relacl'] = parse_priv_to_db(data['relacl'], self.acl) + + # Parse & format columns + data = self._parse_format_columns(data) + data = BaseTableView.check_and_convert_name_to_string(data) + + if 'foreign_key' in data: + for c in data['foreign_key']: + SQL = render_template("/".join( + [self.foreign_key_template_path, + 'get_parent.sql']), + tid=c['columns'][0]['references']) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + c['remote_schema'] = rset['rows'][0]['schema'] + c['remote_table'] = rset['rows'][0]['table'] + + partitions_sql = '' + if 'is_partitioned' in data and data['is_partitioned']: + data['relkind'] = 'p' + # create partition scheme + data['partition_scheme'] = self.get_partition_scheme(data) + partitions_sql = self.get_partitions_sql(data) + + SQL = render_template("/".join([self.table_template_path, + 'create.sql']), + data=data, conn=self.conn) + + # Append SQL for partitions + SQL += '\n' + partitions_sql + + SQL = re.sub('\n{2,}', '\n\n', SQL) + SQL = SQL.strip('\n') + + return SQL, data['name'] if 'name' in data else old_data['name'] + + def update(self, gid, sid, did, scid, tid, data, res, parent_id=None): + """ + This function will update an existing table object + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + data: Data to update + res: Table properties + parent_id: parent table id if current table is partition of parent + table else none + """ + try: + SQL, name = self.get_sql(did, scid, tid, data, res) + 
+ SQL = SQL.strip('\n').strip(' ') + status, rest = self.conn.execute_scalar(SQL) + if not status: + return internal_server_error(errormsg=rest) + + SQL = render_template("/".join([self.table_template_path, + 'get_schema_oid.sql']), tid=tid) + status, rest = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rest) + + if not parent_id: + parent_id = scid + + # Check for partitions + partitions_oid = dict() + if 'partitions' in data: + # Fetch oid of schema for all detached partitions + if 'deleted' in data['partitions']: + detached = [] + for row in data['partitions']['deleted']: + status, pscid = self.conn.execute_scalar( + render_template( + "/".join([ + self.table_template_path, 'get_schema_oid.sql' + ]), + tid=row['oid'] + ) + ) + if not status: + return internal_server_error(errormsg=pscid) + + detached.append({'oid': row['oid'], 'schema_id': pscid}) + partitions_oid['detached'] = detached + + # Fetch oid and schema oid for all created/attached partitions + if 'added' in data['partitions']: + created = [] + attached = [] + for row in data['partitions']['added']: + if row['is_attach']: + status, pscid = self.conn.execute_scalar( + render_template( + "/".join([ + self.table_template_path, 'get_schema_oid.sql' + ]), + tid=row['partition_name'] + ) + ) + if not status: + return internal_server_error(errormsg=pscid) + + attached.append({ + 'oid': row['partition_name'], + 'schema_id': pscid + }) + + else: + tmp_data = dict() + tmp_data['name'] = row['partition_name'] + SQL = render_template( + "/".join([ + self.table_template_path, 'get_oid.sql' + ]), + scid=scid, data=tmp_data + ) + + status, ptid = self.conn.execute_scalar(SQL) + if not status: + return internal_server_error(errormsg=ptid) + + created.append({ + 'oid': ptid, + 'schema_id': scid + }) + + partitions_oid['created'] = created + partitions_oid['attached'] = attached + + return jsonify( + node=self.blueprint.generate_browser_node( + tid, + parent_id, + name, + 
icon="icon-partition" if ( + 'is_partitioned' in res['rows'][0] and res['rows'][0]['is_partitioned'] + ) or self.node_type == 'partition' else "icon-table", + is_partitioned=True if res['rows'][0]['relkind'] == 'p' else False, + parent_schema_id=scid, + schema_id=rest['rows'][0]['scid'], + schema_name=rest['rows'][0]['nspname'], + affected_partitions=partitions_oid + ) + ) + except Exception as e: + return internal_server_error(errormsg=str(e)) + + def properties(self, gid, sid, did, scid, tid, res): + """ + This function will show the properties of the selected table node. + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + scid: Schema ID + tid: Table ID + res: Table/Partition table properties + + Returns: + JSON of selected table node + """ + data = res['rows'][0] + + data['vacuum_settings_str'] = "" + + if data['table_vacuum_settings_str'] is not None: + data['vacuum_settings_str'] += data[ + 'table_vacuum_settings_str'].replace(',', '\n') + + if data['toast_table_vacuum_settings_str'] is not None: + data['vacuum_settings_str'] += '\n' + '\n'.join( + ['toast_' + setting for setting in data[ + 'toast_table_vacuum_settings_str' + ].split(',')] + ) + data['vacuum_settings_str'] = data[ + 'vacuum_settings_str' + ].replace("=", " = ") + + data = self._formatter(did, scid, tid, data) + + # Fetch partition of this table if it is partitioned table. + if 'is_partitioned' in data and data['is_partitioned']: + # get the partition type + data['partition_type'] = data['partition_scheme'].split()[0].lower() + + partitions = [] + SQL = render_template("/".join([self.partition_template_path, + 'nodes.sql']), + scid=scid, tid=tid) + status, rset = self.conn.execute_2darray(SQL) + if not status: + return internal_server_error(errormsg=rset) + + for row in rset['rows']: + partition_name = row['name'] + # if schema name is different then display schema + # qualified name on UI. 
+ if data['schema'] != row['schema_name']: + partition_name = row['schema_name'] + '.' + row['name'] + + if data['partition_type'] == 'range': + range_part = row['partition_value'].split( + 'FOR VALUES FROM (')[1].split(') TO') + range_from = range_part[0] + range_to = range_part[1][2:-1] + + partitions.append({ + 'oid': row['oid'], + 'partition_name': partition_name, + 'values_from': range_from, + 'values_to': range_to + }) + else: + range_part = \ + row['partition_value'].split('FOR VALUES IN (')[1] + + range_in = range_part[:-1] + partitions.append({ + 'oid': row['oid'], + 'partition_name': partition_name, + 'values_in': range_in + }) + + data['partitions'] = partitions + + return ajax_response( + response=data, + status=200 + ) + + def get_partitions_sql(self, partitions): + """ + This function will iterate all the partitions and create SQL. + + :param partitions: List of partitions + """ + sql = '' + + for row in partitions['partitions']: + part_data = dict() + part_data['partitioned_table_name'] = partitions['name'] + part_data['parent_schema'] = partitions['schema'] + + if 'is_attach' in row and row['is_attach']: + schema_name, table_name = \ + self.get_schema_and_table_name(row['partition_name']) + + part_data['schema'] = schema_name + part_data['name'] = table_name + else: + part_data['schema'] = partitions['schema'] + part_data['relispartition'] = True + part_data['name'] = row['partition_name'] + + if partitions['partition_type'] == 'range': + range_from = row['values_from'].split(',') + range_to = row['values_to'].split(',') + + from_str = ', '.join("{0}".format(item) for item in range_from) + to_str = ', '.join("{0}".format(item) for item in range_to) + + part_data['partition_value'] = 'FOR VALUES FROM (' + from_str \ + + ') TO (' + to_str + ')' + else: + range_in = row['values_in'].split(',') + in_str = ', '.join("{0}".format(item) for item in range_in) + part_data['partition_value'] = 'FOR VALUES IN (' + in_str + ')' + + if 'is_attach' in row and 
row['is_attach']: + partition_sql = render_template( + "/".join([self.partition_template_path, 'attach.sql']), + data=part_data, conn=self.conn + ) + else: + partition_sql = render_template( + "/".join([self.partition_template_path, 'create.sql']), + data=part_data, conn=self.conn + ) + + sql += partition_sql + '\n' + + return sql + + def truncate(self, gid, sid, did, scid, tid, res): + """ + This function will truncate the table object + + Args: + gid: Server Group ID + sid: Server ID + did: Database ID + scid: Schema ID + tid: Table ID + """ + # Below will decide if it's simple drop or drop with cascade call + data = request.form if request.form else json.loads( + request.data, encoding='utf-8' + ) + # Convert str 'true' to boolean type + is_cascade = json.loads(data['cascade']) + + data = res['rows'][0] + + SQL = render_template("/".join([self.table_template_path, + 'truncate.sql']), + data=data, cascade=is_cascade) + status, res = self.conn.execute_scalar(SQL) + if not status: + return internal_server_error(errormsg=res) + + return make_json_response( + success=1, + info=gettext("Table truncated"), + data={ + 'id': tid, + 'scid': scid + } + ) + + def get_schema_and_table_name(self, tid): + """ + This function will fetch the schema qualified name of the + given table id. + + :param tid: Table Id. 
+ """ + # Get schema oid + status, scid = self.conn.execute_scalar( + render_template("/".join([self.table_template_path, + 'get_schema_oid.sql']), tid=tid)) + if not status: + return internal_server_error(errormsg=scid) + + # Fetch schema name + status, schema_name = self.conn.execute_scalar( + render_template("/".join([self.table_template_path, + 'get_schema.sql']), conn=self.conn, + scid=scid) + ) + if not status: + return internal_server_error(errormsg=schema_name) + + # Fetch Table name + status, table_name = self.conn.execute_scalar( + render_template( + "/".join([self.table_template_path, 'get_table.sql']), + conn=self.conn, scid=scid, tid=tid + ) + ) + if not status: + return internal_server_error(errormsg=table_name) + + return schema_name, table_name + + @staticmethod + def convert_length_precision_to_string(data): + """ + This function is used to convert length & precision to string + to handle case like when user gives 0 as length + + Args: + data: Data from client + + Returns: + Converted data + """ + if 'attlen' in data and data['attlen'] is not None: + data['attlen'] = str(data['attlen']) + if 'attprecision' in data and data['attprecision'] is not None: + data['attprecision'] = str(data['attprecision']) + return data + + def search_coveringindex(self, tid, cols): + """ + + Args: + tid: Table id + cols: column list + + Returns: + + """ + + cols = set(cols) + SQL = render_template("/".join([self.foreign_key_template_path, + 'get_constraints.sql']), + tid=tid) + status, constraints = self.conn.execute_dict(SQL) + + if not status: + raise Exception(constraints) + + for costrnt in constraints['rows']: + + sql = render_template( + "/".join([self.foreign_key_template_path, 'get_cols.sql']), + cid=costrnt['oid'], + colcnt=costrnt['indnatts']) + status, rest = self.conn.execute_dict(sql) + + if not status: + return internal_server_error(errormsg=rest) + + indexcols = set() + for r in rest['rows']: + indexcols.add(r['column'].strip('"')) + + if len(cols - 
indexcols) == len(indexcols - cols) == 0: + return costrnt["idxname"] + + return None diff --git a/web/pgadmin/browser/templates/browser/js/browser.js b/web/pgadmin/browser/templates/browser/js/browser.js index 9ca3a16d0..0cccf9c87 100644 --- a/web/pgadmin/browser/templates/browser/js/browser.js +++ b/web/pgadmin/browser/templates/browser/js/browser.js @@ -20,7 +20,7 @@ define( $ = $ || window.jQuery || window.$; Bootstrap = Bootstrap || window.Bootstrap; - pgAdmin.Browser = pgAdmin.Browser || {}; + var pgBrowser = pgAdmin.Browser = pgAdmin.Browser || {}; var panelEvents = {}; panelEvents[wcDocker.EVENT.VISIBILITY_CHANGED] = function() { @@ -1756,6 +1756,201 @@ define( } }, + removeChildTreeNodesById: function(_parentNode, _collType, _childIds) { + var tree = pgBrowser.tree; + if(_parentNode && _collType) { + var children = tree.children(_parentNode), + idx = 0, size = children.length, + childNode, childNodeData; + + _parentNode = null; + + for (; idx < size; idx++) { + childNode = children.eq(idx); + childNodeData = tree.itemData(childNode); + + if (childNodeData._type == _collType) { + _parentNode = childNode; + break; + } + } + } + + if (_parentNode) { + var children = tree.children(_parentNode), + idx = 0, size = children.length, + childNode, childNodeData, + prevChildNode; + + for (; idx < size; idx++) { + childNode = children.eq(idx); + childNodeData = tree.itemData(childNode); + + if (_childIds.indexOf(childNodeData._id) != -1) { + pgBrowser.removeTreeNode(childNode, false, _parentNode); + } + } + return true; + } + return false; + }, + + removeTreeNode: function(_node, _selectNext, _parentNode) { + var tree = pgBrowser.tree, + nodeToSelect = null; + + if (!_node) + return false; + + if (_selectNext) { + nodeToSelect = tree.next(_node); + if (!nodeToSelect || !nodeToSelect.length) { + nodeToSelect = tree.prev(_node); + + if (!nodeToSelect || !nodeToSelect.length) { + if (!_parentNode) { + nodeToSelect = tree.parent(_node); + } else { + nodeToSelect = 
_parentNode; + } + } + } + if (nodeToSelect) + tree.select(nodeToSelect); + } + tree.remove(_node); + return true; + }, + + findSiblingTreeNode: function(_node, _id) { + var tree = pgBrowser.tree, + parentNode = tree.parent(_node), + siblings = tree.children(parentNode), + idx = 0, nodeData, node; + + for(; idx < siblings.length; idx++) { + node = siblings.eq(idx); + nodeData = tree.itemData(node); + + if (nodeData && nodeData._id == _id) + return node; + } + return null; + }, + + findParentTreeNodeByType: function(_node, _parentType) { + var tree = pgBrowser.tree, + nodeData, + node = _node; + + do { + nodeData = tree.itemData(node); + if (nodeData && nodeData._type == _parentType) + return node; + node = tree.hasParent(node) ? tree.parent(node) : null; + } while (node); + + return null; + }, + + findChildCollectionTreeNode: function(_node, _collType) { + var tree = pgBrowser.tree, + nodeData, idx = 0, + node = _node, + children = _node && tree.children(_node); + + if (!children || !children.length) + return null; + + for(; idx < children.length; idx++) { + node = children.eq(idx); + nodeData = tree.itemData(node); + + if (nodeData && nodeData._type == _collType) + return node; + } + return null; + }, + + addChildTreeNodes: function(_treeHierarchy, _node, _type, _arrayIds, _callback) { + var module = _type in pgBrowser.Nodes && pgBrowser.Nodes[_type], + childTreeInfo = _arrayIds.length && _.extend( + {}, _.mapObject( + _treeHierarchy, function(_val, _key) { + _val.priority -= 1; return _val; + }) + ), + arrayChildNodeData = [], + fetchNodeInfo = function(_callback) { + if (!_arrayIds.length) { + if (_callback) { + _callback(); + } + return; + } + + var childDummyInfo = { + '_id': _arrayIds.pop(), '_type': _type, 'priority': 0 + }, + childNodeUrl; + childTreeInfo[_type] = childDummyInfo; + + childNodeUrl = module.generate_url( + null, 'nodes', childDummyInfo, true, childTreeInfo + ); + console.debug("Fetching node information using: ", childNodeUrl); + + $.ajax({ + 
url: childNodeUrl, + dataType: "json", + success: function(res) { + if (res.success) { + arrayChildNodeData.push(res.data); + } + fetchNodeInfo(_callback); + }, + error: function(xhr, status, error) { + try { + var err = $.parseJSON(xhr.responseText); + if (err.success == 0) { + var alertifyWrapper = new AlertifyWrapper(); + alertifyWrapper.error(err.errormsg); + } + } catch (e) {} + fetchNodeInfo(_callback); + } + }); + }; + + + if (!module) { + console.warning( + "Developer: Couldn't find the module for the given child: ", + _.clone(arguments) + ); + return; + } + + if (pgBrowser.tree.wasLoad(_node) || pgBrowser.tree.isLeaf(_node)) { + fetchNodeInfo(function() { + console.log('Append this nodes:', arrayChildNodeData); + _.each(arrayChildNodeData, function(_nodData) { + pgBrowser.Events.trigger( + 'pgadmin:browser:tree:add', _nodData, _treeHierarchy + ); + }); + + if (_callback) { + _callback(); + } + }); + } else { + if (_callback) { + _callback(); + } + } + }, + _refreshNode: function(_ctx, _d) { var traverseNodes = function(_d) { var _ctx = this, idx = 0, ctx, d, diff --git a/web/pgadmin/browser/templates/browser/js/node.js b/web/pgadmin/browser/templates/browser/js/node.js index 3273b2bec..d7d321ed7 100644 --- a/web/pgadmin/browser/templates/browser/js/node.js +++ b/web/pgadmin/browser/templates/browser/js/node.js @@ -652,18 +652,7 @@ define([ if (res.success == 0) { pgBrowser.report_error(res.errormsg, res.info); } else { - var n = t.next(i); - if (!n || !n.length) { - n = t.prev(i); - if (!n || !n.length) { - n = t.parent(i); - t.setInode(n, true); - } - } - t.remove(i); - if (n.length) { - t.select(n); - } + pgBrowser.removeTreeNode(i, true); } return true; }, @@ -1320,9 +1309,14 @@ define([ pgBrowser.Events.trigger( 'pgadmin:browser:tree:update', _old, _new, info, { - success: function() { + success: function(_item, _newNodeData, _oldNodeData) { pgBrowser.Events.trigger( - 'pgadmin:browser:node:updated', _new + 'pgadmin:browser:node:updated', _item, 
_newNodeData, + _oldNodeData + ); + pgBrowser.Events.trigger( + 'pgadmin:browser:node:' + _newNodeData._type + ':updated', + _item, _newNodeData, _oldNodeData ); } } diff --git a/web/pgadmin/tools/backup/templates/backup/js/backup.js b/web/pgadmin/tools/backup/templates/backup/js/backup.js index c43dcd2ba..3b5845597 100644 --- a/web/pgadmin/tools/backup/templates/backup/js/backup.js +++ b/web/pgadmin/tools/backup/templates/backup/js/backup.js @@ -270,7 +270,7 @@ TODO LIST FOR BACKUP: // Define list of nodes on which backup context menu option appears var backup_supported_nodes = [ - 'database', 'schema', 'table' + 'database', 'schema', 'table', 'partition' ]; /** diff --git a/web/pgadmin/tools/datagrid/__init__.py b/web/pgadmin/tools/datagrid/__init__.py index 67f67293e..95c83ddf9 100644 --- a/web/pgadmin/tools/datagrid/__init__.py +++ b/web/pgadmin/tools/datagrid/__init__.py @@ -127,6 +127,10 @@ def initialize_datagrid(cmd_type, obj_type, sid, did, obj_id): return internal_server_error(errormsg=str(msg)) try: + # if object type is partition then it is nothing but a table. 
+ if obj_type == 'partition': + obj_type = 'table' + # Get the object as per the object type command_obj = ObjectRegistry.get_object(obj_type, conn_id=conn_id, sid=sid, did=did, obj_id=obj_id, cmd_type=cmd_type, @@ -201,12 +205,14 @@ def panel(trans_id, is_query_tool, editor_title): else: new_browser_tab = 'false' - return render_template("datagrid/index.html", _=gettext, uniqueId=trans_id, - is_query_tool=is_query_tool, - editor_title=editor_title, script_type_url=sURL, - is_desktop_mode=app.PGADMIN_RUNTIME, - is_linux=is_linux_platform, - is_new_browser_tab=new_browser_tab) + return render_template( + "datagrid/index.html", _=gettext, uniqueId=trans_id, + is_query_tool=is_query_tool, + editor_title=editor_title, script_type_url=sURL, + is_desktop_mode=app.PGADMIN_RUNTIME, + is_linux=is_linux_platform, + is_new_browser_tab=new_browser_tab + ) @blueprint.route( @@ -346,6 +352,8 @@ def validate_filter(sid, did, obj_id): @login_required def script(): """render the required javascript""" - return Response(response=render_template("datagrid/js/datagrid.js", _=gettext), - status=200, - mimetype="application/javascript") + return Response( + response=render_template("datagrid/js/datagrid.js", _=gettext), + status=200, mimetype="application/javascript" + ) + diff --git a/web/pgadmin/tools/datagrid/templates/datagrid/js/datagrid.js b/web/pgadmin/tools/datagrid/templates/datagrid/js/datagrid.js index eb86a1233..cda5c5300 100644 --- a/web/pgadmin/tools/datagrid/templates/datagrid/js/datagrid.js +++ b/web/pgadmin/tools/datagrid/templates/datagrid/js/datagrid.js @@ -29,7 +29,7 @@ define([ // Define list of nodes on which view data option appears var supported_nodes = [ 'table', 'view', 'mview', - 'foreign-table', 'catalog_object' + 'foreign-table', 'catalog_object', 'partition' ], /* Enable/disable View data menu in tools based diff --git a/web/pgadmin/tools/maintenance/templates/maintenance/js/maintenance.js 
b/web/pgadmin/tools/maintenance/templates/maintenance/js/maintenance.js index 5ab3c44bd..0e51fd9a2 100644 --- a/web/pgadmin/tools/maintenance/templates/maintenance/js/maintenance.js +++ b/web/pgadmin/tools/maintenance/templates/maintenance/js/maintenance.js @@ -150,7 +150,7 @@ define([ var maintenance_supported_nodes = [ 'database', 'table', 'primary_key', - 'unique_constraint', 'index' + 'unique_constraint', 'index', 'partition' ]; /** @@ -180,7 +180,7 @@ define([ var menus = [{ name: 'maintenance', module: this, - applies: ['tools'], callback: 'callback_maintenace', + applies: ['tools'], callback: 'callback_maintenance', priority: 10, label: gettext('Maintenance...'), icon: 'fa fa-wrench', enable: menu_enabled }]; @@ -190,7 +190,7 @@ define([ menus.push({ name: 'maintenance_context_' + maintenance_supported_nodes[idx], node: maintenance_supported_nodes[idx], module: this, - applies: ['context'], callback: 'callback_maintenace', + applies: ['context'], callback: 'callback_maintenance', priority: 10, label: gettext('Maintenance...'), icon: 'fa fa-wrench', enable: menu_enabled }); @@ -201,7 +201,7 @@ define([ /* Open the dialog for the maintenance functionality */ - callback_maintenace: function(args, item) { + callback_maintenance: function(args, item) { var i = item || pgBrowser.tree.selected(), server_data = null; @@ -320,7 +320,10 @@ define([ if (treeInfo.schema != undefined) { schema = treeInfo.schema._label; } - if (treeInfo.table != undefined) { + + if (treeInfo.partition != undefined) { + table = treeInfo.partition._label; + } else if (treeInfo.table != undefined) { table = treeInfo.table._label; } diff --git a/web/pgadmin/tools/restore/templates/restore/js/restore.js b/web/pgadmin/tools/restore/templates/restore/js/restore.js index c23d9e368..0da0004d8 100644 --- a/web/pgadmin/tools/restore/templates/restore/js/restore.js +++ b/web/pgadmin/tools/restore/templates/restore/js/restore.js @@ -228,7 +228,8 @@ define([ var restore_supported_nodes = [ 'database', 
'schema', 'table', 'function', - 'trigger', 'index' + 'trigger', 'index', + 'partition' ]; /** diff --git a/web/pgadmin/utils/__init__.py b/web/pgadmin/utils/__init__.py index 550bad2e7..701a38c31 100644 --- a/web/pgadmin/utils/__init__.py +++ b/web/pgadmin/utils/__init__.py @@ -29,6 +29,7 @@ class PgAdminModule(Blueprint): kwargs.setdefault('template_folder', 'templates') kwargs.setdefault('static_folder', 'static') self.submodules = [] + self.parentmodules = [] super(PgAdminModule, self).__init__(name, import_name, **kwargs) @@ -59,6 +60,8 @@ class PgAdminModule(Blueprint): super(PgAdminModule, self).register(app, options, first_registration) for module in self.submodules: + if first_registration: + module.parentmodules.append(self) app.register_blueprint(module) def get_own_stylesheets(self): diff --git a/web/pgadmin/utils/exception.py b/web/pgadmin/utils/exception.py index 57c32244b..77d59da0f 100644 --- a/web/pgadmin/utils/exception.py +++ b/web/pgadmin/utils/exception.py @@ -40,3 +40,11 @@ class ConnectionLost(HTTPException): 'conn_id': self.conn_id } ) + + def __str__(self): + return "Connection (id #{2}) lost for the server (#{0}) on " \ + "database ({1})".format(self.sid, self.db, self.conn_id) + + def __repr__(self): + return "Connection (id #{2}) lost for the server (#{0}) on " \ + "database ({1})".format(self.sid, self.db, self.conn_id)