diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/static/js/schema.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/static/js/schema.js
index 0e9099745..ef496e73e 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/static/js/schema.js
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/static/js/schema.js
@@ -145,6 +145,16 @@ define('pgadmin.node.schema', [
group: gettext('Table'), mode: ['edit', 'create'],
type: 'switch',
disabled: function(m) {
+          // If the table is a partitioned table then disable it.
+ if (m.top && m.top.get('is_partitioned')) {
+            // We also need to unset the rest of the dependent settings.
+ setTimeout(function() {
+ m.set('autovacuum_custom', false);
+ }, 10);
+
+ return true;
+ }
+
if(!m.top.inSchema.apply(this, [m])) {
return false;
}
@@ -459,6 +469,32 @@ define('pgadmin.node.schema', [
return true;
}
});
+
+  pgBrowser.tableChildTreeNodeHierarchy = function(i) {
+    var idx = 0, d,
+      res = {},
+      t = pgBrowser.tree;
+
+    do {
+      d = t.itemData(i);
+      if (
+        d._type in pgBrowser.Nodes && pgBrowser.Nodes[d._type].hasId
+      ) {
+        if (d._type === 'partition' || d._type === 'table') {
+          if (!('table' in res)) {
+            res['table'] = _.extend({}, d, {'priority': idx});
+            idx -= 1;
+          }
+        } else {
+          res[d._type] = _.extend({}, d, {'priority': idx});
+          idx -= 1;
+        }
+      }
+      i = t.hasParent(i) ? t.parent(i) : null;
+    } while (i);
+
+    return res;
+  };
}
// Switch Cell with Deps (specifically for table children)
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py
index f6fa65ef2..17d97075a 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/__init__.py
@@ -11,22 +11,16 @@
import simplejson as json
import re
-from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
-from flask import render_template, request, jsonify
+from flask import render_template, request, jsonify, url_for
from flask_babel import gettext
from pgadmin.browser.server_groups.servers.databases.schemas.utils \
- import SchemaChildModule, DataTypeReader, VacuumSettings, \
- trigger_definition, parse_rule_definition
-from pgadmin.browser.server_groups.servers.utils import parse_priv_from_db, \
- parse_priv_to_db
-from pgadmin.browser.utils import PGChildNodeView
+ import SchemaChildModule, DataTypeReader, VacuumSettings
+from pgadmin.browser.server_groups.servers.utils import parse_priv_to_db
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
-from pgadmin.utils.driver import get_driver
-
-from config import PG_DEFAULT_DRIVER
+from .utils import BaseTableView
class TableModule(SchemaChildModule):
@@ -79,11 +73,22 @@ class TableModule(SchemaChildModule):
"""
return database.DatabaseModule.NODE_TYPE
+ def get_own_javascripts(self):
+ scripts = SchemaChildModule.get_own_javascripts(self)
+
+ scripts.append({
+ 'name': 'pgadmin.browser.table.partition.utils',
+ 'path': url_for('browser.index') + 'table/static/js/partition.utils',
+ 'when': 'database', 'is_template': False
+ })
+
+ return scripts
+
blueprint = TableModule(__name__)
-class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
+class TableView(BaseTableView, DataTypeReader, VacuumSettings):
"""
This class is responsible for generating routes for Table node
@@ -96,11 +101,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
- This property defines (if javascript) exists for this node.
Override this property for your own logic
- * check_precondition()
- - This function will behave as a decorator which will checks
- database connection before running view, it will also attaches
- manager,conn & template_path properties to self
-
* list()
- This function is used to list all the Table nodes within that
collection.
@@ -149,10 +149,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
- This function will generate dependent list to show it in dependent
pane for the selected node.
- * _formatter(data, tid)
- - It will return formatted output of query result
- as per client model format
-
* get_types(self, gid, sid, did, scid)
- This function will return list of types available for columns node
via AJAX response
@@ -178,19 +174,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
* get_toast_table_vacuum(gid, sid, did, scid=None, tid=None)
- Fetch the default values for toast table auto-vacuum
- * _columns_formatter(tid, data):
- - It will return formatted output of query result
- as per client model format for column node
-
- * _index_constraints_formatter(self, did, tid, data):
- - It will return formatted output of query result
- as per client model format for index constraint node
-
- * _cltype_formatter(type): (staticmethod)
- - We need to remove [] from type and append it
- after length/precision so we will send flag for
- sql template
-
* _parse_format_columns(self, data, mode=None):
- This function will parse and return formatted list of columns
added by user
@@ -252,71 +235,16 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
'get_access_methods': [{}, {'get': 'get_access_methods'}],
'get_oper_class': [{}, {'get': 'get_oper_class'}],
'get_operator': [{}, {'get': 'get_operator'}],
+ 'get_attach_tables': [
+ {'get': 'get_attach_tables'},
+ {'get': 'get_attach_tables'}],
'select_sql': [{'get': 'select_sql'}],
'insert_sql': [{'get': 'insert_sql'}],
'update_sql': [{'get': 'update_sql'}],
'delete_sql': [{'get': 'delete_sql'}]
-
})
- def check_precondition(f):
- """
- This function will behave as a decorator which will checks
- database connection before running view, it will also attaches
- manager,conn & template_path properties to self
- """
-
- @wraps(f)
- def wrap(*args, **kwargs):
- # Here args[0] will hold self & kwargs will hold gid,sid,did
- self = args[0]
- driver = get_driver(PG_DEFAULT_DRIVER)
- did = kwargs['did']
- self.manager = driver.connection_manager(kwargs['sid'])
- self.conn = self.manager.connection(did=kwargs['did'])
- self.qtIdent = driver.qtIdent
- self.qtTypeIdent = driver.qtTypeIdent
- # We need datlastsysoid to check if current table is system table
- self.datlastsysoid = self.manager.db_info[
- did
- ]['datlastsysoid'] if self.manager.db_info is not None and \
- did in self.manager.db_info else 0
-
- ver = self.manager.version
- # Set the template path for the SQL scripts
- self.template_path = 'table/sql/#{0}#'.format(ver)
-
- # Template for Column ,check constraint and exclusion constraint node
- self.column_template_path = 'column/sql/#{0}#'.format(ver)
- self.check_constraint_template_path = 'check_constraint/sql/#{0}#'.format(ver)
- self.exclusion_constraint_template_path = 'exclusion_constraint/sql/#{0}#'.format(ver)
-
- # Template for PK & Unique constraint node
- self.index_constraint_template_path = 'index_constraint/sql'
-
- # Template for foreign key constraint node
- self.foreign_key_template_path = 'foreign_key/sql/#{0}#'.format(ver)
-
- # Template for index node
- self.index_template_path = 'index/sql/#{0}#'.format(ver)
-
- # Template for trigger node
- self.trigger_template_path = 'trigger/sql/#{0}#'.format(ver)
-
- # Template for rules node
- self.rules_template_path = 'rules/sql'
-
- # Supported ACL for table
- self.acl = ['a', 'r', 'w', 'd', 'D', 'x', 't']
-
- # Supported ACL for columns
- self.column_acl = ['a', 'r', 'w', 'x']
-
- return f(*args, **kwargs)
-
- return wrap
-
- @check_precondition
+ @BaseTableView.check_precondition
def list(self, gid, sid, did, scid):
"""
This function is used to list all the table nodes within that collection.
@@ -330,10 +258,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
Returns:
JSON of available table nodes
"""
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
@@ -343,7 +272,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
status=200
)
- @check_precondition
+ @BaseTableView.check_precondition
def node(self, gid, sid, did, scid, tid):
"""
This function is used to list all the table nodes within that collection.
@@ -359,9 +288,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
JSON of available table nodes
"""
res = []
- SQL = render_template("/".join([self.template_path,
- 'nodes.sql']),
- scid=scid, tid=tid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'nodes.sql']),
+ scid=scid, tid=tid
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@@ -372,9 +302,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
rset['rows'][0]['oid'],
scid,
rset['rows'][0]['name'],
- icon="icon-table",
+ icon="icon-partition" if 'is_partitioned' in rset['rows'][0] and rset['rows'][0]['is_partitioned'] else "icon-table",
tigger_count=rset['rows'][0]['triggercount'],
- has_enable_triggers=rset['rows'][0]['has_enable_triggers']
+ has_enable_triggers=rset['rows'][0]['has_enable_triggers'],
+ is_partitioned=rset['rows'][0]['is_partitioned'] if 'is_partitioned' in rset['rows'][0] else False
)
return make_json_response(
@@ -382,8 +313,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
status=200
)
-
- @check_precondition
+ @BaseTableView.check_precondition
def nodes(self, gid, sid, did, scid):
"""
This function is used to list all the table nodes within that collection.
@@ -398,9 +328,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
JSON of available table nodes
"""
res = []
- SQL = render_template("/".join([self.template_path,
- 'nodes.sql']),
- scid=scid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'nodes.sql']),
+ scid=scid
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@@ -411,9 +342,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
row['oid'],
scid,
row['name'],
- icon="icon-table",
+ icon="icon-partition" if 'is_partitioned' in row and row['is_partitioned'] else "icon-table",
tigger_count=row['triggercount'],
- has_enable_triggers=row['has_enable_triggers']
+ has_enable_triggers=row['has_enable_triggers'],
+ is_partitioned=row['is_partitioned'] if 'is_partitioned' in row else False
))
return make_json_response(
@@ -421,7 +353,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
status=200
)
- @check_precondition
+ @BaseTableView.check_precondition
def get_all_tables(self, gid, sid, did, scid, tid=None):
"""
Args:
@@ -435,9 +367,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
Returns the lits of tables required for constraints.
"""
try:
- SQL = render_template("/".join([self.template_path,
- 'get_tables_for_constraints.sql']),
- show_sysobj=self.blueprint.show_system_objects)
+ SQL = render_template(
+ "/".join([
+ self.table_template_path, 'get_tables_for_constraints.sql'
+ ]),
+ show_sysobj=self.blueprint.show_system_objects
+ )
status, res = self.conn.execute_dict(SQL)
@@ -452,7 +387,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def get_table_vacuum(self, gid, sid, did, scid=None, tid=None):
"""
Fetch the default values for table auto-vacuum
@@ -468,7 +403,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
status=200
)
- @check_precondition
+ @BaseTableView.check_precondition
def get_toast_table_vacuum(self, gid, sid, did, scid=None, tid=None):
"""
Fetch the default values for toast table auto-vacuum
@@ -484,7 +419,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
status=200
)
- @check_precondition
+ @BaseTableView.check_precondition
def get_access_methods(self, gid, sid, did, scid, tid=None):
"""
This function returns access methods.
@@ -501,9 +436,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
"""
res = [{'label': '', 'value': ''}]
- sql = render_template(
- "/".join([self.exclusion_constraint_template_path,
- 'get_access_methods.sql']))
+ sql = render_template("/".join([
+ self.exclusion_constraint_template_path, 'get_access_methods.sql'
+ ]))
+
status, rest = self.conn.execute_2darray(sql)
if not status:
@@ -518,7 +454,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
status=200
)
- @check_precondition
+ @BaseTableView.check_precondition
def get_oper_class(self, gid, sid, did, scid, tid=None):
"""
@@ -537,9 +473,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
try:
if data and 'indextype' in data:
SQL = render_template(
- "/".join([self.exclusion_constraint_template_path,
- 'get_oper_class.sql']),
- indextype=data['indextype'])
+ "/".join([
+ self.exclusion_constraint_template_path,
+ 'get_oper_class.sql'
+ ]),
+ indextype=data['indextype']
+ )
status, res = self.conn.execute_2darray(SQL)
@@ -555,7 +494,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def get_operator(self, gid, sid, did, scid, tid=None):
"""
@@ -574,10 +513,13 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
try:
if data and 'col_type' in data:
SQL = render_template(
- "/".join([self.exclusion_constraint_template_path,
- 'get_operator.sql']),
+ "/".join([
+ self.exclusion_constraint_template_path,
+ 'get_operator.sql'
+ ]),
type=data['col_type'],
- show_sysobj=self.blueprint.show_system_objects)
+ show_sysobj=self.blueprint.show_system_objects
+ )
status, res = self.conn.execute_2darray(SQL)
@@ -593,506 +535,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- def _columns_formatter(self, tid, data):
- """
- Args:
- tid: Table OID
- data: dict of query result
-
- Returns:
- It will return formatted output of query result
- as per client model format for column node
- """
- for column in data['columns']:
-
- # We need to format variables according to client js collection
- if 'attoptions' in column and column['attoptions'] is not None:
- spcoptions = []
- for spcoption in column['attoptions']:
- k, v = spcoption.split('=')
- spcoptions.append({'name': k, 'value': v})
-
- column['attoptions'] = spcoptions
-
- # Need to format security labels according to client js collection
- if 'seclabels' in column and column['seclabels'] is not None:
- seclabels = []
- for seclbls in column['seclabels']:
- k, v = seclbls.split('=')
- seclabels.append({'provider': k, 'label': v})
-
- column['seclabels'] = seclabels
-
- if 'attnum' in column and column['attnum'] is not None and \
- column['attnum'] > 0:
- # We need to parse & convert ACL coming from database to json format
- SQL = render_template("/".join([self.column_template_path, 'acl.sql']),
- tid=tid, clid=column['attnum'])
- status, acl = self.conn.execute_dict(SQL)
-
- if not status:
- return internal_server_error(errormsg=acl)
-
- # We will set get privileges from acl sql so we don't need
- # it from properties sql
- column['attacl'] = []
-
- for row in acl['rows']:
- priv = parse_priv_from_db(row)
- column.setdefault(row['deftype'], []).append(priv)
-
- # we are receiving request when in edit mode
- # we will send filtered types related to current type
- present_type = column['cltype']
-
- type_id = column['atttypid']
-
- fulltype = self.get_full_type(
- column['typnspname'], column['typname'],
- column['isdup'], column['attndims'], column['atttypmod']
- )
-
- length = False
- precision = False
- if 'elemoid' in column:
- length, precision, typeval = self.get_length_precision(column['elemoid'])
-
- # Set length and precision to None
- column['attlen'] = None
- column['attprecision'] = None
-
- # If we have length & precision both
- if length and precision:
- matchObj = re.search(r'(\d+),(\d+)', fulltype)
- if matchObj:
- column['attlen'] = matchObj.group(1)
- column['attprecision'] = matchObj.group(2)
- elif length:
- # If we have length only
- matchObj = re.search(r'(\d+)', fulltype)
- if matchObj:
- column['attlen'] = matchObj.group(1)
- column['attprecision'] = None
-
-
- SQL = render_template("/".join([self.column_template_path,
- 'is_referenced.sql']),
- tid=tid, clid=column['attnum'])
-
- status, is_reference = self.conn.execute_scalar(SQL)
-
- edit_types_list = list()
- # We will need present type in edit mode
-
- if column['typnspname'] == "pg_catalog" or column['typnspname'] == "public":
- edit_types_list.append(present_type)
- else:
- t = self.qtTypeIdent(self.conn, column['typnspname'], present_type)
- edit_types_list.append(t)
- column['cltype'] = t
-
- if int(is_reference) == 0:
- SQL = render_template("/".join([self.column_template_path,
- 'edit_mode_types.sql']),
- type_id=type_id)
- status, rset = self.conn.execute_2darray(SQL)
-
- for row in rset['rows']:
- edit_types_list.append(row['typname'])
- else:
- edit_types_list.append(present_type)
-
- column['edit_types'] = edit_types_list
- column['cltype'] = DataTypeReader.parse_type_name(column['cltype'])
-
- if 'indkey' in column:
- # Current column
- attnum = str(column['attnum'])
-
- # Single/List of primary key column(s)
- indkey = str(column['indkey'])
-
- # We will check if column is in primary column(s)
- if attnum in indkey.split(" "):
- column['is_primary_key'] = True
- else:
- column['is_primary_key'] = False
-
- return data
-
- def _index_constraints_formatter(self, did, tid, data):
- """
- Args:
- tid: Table OID
- data: dict of query result
-
- Returns:
- It will return formatted output of query result
- as per client model format for index constraint node
- """
-
- # We will fetch all the index constraints for the table
- index_constraints = {
- 'p': 'primary_key', 'u': 'unique_constraint'
- }
-
- for ctype in index_constraints.keys():
- data[index_constraints[ctype]] = []
-
- sql = render_template("/".join([self.index_constraint_template_path,
- 'properties.sql']),
- did=did, tid=tid,
- constraint_type=ctype)
- status, res = self.conn.execute_dict(sql)
-
- if not status:
- return internal_server_error(errormsg=res)
-
- for row in res['rows']:
- result = row
- sql = render_template(
- "/".join([self.index_constraint_template_path,
- 'get_constraint_cols.sql']),
- cid=row['oid'],
- colcnt=row['indnatts'])
- status, res = self.conn.execute_dict(sql)
-
- if not status:
- return internal_server_error(errormsg=res)
-
- columns = []
- for r in res['rows']:
- columns.append({"column": r['column'].strip('"')})
-
- result['columns'] = columns
-
- # If not exists then create list and/or append into
- # existing list [ Adding into main data dict]
- data.setdefault(index_constraints[ctype], []).append(result)
-
- return data
-
- def _foreign_key_formatter(self, tid, data):
- """
- Args:
- tid: Table OID
- data: dict of query result
-
- Returns:
- It will return formatted output of query result
- as per client model format for foreign key constraint node
- """
-
- # We will fetch all the index constraints for the table
- sql = render_template("/".join([self.foreign_key_template_path,
- 'properties.sql']),
- tid=tid)
-
- status, result = self.conn.execute_dict(sql)
-
- if not status:
- return internal_server_error(errormsg=result)
-
- for fk in result['rows']:
-
- sql = render_template("/".join([self.foreign_key_template_path,
- 'get_constraint_cols.sql']),
- tid=tid,
- keys=zip(fk['confkey'], fk['conkey']),
- confrelid=fk['confrelid'])
-
- status, res = self.conn.execute_dict(sql)
-
- if not status:
- return internal_server_error(errormsg=res)
-
- columns = []
- cols = []
- for row in res['rows']:
- columns.append({"local_column": row['conattname'],
- "references": fk['confrelid'],
- "referenced": row['confattname']})
- cols.append(row['conattname'])
-
- fk['columns'] = columns
-
- SQL = render_template("/".join([self.foreign_key_template_path,
- 'get_parent.sql']),
- tid=fk['columns'][0]['references'])
-
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
-
- fk['remote_schema'] = rset['rows'][0]['schema']
- fk['remote_table'] = rset['rows'][0]['table']
-
- coveringindex = self.search_coveringindex(tid, cols)
-
- fk['coveringindex'] = coveringindex
- if coveringindex:
- fk['autoindex'] = True
- fk['hasindex'] = True
- else:
- fk['autoindex'] = False
- fk['hasindex'] = False
- # If not exists then create list and/or append into
- # existing list [ Adding into main data dict]
- data.setdefault('foreign_key', []).append(fk)
-
- return data
-
- def _check_constraint_formatter(self, tid, data):
- """
- Args:
- tid: Table OID
- data: dict of query result
-
- Returns:
- It will return formatted output of query result
- as per client model format for check constraint node
- """
-
- # We will fetch all the index constraints for the table
- SQL = render_template("/".join([self.check_constraint_template_path,
- 'properties.sql']),
- tid=tid)
-
- status, res = self.conn.execute_dict(SQL)
-
- if not status:
- return internal_server_error(errormsg=res)
- # If not exists then create list and/or append into
- # existing list [ Adding into main data dict]
-
- data['check_constraint'] = res['rows']
-
- return data
-
- def _exclusion_constraint_formatter(self, did, tid, data):
- """
- Args:
- tid: Table OID
- data: dict of query result
-
- Returns:
- It will return formatted output of query result
- as per client model format for exclusion constraint node
- """
-
- # We will fetch all the index constraints for the table
- sql = render_template("/".join([self.exclusion_constraint_template_path,
- 'properties.sql']),
- did=did, tid=tid)
-
- status, result = self.conn.execute_dict(sql)
-
- if not status:
- return internal_server_error(errormsg=result)
-
- for ex in result['rows']:
-
- sql = render_template("/".join([self.exclusion_constraint_template_path,
- 'get_constraint_cols.sql']),
- cid=ex['oid'],
- colcnt=ex['indnatts'])
-
- status, res = self.conn.execute_dict(sql)
-
- if not status:
- return internal_server_error(errormsg=res)
-
- columns = []
- for row in res['rows']:
- if row['options'] & 1:
- order = False
- nulls_order = True if (row['options'] & 2) else False
- else:
- order = True
- nulls_order = True if (row['options'] & 2) else False
-
- columns.append({"column": row['coldef'].strip('"'),
- "oper_class": row['opcname'],
- "order": order,
- "nulls_order": nulls_order,
- "operator": row['oprname'],
- "col_type": row['datatype']
- })
-
- ex['columns'] = columns
- # If not exists then create list and/or append into
- # existing list [ Adding into main data dict]
- data.setdefault('exclude_constraint', []).append(ex)
-
- return data
-
- def search_coveringindex(self, tid, cols):
- """
-
- Args:
- tid: Table id
- cols: column list
-
- Returns:
-
- """
-
- cols = set(cols)
- SQL = render_template("/".join([self.foreign_key_template_path,
- 'get_constraints.sql']),
- tid=tid)
- status, constraints = self.conn.execute_dict(SQL)
-
- if not status:
- raise Exception(constraints)
-
- for costrnt in constraints['rows']:
-
- sql = render_template(
- "/".join([self.foreign_key_template_path, 'get_cols.sql']),
- cid=costrnt['oid'],
- colcnt=costrnt['indnatts'])
- status, rest = self.conn.execute_dict(sql)
-
- if not status:
- return internal_server_error(errormsg=rest)
-
- indexcols = set()
- for r in rest['rows']:
- indexcols.add(r['column'].strip('"'))
-
- if len(cols - indexcols) == len(indexcols - cols) == 0:
- return costrnt["idxname"]
-
- return None
-
- def _formatter(self, did, scid, tid, data):
- """
- Args:
- data: dict of query result
- scid: schema oid
- tid: table oid
-
- Returns:
- It will return formatted output of query result
- as per client model format
- """
- # Need to format security labels according to client js collection
- if 'seclabels' in data and data['seclabels'] is not None:
- seclabels = []
- for seclbls in data['seclabels']:
- k, v = seclbls.split('=')
- seclabels.append({'provider': k, 'label': v})
-
- data['seclabels'] = seclabels
-
- # We need to parse & convert ACL coming from database to json format
- SQL = render_template("/".join([self.template_path, 'acl.sql']),
- tid=tid, scid=scid)
- status, acl = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=acl)
-
- # We will set get privileges from acl sql so we don't need
- # it from properties sql
- for row in acl['rows']:
- priv = parse_priv_from_db(row)
- if row['deftype'] in data:
- data[row['deftype']].append(priv)
- else:
- data[row['deftype']] = [priv]
-
- # We will add Auto vacuum defaults with out result for grid
- data['vacuum_table'] = self.parse_vacuum_data(self.conn, data, 'table')
- data['vacuum_toast'] = self.parse_vacuum_data(self.conn, data, 'toast')
-
- # Fetch columns for the table logic
- #
- # 1) Check if of_type and inherited tables are present?
- # 2) If yes then Fetch all the columns for of_type and inherited tables
- # 3) Add columns in columns collection
- # 4) Find all the columns for tables and filter out columns which are
- # not inherited from any table & format them one by one
-
- # Get of_type table columns and add it into columns dict
- if data['typname']:
- SQL = render_template("/".join([self.template_path,
- 'get_columns_for_table.sql']),
- tname=data['typname'])
-
- status, res = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
- data['columns'] = res['rows']
-
- # Get inherited table(s) columns and add it into columns dict
- elif data['coll_inherits'] and len(data['coll_inherits']) > 0:
- columns = []
- # Return all tables which can be inherited & do not show
- # system columns
- SQL = render_template("/".join([self.template_path, 'get_inherits.sql']),
- show_system_objects=False
- )
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
-
- for row in rset['rows']:
- if row['inherits'] in data['coll_inherits']:
- # Fetch columns using inherited table OID
- SQL = render_template("/".join([self.template_path,
- 'get_columns_for_table.sql']),
- tid=row['oid'])
- status, res = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
- columns.extend(res['rows'][:])
- data['columns'] = columns
-
- # We will fetch all the columns for the table using
- # columns properties.sql, so we need to set template path
- SQL = render_template("/".join([self.column_template_path,
- 'properties.sql']),
- tid=tid,
- show_sys_objects=False
- )
-
- status, res = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
- all_columns = res['rows']
-
- # Filter inherited columns from all columns
- if 'columns' in data and len(data['columns']) > 0 \
- and len(all_columns) > 0:
- for row in data['columns']:
- for i, col in enumerate(all_columns):
- # If both name are same then remove it
- # as it is inherited from other table
- if col['name'] == row['name']:
- # Remove same column from all_columns as
- # already have it columns collection
- del all_columns[i]
-
- # If any column is added then update columns collection
- if len(all_columns) > 0:
- data['columns'] += all_columns
- # If no inherited columns found then add all columns
- elif len(all_columns) > 0:
- data['columns'] = all_columns
-
- if 'columns' in data and len(data['columns']) > 0:
- data = self._columns_formatter(tid, data)
-
- # Here we will add constraint in our output
- data = self._index_constraints_formatter(did, tid, data)
- data = self._foreign_key_formatter(tid, data)
- data = self._check_constraint_formatter(tid, data)
- data = self._exclusion_constraint_formatter(did, tid, data)
-
- return data
-
- @check_precondition
+ @BaseTableView.check_precondition
def properties(self, gid, sid, did, scid, tid):
"""
This function will show the properties of the selected table node.
@@ -1109,10 +552,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
JSON of selected table node
"""
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -1120,41 +564,22 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
if len(res['rows']) == 0:
return gone(gettext("The specified table could not be found."))
- data = res['rows'][0]
+ return super(TableView, self).properties(
+ gid, sid, did, scid, tid, res)
- data['vacuum_settings_str'] = ""
-
- if data['table_vacuum_settings_str'] is not None:
- data['vacuum_settings_str'] += data[
- 'table_vacuum_settings_str'].replace(',', '\n')
-
- if data['toast_table_vacuum_settings_str'] is not None:
- data['vacuum_settings_str'] += '\n' + '\n'.join(
- ['toast_' + setting for setting in data[
- 'toast_table_vacuum_settings_str'
- ].split(',')]
- )
- data['vacuum_settings_str'] = data[
- 'vacuum_settings_str'
- ].replace("=", " = ")
-
- data = self._formatter(did, scid, tid, data)
-
- return ajax_response(
- response=data,
- status=200
- )
-
- @check_precondition
+ @BaseTableView.check_precondition
def types(self, gid, sid, did, scid, tid=None, clid=None):
"""
Returns:
This function will return list of types available for column node
for node-ajax-control
"""
- condition = render_template("/".join([self.template_path,
- 'get_types_where_condition.sql']),
- show_system_objects=self.blueprint.show_system_objects)
+ condition = render_template(
+ "/".join([
+ self.table_template_path, 'get_types_where_condition.sql'
+ ]),
+ show_system_objects=self.blueprint.show_system_objects
+ )
status, types = self.get_types(self.conn, condition, True)
@@ -1166,7 +591,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
status=200
)
- @check_precondition
+ @BaseTableView.check_precondition
def get_columns(self, gid, sid, did, scid, tid=None):
"""
Returns the Table Columns.
@@ -1189,13 +614,19 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
data = request.args if request.args else None
try:
if data and 'tid' in data:
- SQL = render_template("/".join([self.template_path,
- 'get_columns_for_table.sql']),
- tid=data['tid'])
+ SQL = render_template(
+ "/".join([
+ self.table_template_path, 'get_columns_for_table.sql'
+ ]),
+ tid=data['tid']
+ )
elif data and 'tname' in data:
- SQL = render_template("/".join([self.template_path,
- 'get_columns_for_table.sql']),
- tname=data['tname'])
+ SQL = render_template(
+ "/".join([
+ self.table_template_path, 'get_columns_for_table.sql'
+ ]),
+ tname=data['tname']
+ )
if SQL:
status, res = self.conn.execute_dict(SQL)
@@ -1211,7 +642,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def get_oftype(self, gid, sid, did, scid, tid=None):
"""
Returns:
@@ -1220,10 +651,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
"""
res = [{'label': '', 'value': ''}]
try:
- SQL = render_template("/".join([self.template_path,
- 'get_oftype.sql']), scid=scid,
- server_type=self.manager.server_type,
- show_sys_objects=self.blueprint.show_system_objects)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'get_oftype.sql']),
+ scid=scid,
+ server_type=self.manager.server_type,
+ show_sys_objects=self.blueprint.show_system_objects
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -1241,7 +674,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def get_inherits(self, gid, sid, did, scid, tid=None):
"""
Returns:
@@ -1250,10 +683,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
"""
try:
res = []
- SQL = render_template("/".join([self.template_path, 'get_inherits.sql']),
- show_system_objects=self.blueprint.show_system_objects,
- tid=tid,
- server_type=self.manager.server_type)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'get_inherits.sql']),
+ show_system_objects=self.blueprint.show_system_objects,
+ tid=tid,
+ server_type=self.manager.server_type
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -1271,7 +706,40 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
+ def get_attach_tables(self, gid, sid, did, scid, tid=None):
+ """
+ Returns:
+ This function will return list of tables available to be attached
+ to the partitioned table.
+ """
+ try:
+ res = []
+ SQL = render_template(
+ "/".join([
+ self.partition_template_path, 'get_attach_tables.sql'
+ ]),
+ tid=tid
+ )
+
+ status, rset = self.conn.execute_2darray(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ for row in rset['rows']:
+ res.append(
+ {'label': row['table_name'], 'value': row['oid']}
+ )
+
+ return make_json_response(
+ data=res,
+ status=200
+ )
+
+ except Exception as e:
+ return internal_server_error(errormsg=str(e))
+
+ @BaseTableView.check_precondition
def get_relations(self, gid, sid, did, scid, tid=None):
"""
Returns:
@@ -1280,9 +748,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
"""
res = [{'label': '', 'value': ''}]
try:
- SQL = render_template("/".join([self.template_path, 'get_relations.sql']),
- show_sys_objects=self.blueprint.show_system_objects,
- server_type=self.manager.server_type)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'get_relations.sql']),
+ show_sys_objects=self.blueprint.show_system_objects,
+ server_type=self.manager.server_type
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -1298,41 +768,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- @staticmethod
- def _cltype_formatter(data_type):
- """
-
- Args:
- data_type: Type string
-
- Returns:
- We need to remove [] from type and append it
- after length/precision so we will send flag for
- sql template
- """
- if '[]' in data_type:
- return data_type[:-2], True
- else:
- return data_type, False
-
- @staticmethod
- def convert_length_precision_to_string(data):
- """
- This function is used to convert length & precision to string
- to handle case like when user gives 0 as length
-
- Args:
- data: Data from client
-
- Returns:
- Converted data
- """
- if 'attlen' in data and data['attlen'] is not None:
- data['attlen'] = str(data['attlen'])
- if 'attprecision' in data and data['attprecision'] is not None:
- data['attprecision'] = str(data['attprecision'])
- return data
-
def _parse_format_columns(self, data, mode=None):
"""
data:
@@ -1360,12 +795,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
# check type for '[]' in it
c['cltype'], c['hasSqrBracket'] = self._cltype_formatter(c['cltype'])
- c = self.convert_length_precision_to_string(c)
+ c = TableView.convert_length_precision_to_string(c)
data['columns'][action] = final_columns
else:
- # We need to exclude all the columns which are inherited from other tables
- # 'CREATE' mode
+ # We need to exclude all the columns which are inherited from other
+ # tables 'CREATE' mode
final_columns = []
for c in columns:
@@ -1382,35 +817,13 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
# check type for '[]' in it
c['cltype'], c['hasSqrBracket'] = self._cltype_formatter(c['cltype'])
- c = self.convert_length_precision_to_string(c)
+ c = TableView.convert_length_precision_to_string(c)
data['columns'] = final_columns
return data
- def check_and_convert_name_to_string(self, data):
- """
- This function will check and covert table to string incase
- it is numeric
-
- Args:
- data: data dict
-
- Returns:
- Updated data dict
- """
- # For Python2, it can be int, long, float
- if hasattr(str, 'decode'):
- if isinstance(data['name'], (int, long, float)):
- data['name'] = str(data['name'])
- else:
- # For Python3, it can be int, float
- if isinstance(data['name'], (int, float)):
- data['name'] = str(data['name'])
- return data
-
-
- @check_precondition
+ @BaseTableView.check_precondition
def create(self, gid, sid, did, scid):
"""
This function will creates new the table object
@@ -1451,7 +864,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
# Parse & format columns
data = self._parse_format_columns(data)
- data = self.check_and_convert_name_to_string(data)
+ data = TableView.check_and_convert_name_to_string(data)
# 'coll_inherits' is Array but it comes as string from browser
# We will convert it again to list
@@ -1463,9 +876,12 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
if 'foreign_key' in data:
for c in data['foreign_key']:
- SQL = render_template("/".join([self.foreign_key_template_path,
- 'get_parent.sql']),
- tid=c['columns'][0]['references'])
+ SQL = render_template(
+ "/".join([
+ self.foreign_key_template_path, 'get_parent.sql'
+ ]),
+ tid=c['columns'][0]['references']
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@@ -1474,9 +890,23 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
c['remote_table'] = rset['rows'][0]['table']
try:
- SQL = render_template("/".join([self.template_path,
- 'create.sql']),
- data=data, conn=self.conn)
+ partitions_sql = ''
+ partitioned = False
+ if 'is_partitioned' in data and data['is_partitioned']:
+ data['relkind'] = 'p'
+ # create partition scheme
+ data['partition_scheme'] = self.get_partition_scheme(data)
+ partitions_sql = self.get_partitions_sql(data)
+ partitioned = True
+
+ SQL = render_template(
+ "/".join([self.table_template_path, 'create.sql']),
+ data=data, conn=self.conn
+ )
+
+ # Append SQL for partitions
+ SQL += '\n' + partitions_sql
+
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -1490,16 +920,21 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
data['name'] = data['name'][0:CONST_MAX_CHAR_COUNT]
# Get updated schema oid
- SQL = render_template("/".join([self.template_path,
- 'get_schema_oid.sql']), tname=data['name'])
+ SQL = render_template(
+ "/".join([self.table_template_path, 'get_schema_oid.sql']),
+ tname=data['name']
+ )
status, scid = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=scid)
# we need oid to to add object in tree at browser
- SQL = render_template("/".join([self.template_path,
- 'get_oid.sql']), scid=scid, data=data)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'get_oid.sql']),
+ scid=scid, data=data
+ )
+
status, tid = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=tid)
@@ -1509,13 +944,14 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
tid,
scid,
data['name'],
- icon="icon-table"
+ icon="icon-partition" if partitioned else "icon-table",
+ is_partitioned=partitioned
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def update(self, gid, sid, did, scid, tid):
"""
This function will update an existing table object
@@ -1538,34 +974,21 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
data[k] = v
try:
- SQL, name = self.get_sql(did, scid, tid, data)
-
- SQL = SQL.strip('\n').strip(' ')
- status, res = self.conn.execute_scalar(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- SQL = render_template("/".join([self.template_path,
- 'get_schema_oid.sql']), tid=tid)
- status, res = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- # new schema id
- scid = res['rows'][0]['scid']
-
- return jsonify(
- node=self.blueprint.generate_browser_node(
- tid,
- scid,
- name,
- icon="icon-%s" % self.node_type
- )
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
)
+ status, res = self.conn.execute_dict(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ return super(TableView, self).update(
+ gid, sid, did, scid, tid, data, res)
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def delete(self, gid, sid, did, scid, tid):
"""
This function will deletes the table object
@@ -1585,10 +1008,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
cascade = False
try:
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -1606,10 +1030,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
data = res['rows'][0]
- SQL = render_template("/".join([self.template_path,
- 'delete.sql']),
- data=data, cascade=cascade,
- conn=self.conn)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'delete.sql']),
+ data=data, cascade=cascade,
+ conn=self.conn
+ )
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -1626,7 +1051,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def truncate(self, gid, sid, did, scid, tid):
"""
This function will truncate the table object
@@ -1638,43 +1063,23 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
scid: Schema ID
tid: Table ID
"""
- # Below will decide if it's simple drop or drop with cascade call
- data = request.form if request.form else json.loads(
- request.data, encoding='utf-8'
- )
- # Convert str 'true' to boolean type
- is_cascade = json.loads(data['cascade'])
try:
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
- data = res['rows'][0]
- SQL = render_template("/".join([self.template_path,
- 'truncate.sql']),
- data=data, cascade=is_cascade)
- status, res = self.conn.execute_scalar(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- return make_json_response(
- success=1,
- info=gettext("Table truncated"),
- data={
- 'id': tid,
- 'scid': scid
- }
- )
+ return super(TableView, self).truncate(gid, sid, did, scid, tid, res)
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def enable_disable_triggers(self, gid, sid, did, scid, tid):
"""
This function will enable/disable trigger(s) on the table object
@@ -1694,18 +1099,22 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
is_enable = json.loads(data['enable'])
try:
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
data = res['rows'][0]
- SQL = render_template("/".join([self.template_path,
- 'enable_disable_trigger.sql']),
- data=data, is_enable_trigger=is_enable)
+ SQL = render_template(
+ "/".join([
+ self.table_template_path, 'enable_disable_trigger.sql'
+ ]),
+ data=data, is_enable_trigger=is_enable
+ )
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -1723,7 +1132,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
except Exception as e:
return internal_server_error(errormsg=str(e))
- @check_precondition
+ @BaseTableView.check_precondition
def reset(self, gid, sid, did, scid, tid):
"""
This function will reset statistics of table
@@ -1735,27 +1144,9 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
scid: Schema ID
tid: Table ID
"""
- try:
- SQL = render_template("/".join([self.template_path,
- 'reset_stats.sql']),
- tid=tid)
- status, res = self.conn.execute_scalar(SQL)
- if not status:
- return internal_server_error(errormsg=res)
+ return BaseTableView.reset_statistics(self, scid, tid)
- return make_json_response(
- success=1,
- info=gettext("Table statistics have been reset"),
- data={
- 'id': tid,
- 'scid': scid
- }
- )
-
- except Exception as e:
- return internal_server_error(errormsg=str(e))
-
- @check_precondition
+ @BaseTableView.check_precondition
def msql(self, gid, sid, did, scid, tid=None):
"""
This function will create modified sql for table object
@@ -1768,13 +1159,24 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
tid: Table ID
"""
data = dict()
+ res = None
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
- SQL, name = self.get_sql(did, scid, tid, data)
+ if tid is not None:
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
+ status, res = self.conn.execute_dict(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ SQL, name = self.get_sql(did, scid, tid, data, res)
SQL = re.sub('\n{2,}', '\n\n', SQL)
SQL = SQL.strip('\n')
if SQL == '':
@@ -1784,670 +1186,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
status=200
)
- def get_index_constraint_sql(self, did, tid, data):
- """
- Args:
- tid: Table ID
- data: data dict coming from the client
-
- Returns:
- This function will generate modified sql for index constraints
- (Primary Key & Unique)
- """
- sql = []
- # We will fetch all the index constraints for the table
- index_constraints = {
- 'p': 'primary_key', 'u': 'unique_constraint'
- }
-
- for ctype in index_constraints.keys():
- # Check if constraint is in data
- # If yes then we need to check for add/change/delete
- if index_constraints[ctype] in data:
- constraint = data[index_constraints[ctype]]
- # If constraint(s) is/are deleted
- if 'deleted' in constraint:
- for c in constraint['deleted']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- # Sql for drop
- sql.append(
- render_template("/".join(
- [self.index_constraint_template_path,
- 'delete.sql']),
- data=c, conn=self.conn).strip('\n')
- )
-
- if 'changed' in constraint:
- for c in constraint['changed']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- properties_sql = render_template("/".join(
- [self.index_constraint_template_path, 'properties.sql']),
- did=did, tid=tid, cid=c['oid'], constraint_type=ctype)
- status, res = self.conn.execute_dict(properties_sql)
- if not status:
- return internal_server_error(errormsg=res)
-
- old_data = res['rows'][0]
- # Sql to update object
- sql.append(
- render_template("/".join([
- self.index_constraint_template_path,
- 'update.sql']), data=c, o_data=old_data,
- conn=self.conn).strip('\n')
- )
-
- if 'added' in constraint:
- for c in constraint['added']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- # Sql to add object
- if self.validate_constrains(index_constraints[ctype], c):
- sql.append(
- render_template(
- "/".join([self.index_constraint_template_path,
- 'create.sql']),
- data=c, conn=self.conn,
- constraint_name='PRIMARY KEY'
- if ctype == 'p' else 'UNIQUE'
- ).strip('\n')
- )
- else:
- sql.append(
- gettext(
- '-- definition incomplete for {0} constraint'.format(index_constraints[ctype])
- )
- )
- if len(sql) > 0:
- # Join all the sql(s) as single string
- return '\n\n'.join(sql)
- else:
- return None
-
- def get_foreign_key_sql(self, tid, data):
- """
- Args:
- tid: Table ID
- data: data dict coming from the client
-
- Returns:
- This function will generate modified sql for foreign key
- """
- sql = []
- # Check if constraint is in data
- # If yes then we need to check for add/change/delete
- if 'foreign_key' in data:
- constraint = data['foreign_key']
- # If constraint(s) is/are deleted
- if 'deleted' in constraint:
- for c in constraint['deleted']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- # Sql for drop
- sql.append(
- render_template("/".join(
- [self.foreign_key_template_path,
- 'delete.sql']),
- data=c, conn=self.conn).strip('\n')
- )
-
- if 'changed' in constraint:
- for c in constraint['changed']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- properties_sql = render_template("/".join(
- [self.foreign_key_template_path, 'properties.sql']),
- tid=tid, cid=c['oid'])
- status, res = self.conn.execute_dict(properties_sql)
- if not status:
- return internal_server_error(errormsg=res)
-
- old_data = res['rows'][0]
- # Sql to update object
- sql.append(
- render_template("/".join([
- self.foreign_key_template_path,
- 'update.sql']), data=c, o_data=old_data,
- conn=self.conn).strip('\n')
- )
-
- if not self.validate_constrains('foreign_key', c):
- sql.append(
- gettext(
- '-- definition incomplete for foreign_key constraint'
- )
- )
- return '\n\n'.join(sql)
-
- if 'columns' in c:
- cols = []
- for col in c['columns']:
- cols.append(col['local_column'])
-
- coveringindex = self.search_coveringindex(tid, cols)
-
- if coveringindex is None and 'autoindex' in c and c['autoindex'] and \
- ('coveringindex' in c and
- c['coveringindex'] != ''):
- sql.append(render_template(
- "/".join([self.foreign_key_template_path, 'create_index.sql']),
- data=c, conn=self.conn).strip('\n')
- )
-
- if 'added' in constraint:
- for c in constraint['added']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- # Sql to add object
- # Columns
-
- if not self.validate_constrains('foreign_key', c):
- sql.append(
- gettext(
- '-- definition incomplete for foreign_key constraint'
- )
- )
- return '\n\n'.join(sql)
-
- SQL = render_template("/".join([self.foreign_key_template_path,
- 'get_parent.sql']),
- tid=c['columns'][0]['references'])
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
-
- c['remote_schema'] = rset['rows'][0]['schema']
- c['remote_table'] = rset['rows'][0]['table']
-
- sql.append(
- render_template(
- "/".join([self.foreign_key_template_path,
- 'create.sql']),
- data=c, conn=self.conn
- ).strip('\n')
- )
-
- if c['autoindex']:
- sql.append(
- render_template(
- "/".join([self.foreign_key_template_path,
- 'create_index.sql']),
- data=c, conn=self.conn).strip('\n')
- )
-
- if len(sql) > 0:
- # Join all the sql(s) as single string
- return '\n\n'.join(sql)
- else:
- return None
-
- def get_check_constraint_sql(self, tid, data):
- """
- Args:
- tid: Table ID
- data: data dict coming from the client
-
- Returns:
- This function will generate modified sql for check constraint
- """
- sql = []
- # Check if constraint is in data
- # If yes then we need to check for add/change/delete
- if 'check_constraint' in data:
- constraint = data['check_constraint']
- # If constraint(s) is/are deleted
- if 'deleted' in constraint:
- for c in constraint['deleted']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- # Sql for drop
- sql.append(
- render_template("/".join(
- [self.check_constraint_template_path,
- 'delete.sql']),
- data=c, conn=self.conn).strip('\n')
- )
-
- if 'changed' in constraint:
- for c in constraint['changed']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- properties_sql = render_template("/".join(
- [self.check_constraint_template_path, 'properties.sql']),
- tid=tid, cid=c['oid'])
- status, res = self.conn.execute_dict(properties_sql)
- if not status:
- return internal_server_error(errormsg=res)
-
- old_data = res['rows'][0]
- # Sql to update object
- sql.append(
- render_template("/".join([
- self.check_constraint_template_path,
- 'update.sql']), data=c, o_data=old_data,
- conn=self.conn).strip('\n')
- )
-
- if 'added' in constraint:
- for c in constraint['added']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- if not self.validate_constrains('check_constraint', c):
- sql.append(
- gettext(
- '-- definition incomplete for check_constraint'
- )
- )
- return '\n\n'.join(sql)
-
- sql.append(
- render_template(
- "/".join([self.check_constraint_template_path,
- 'create.sql']),
- data=c, conn=self.conn
- ).strip('\n')
- )
-
- if len(sql) > 0:
- # Join all the sql(s) as single string
- return '\n\n'.join(sql)
- else:
- return None
-
- def get_exclusion_constraint_sql(self, did, tid, data):
- """
- Args:
- tid: Table ID
- data: data dict coming from the client
-
- Returns:
- This function will generate modified sql for exclusion constraint
- """
- sql = []
- # Check if constraint is in data
- # If yes then we need to check for add/change/delete
- if 'exclude_constraint' in data:
- constraint = data['exclude_constraint']
- # If constraint(s) is/are deleted
- if 'deleted' in constraint:
- for c in constraint['deleted']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- # Sql for drop
- sql.append(
- render_template("/".join(
- [self.exclusion_constraint_template_path,
- 'delete.sql']),
- data=c, conn=self.conn).strip('\n')
- )
-
- if 'changed' in constraint:
- for c in constraint['changed']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- properties_sql = render_template("/".join(
- [self.exclusion_constraint_template_path, 'properties.sql']),
- did=did, tid=tid, cid=c['oid'])
- status, res = self.conn.execute_dict(properties_sql)
- if not status:
- return internal_server_error(errormsg=res)
-
- old_data = res['rows'][0]
- # Sql to update object
- sql.append(
- render_template("/".join([
- self.exclusion_constraint_template_path,
- 'update.sql']), data=c, o_data=old_data,
- conn=self.conn).strip('\n')
- )
-
- if 'added' in constraint:
- for c in constraint['added']:
- c['schema'] = data['schema']
- c['table'] = data['name']
-
- if not self.validate_constrains('exclude_constraint', c):
- sql.append(
- gettext(
- '-- definition incomplete for exclusion_constraint'
- )
- )
- return '\n\n'.join(sql)
-
- sql.append(
- render_template(
- "/".join([self.exclusion_constraint_template_path,
- 'create.sql']),
- data=c, conn=self.conn
- ).strip('\n')
- )
-
- if len(sql) > 0:
- # Join all the sql(s) as single string
- return u'\n\n'.join(sql)
- else:
- return None
-
- def get_trigger_function_schema(self, data):
- """
- This function will return trigger function with schema name
- """
- # If language is 'edbspl' then trigger function should be
- # 'Inline EDB-SPL' else we will find the trigger function
- # with schema name.
- if data['lanname'] == 'edbspl':
- data['tfunction'] = 'Inline EDB-SPL'
- else:
- SQL = render_template(
- "/".join(
- [self.trigger_template_path,'get_triggerfunctions.sql']
- ),
- tgfoid=data['tgfoid'],
- show_system_objects=self.blueprint.show_system_objects
- )
-
- status, result = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- # Update the trigger function which we have fetched with
- # schema name
- if 'rows' in result and len(result['rows']) > 0 and \
- 'tfunctions' in result['rows'][0]:
- data['tfunction'] = result['rows'][0]['tfunctions']
- return data
-
- def _format_args(self, args):
- """
- This function will format arguments.
-
- Args:
- args: Arguments
-
- Returns:
- Formated arguments for function
- """
- formatted_args = ["'{0}'".format(arg) for arg in args]
- return ', '.join(formatted_args)
-
- def get_sql(self, did, scid, tid, data):
- """
- This function will generate create/update sql from model data
- coming from client
- """
- if tid is not None:
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
- status, res = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- old_data = res['rows'][0]
- old_data = self._formatter(did, scid, tid, old_data)
-
- # We will convert privileges coming from client required
- if 'relacl' in data:
- for mode in ['added', 'changed', 'deleted']:
- if mode in data['relacl']:
- data['relacl'][mode] = parse_priv_to_db(
- data['relacl'][mode], self.acl
- )
-
- # If name is not present in request data
- if 'name' not in data:
- data['name'] = old_data['name']
-
- data = self.check_and_convert_name_to_string(data)
-
- # If name if not present
- if 'schema' not in data:
- data['schema'] = old_data['schema']
-
- # Filter out new tables from list, we will send complete list
- # and not newly added tables in the list from client
- # so we will filter new tables here
- if 'coll_inherits' in data:
- p_len = len(old_data['coll_inherits'])
- c_len = len(data['coll_inherits'])
- # If table(s) added
- if c_len > p_len:
- data['coll_inherits_added'] = list(
- set(data['coll_inherits']) - set(old_data['coll_inherits'])
- )
- # If table(s)removed
- elif c_len < p_len:
- data['coll_inherits_removed'] = list(
- set(old_data['coll_inherits']) - set(data['coll_inherits'])
- )
- # Safe side verification,In case it happens..
- # If user removes and adds same number of table
- # eg removed one table and added one new table
- elif c_len == p_len:
- data['coll_inherits_added'] = list(
- set(data['coll_inherits']) - set(old_data['coll_inherits'])
- )
- data['coll_inherits_removed'] = list(
- set(old_data['coll_inherits']) - set(data['coll_inherits'])
- )
-
- SQL = render_template("/".join([self.template_path, 'update.sql']),
- o_data=old_data, data=data, conn=self.conn)
- # Removes training new lines
- SQL = SQL.strip('\n') + '\n\n'
-
- # Parse/Format columns & create sql
- if 'columns' in data:
- # Parse the data coming from client
- data = self._parse_format_columns(data, mode='edit')
-
- columns = data['columns']
- column_sql = '\n'
-
- # If column(s) is/are deleted
- if 'deleted' in columns:
- for c in columns['deleted']:
- c['schema'] = data['schema']
- c['table'] = data['name']
- # Sql for drop column
- if 'inheritedfrom' not in c:
- column_sql += render_template("/".join(
- [self.column_template_path, 'delete.sql']),
- data=c, conn=self.conn).strip('\n') + '\n\n'
-
- # If column(s) is/are changed
- # Here we will be needing previous properties of column
- # so that we can compare & update it
- if 'changed' in columns:
- for c in columns['changed']:
- c['schema'] = data['schema']
- c['table'] = data['name']
- if 'attacl' in c:
- c['attacl'] = parse_priv_to_db(c['attacl'],
- self.column_acl)
-
- properties_sql = render_template("/".join([self.column_template_path,
- 'properties.sql']),
- tid=tid,
- clid=c['attnum'],
- show_sys_objects=self.blueprint.show_system_objects
- )
-
- status, res = self.conn.execute_dict(properties_sql)
- if not status:
- return internal_server_error(errormsg=res)
- old_data = res['rows'][0]
-
- old_data['cltype'], old_data['hasSqrBracket'] = self._cltype_formatter(old_data['cltype'])
- old_data = self.convert_length_precision_to_string(old_data)
-
- fulltype = self.get_full_type(
- old_data['typnspname'], old_data['typname'],
- old_data['isdup'], old_data['attndims'], old_data['atttypmod']
- )
-
- # If we have length & precision both
- matchObj = re.search(r'(\d+),(\d+)', fulltype)
- if matchObj:
- old_data['attlen'] = int(matchObj.group(1))
- old_data['attprecision'] = int(matchObj.group(2))
- else:
- # If we have length only
- matchObj = re.search(r'(\d+)', fulltype)
- if matchObj:
- old_data['attlen'] = int(matchObj.group(1))
- old_data['attprecision'] = None
- else:
- old_data['attlen'] = None
- old_data['attprecision'] = None
-
- old_data['cltype'] = DataTypeReader.parse_type_name(old_data['cltype'])
-
- # Sql for alter column
- if 'inheritedfrom' not in c:
- column_sql += render_template("/".join(
- [self.column_template_path, 'update.sql']),
- data=c, o_data=old_data, conn=self.conn).strip('\n') + '\n\n'
-
- # If column(s) is/are added
- if 'added' in columns:
- for c in columns['added']:
- c['schema'] = data['schema']
- c['table'] = data['name']
- # Sql for create column
- if 'attacl' in c:
- c['attacl'] = parse_priv_to_db(c['attacl'],
- self.column_acl)
-
- c = self.convert_length_precision_to_string(c)
-
- if 'inheritedfrom' not in c:
- column_sql += render_template("/".join(
- [self.column_template_path, 'create.sql']),
- data=c, conn=self.conn).strip('\n') + '\n\n'
-
- # Combine all the SQL together
- SQL += column_sql.strip('\n')
-
- # Check if index constraints are added/changed/deleted
- index_constraint_sql = self.get_index_constraint_sql(did, tid, data)
- # If we have index constraint sql then ad it in main sql
- if index_constraint_sql is not None:
- SQL += '\n' + index_constraint_sql
-
- # Check if foreign key(s) is/are added/changed/deleted
- foreign_key_sql = self.get_foreign_key_sql(tid, data)
- # If we have foreign key sql then ad it in main sql
- if foreign_key_sql is not None:
- SQL += '\n' + foreign_key_sql
-
- # Check if check constraint(s) is/are added/changed/deleted
- check_constraint_sql = self.get_check_constraint_sql(tid, data)
- # If we have check constraint sql then ad it in main sql
- if check_constraint_sql is not None:
- SQL += '\n' + check_constraint_sql
-
- # Check if exclusion constraint(s) is/are added/changed/deleted
- exclusion_constraint_sql = self.get_exclusion_constraint_sql(did, tid, data)
- # If we have check constraint sql then ad it in main sql
- if exclusion_constraint_sql is not None:
- SQL += '\n' + exclusion_constraint_sql
-
- else:
- required_args = [
- 'name'
- ]
-
- for arg in required_args:
- if arg not in data:
- return gettext('-- definition incomplete')
-
- # validate constraint data.
- for key in ['primary_key', 'unique_constraint',
- 'foreign_key', 'check_constraint',
- 'exclude_constraint']:
- if key in data and len(data[key]) > 0:
- for constraint in data[key]:
- if not self.validate_constrains(key, constraint):
- return gettext('-- definition incomplete for {0}'.format(key))
-
- # We will convert privileges coming from client required
- # in server side format
- if 'relacl' in data:
- data['relacl'] = parse_priv_to_db(data['relacl'], self.acl)
-
- # Parse & format columns
- data = self._parse_format_columns(data)
- data = self.check_and_convert_name_to_string(data)
-
- if 'foreign_key' in data:
- for c in data['foreign_key']:
- SQL = render_template("/".join([self.foreign_key_template_path,
- 'get_parent.sql']),
- tid=c['columns'][0]['references'])
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
-
- c['remote_schema'] = rset['rows'][0]['schema']
- c['remote_table'] = rset['rows'][0]['table']
-
- # If the request for new object which do not have did
- SQL = render_template("/".join([self.template_path, 'create.sql']),
- data=data, conn=self.conn)
- SQL = re.sub('\n{2,}', '\n\n', SQL)
- SQL = SQL.strip('\n')
-
- return SQL, data['name'] if 'name' in data else old_data['name']
-
- @staticmethod
- def validate_constrains(key, data):
-
- if key == 'primary_key' or key == 'unique_constraint':
- if 'columns' in data and len(data['columns']) > 0:
- return True
- else:
- return False
- elif key == 'foreign_key':
- if 'oid' not in data:
- for arg in ['columns']:
- if arg not in data:
- return False
- elif isinstance(data[arg], list) and len(data[arg]) < 1:
- return False
-
- if 'autoindex' in data and data['autoindex'] and \
- ('coveringindex' not in data or
- data['coveringindex'] == ''):
- return False
-
- return True
-
- elif key == 'check_constraint':
- for arg in ['consrc']:
- if arg not in data or data[arg] == '':
- return False
- return True
-
- elif key == 'exclude_constraint':
- pass
-
- return True
-
- @check_precondition
+ @BaseTableView.check_precondition
def dependents(self, gid, sid, did, scid, tid):
"""
This function get the dependents and return ajax response
@@ -2460,43 +1199,9 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
scid: Schema ID
tid: Table ID
"""
- # Specific condition for column which we need to append
- where = "WHERE dep.refobjid={0}::OID".format(tid)
+ return BaseTableView.get_table_dependents(self, tid)
- dependents_result = self.get_dependents(
- self.conn, tid
- )
-
- # Specific sql to run againt column to fetch dependents
- SQL = render_template("/".join([self.template_path,
- 'depend.sql']), where=where)
-
- status, res = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- for row in res['rows']:
- ref_name = row['refname']
- if ref_name is None:
- continue
-
- dep_type = ''
- dep_str = row['deptype']
- if dep_str == 'a':
- dep_type = 'auto'
- elif dep_str == 'n':
- dep_type = 'normal'
- elif dep_str == 'i':
- dep_type = 'internal'
-
- dependents_result.append({'type': 'sequence', 'name': ref_name, 'field': dep_type})
-
- return ajax_response(
- response=dependents_result,
- status=200
- )
-
- @check_precondition
+ @BaseTableView.check_precondition
def dependencies(self, gid, sid, did, scid, tid):
"""
This function get the dependencies and return ajax response
@@ -2508,18 +1213,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
did: Database ID
scid: Schema ID
tid: Table ID
-
"""
- dependencies_result = self.get_dependencies(
- self.conn, tid
- )
+ return BaseTableView.get_table_dependencies(self, tid)
- return ajax_response(
- response=dependencies_result,
- status=200
- )
-
- @check_precondition
+ @BaseTableView.check_precondition
def sql(self, gid, sid, did, scid, tid):
"""
This function will creates reverse engineered sql for
@@ -2534,15 +1231,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
"""
main_sql = []
- """
- #####################################
- # 1) Reverse engineered sql for TABLE
- #####################################
- """
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -2552,250 +1245,10 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
data = res['rows'][0]
- # Table & Schema declaration so that we can use them in child nodes
- schema = data['schema']
- table = data['name']
+ return BaseTableView.get_reverse_engineered_sql(
+ self, did, scid, tid, main_sql, data)
- data = self._formatter(did, scid, tid, data)
-
- # Now we have all lis of columns which we need
- # to include in our create definition, Let's format them
- if 'columns' in data:
- for c in data['columns']:
- if 'attacl' in c:
- c['attacl'] = parse_priv_to_db(c['attacl'], self.column_acl)
-
- # check type for '[]' in it
- if 'cltype' in c:
- c['cltype'], c['hasSqrBracket'] = self._cltype_formatter(c['cltype'])
-
- sql_header = u"-- Table: {0}\n\n-- ".format(self.qtIdent(self.conn,
- data['schema'],
- data['name']))
-
- sql_header += render_template("/".join([self.template_path,
- 'delete.sql']),
- data=data, conn=self.conn)
-
- sql_header = sql_header.strip('\n')
- sql_header += '\n'
-
- # Add into main sql
- main_sql.append(sql_header)
-
- # Parse privilege data
- if 'relacl' in data:
- data['relacl'] = parse_priv_to_db(data['relacl'], self.acl)
-
- # If the request for new object which do not have did
- table_sql = render_template("/".join([self.template_path,
- 'create.sql']),
- data=data, conn=self.conn, is_sql=True)
-
- # Add into main sql
- table_sql = re.sub('\n{2,}', '\n\n', table_sql)
- main_sql.append(table_sql.strip('\n'))
-
- """
- ######################################
- # 2) Reverse engineered sql for INDEX
- ######################################
- """
-
- SQL = render_template("/".join([self.index_template_path,
- 'nodes.sql']), tid=tid)
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
-
- for row in rset['rows']:
-
- SQL = render_template("/".join([self.index_template_path,
- 'properties.sql']),
- did=did, tid=tid, idx=row['oid'],
- datlastsysoid=self.datlastsysoid)
-
- status, res = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- data = dict(res['rows'][0])
- # Adding parent into data dict, will be using it while creating sql
- data['schema'] = schema
- data['table'] = table
- # We also need to fecth columns of index
- SQL = render_template("/".join([self.index_template_path,
- 'column_details.sql']),
- idx=row['oid'])
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
-
- # 'attdef' comes with quotes from query so we need to strip them
- # 'options' we need true/false to render switch ASC(false)/DESC(true)
- columns = []
- cols = []
- for col_row in rset['rows']:
- # We need all data as collection for ColumnsModel
- cols_data = {
- 'colname': col_row['attdef'].strip('"'),
- 'collspcname': col_row['collnspname'],
- 'op_class': col_row['opcname'],
- }
- if col_row['options'][0] == 'DESC':
- cols_data['sort_order'] = True
- columns.append(cols_data)
-
- # We need same data as string to display in properties window
- # If multiple column then separate it by colon
- cols_str = col_row['attdef']
- if col_row['collnspname']:
- cols_str += ' COLLATE ' + col_row['collnspname']
- if col_row['opcname']:
- cols_str += ' ' + col_row['opcname']
- if col_row['options'][0] == 'DESC':
- cols_str += ' DESC'
- cols.append(cols_str)
-
- # Push as collection
- data['columns'] = columns
- # Push as string
- data['cols'] = ', '.join(cols)
-
- sql_header = u"\n-- Index: {0}\n\n-- ".format(data['name'])
-
- sql_header += render_template("/".join([self.index_template_path,
- 'delete.sql']),
- data=data, conn=self.conn)
-
- index_sql = render_template("/".join([self.index_template_path,
- 'create.sql']),
- data=data, conn=self.conn)
- index_sql += "\n"
- index_sql += render_template("/".join([self.index_template_path,
- 'alter.sql']),
- data=data, conn=self.conn)
-
- # Add into main sql
- index_sql = re.sub('\n{2,}', '\n\n', index_sql)
- main_sql.append(sql_header + '\n\n' + index_sql.strip('\n'))
-
- """
- ########################################
- # 3) Reverse engineered sql for TRIGGERS
- ########################################
- """
- SQL = render_template("/".join([self.trigger_template_path,
- 'nodes.sql']), tid=tid)
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
-
- for row in rset['rows']:
- trigger_sql = ''
-
- SQL = render_template("/".join([self.trigger_template_path,
- 'properties.sql']),
- tid=tid, trid=row['oid'],
- datlastsysoid=self.datlastsysoid)
-
- status, res = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- data = dict(res['rows'][0])
- # Adding parent into data dict, will be using it while creating sql
- data['schema'] = schema
- data['table'] = table
-
- data = self.get_trigger_function_schema(data)
-
- if len(data['custom_tgargs']) > 1:
- # We know that trigger has more than 1 argument, let's join them
- data['tgargs'] = self._format_args(data['custom_tgargs'])
-
- if len(data['tgattr']) > 1:
- columns = ', '.join(data['tgattr'].split(' '))
-
- SQL = render_template("/".join([self.trigger_template_path,
- 'get_columns.sql']),
- tid=tid, clist=columns)
-
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
- # 'tgattr' contains list of columns from table used in trigger
- columns = []
-
- for col_row in rset['rows']:
- columns.append({'column': col_row['name']})
-
- data['columns'] = columns
-
- data = trigger_definition(data)
-
- sql_header = u"\n-- Trigger: {0}\n\n-- ".format(data['name'])
-
- sql_header += render_template("/".join([self.trigger_template_path,
- 'delete.sql']),
- data=data, conn=self.conn)
-
- # If the request for new object which do not have did
- trigger_sql = render_template("/".join([self.trigger_template_path,
- 'create.sql']),
- data=data, conn=self.conn)
-
- trigger_sql = sql_header + '\n\n' + trigger_sql.strip('\n')
-
- # If trigger is disabled then add sql code for the same
- if not data['is_enable_trigger']:
- trigger_sql += '\n\n'
- trigger_sql += render_template("/".join([
- self.trigger_template_path,
- 'enable_disable_trigger.sql']),
- data=data, conn=self.conn)
-
- # Add into main sql
- trigger_sql = re.sub('\n{2,}', '\n\n', trigger_sql)
- main_sql.append(trigger_sql)
-
- """
- #####################################
- # 4) Reverse engineered sql for RULES
- #####################################
- """
-
- SQL = render_template("/".join(
- [self.rules_template_path, 'properties.sql']), tid=tid)
-
- status, rset = self.conn.execute_2darray(SQL)
- if not status:
- return internal_server_error(errormsg=rset)
-
- for row in rset['rows']:
- rules_sql = '\n'
- SQL = render_template("/".join(
- [self.rules_template_path, 'properties.sql']
- ), rid=row['oid'], datlastsysoid=self.datlastsysoid)
-
- status, res = self.conn.execute_dict(SQL)
- if not status:
- return internal_server_error(errormsg=res)
-
- res_data = parse_rule_definition(res)
- rules_sql += render_template("/".join(
- [self.rules_template_path, 'create.sql']),
- data=res_data, display_comments=True)
-
- # Add into main sql
- rules_sql = re.sub('\n{2,}', '\n\n', rules_sql)
- main_sql.append(rules_sql)
-
- sql = '\n'.join(main_sql)
-
- return ajax_response(response=sql.strip('\n'))
-
- @check_precondition
+ @BaseTableView.check_precondition
def select_sql(self, gid, sid, did, scid, tid):
"""
SELECT script sql for the object
@@ -2810,10 +1263,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
Returns:
SELECT Script sql for the object
"""
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -2839,7 +1293,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
)
return ajax_response(response=sql)
- @check_precondition
+ @BaseTableView.check_precondition
def insert_sql(self, gid, sid, did, scid, tid):
"""
INSERT script sql for the object
@@ -2854,10 +1308,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
Returns:
INSERT Script sql for the object
"""
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -2886,7 +1341,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
return ajax_response(response=sql)
- @check_precondition
+ @BaseTableView.check_precondition
def update_sql(self, gid, sid, did, scid, tid):
"""
UPDATE script sql for the object
@@ -2901,10 +1356,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
Returns:
UPDATE Script sql for the object
"""
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -2935,7 +1391,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
return ajax_response(response=sql)
- @check_precondition
+ @BaseTableView.check_precondition
def delete_sql(self, gid, sid, did, scid, tid):
"""
DELETE script sql for the object
@@ -2950,10 +1406,11 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
Returns:
DELETE Script sql for the object
"""
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, scid=scid, tid=tid,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.table_template_path, 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -2966,7 +1423,7 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
return ajax_response(response=sql)
- @check_precondition
+ @BaseTableView.check_precondition
def statistics(self, gid, sid, did, scid, tid=None):
"""
Statistics
@@ -2982,63 +1439,6 @@ class TableView(PGChildNodeView, DataTypeReader, VacuumSettings):
otherwise it will return statistics for all the tables in that
schema.
"""
-
- # Fetch schema name
- status, schema_name = self.conn.execute_scalar(
- render_template(
- "/".join([self.template_path, 'get_schema.sql']),
- conn=self.conn, scid=scid
- )
- )
- if not status:
- return internal_server_error(errormsg=schema_name)
-
- if tid is None:
- status, res = self.conn.execute_dict(
- render_template(
- "/".join([self.template_path, 'coll_table_stats.sql']),
- conn=self.conn, schema_name=schema_name
- )
- )
- else:
- # For Individual table stats
-
- # Check if pgstattuple extension is already created?
- # if created then only add extended stats
- status, is_pgstattuple = self.conn.execute_scalar("""
- SELECT (count(extname) > 0) AS is_pgstattuple
- FROM pg_extension
- WHERE extname='pgstattuple'
- """)
- if not status:
- return internal_server_error(errormsg=is_pgstattuple)
-
- # Fetch Table name
- status, table_name = self.conn.execute_scalar(
- render_template(
- "/".join([self.template_path, 'get_table.sql']),
- conn=self.conn, scid=scid, tid=tid
- )
- )
- if not status:
- return internal_server_error(errormsg=table_name)
-
- status, res = self.conn.execute_dict(
- render_template(
- "/".join([self.template_path, 'stats.sql']),
- conn=self.conn, schema_name=schema_name,
- table_name=table_name,
- is_pgstattuple=is_pgstattuple, tid=tid
- )
- )
-
- if not status:
- return internal_server_error(errormsg=res)
-
- return make_json_response(
- data=res,
- status=200
- )
-
+ return BaseTableView.get_table_statistics(self, scid, tid)
TableView.register_node_view(blueprint)
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js
index 179b9610c..10737e0a2 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/templates/column/js/column.js
@@ -86,6 +86,7 @@ define('pgadmin.node.column', [
if (!pgBrowser.Nodes['column']) {
pgBrowser.Nodes['column'] = pgBrowser.Node.extend({
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
parent_type: ['table', 'view', 'mview'],
collection_type: ['coll-table', 'coll-view', 'coll-mview'],
type: 'column',
@@ -197,11 +198,17 @@ define('pgadmin.node.column', [
);
},
disabled: function(m){
- // If primary key already exist then disable.
+ // Disable it, when one of this:
+ // - Primary key already exist
+ // - Table is a partitioned table
if (
- m.top && !_.isUndefined(m.top.get('oid')) &&
- m.top.get('primary_key').length > 0 &&
- !_.isUndefined(m.top.get('primary_key').first().get('oid'))
+ m.top && ((
+ !_.isUndefined(m.top.get('oid')) &&
+ m.top.get('primary_key').length > 0 &&
+ !_.isUndefined(m.top.get('primary_key').first().get('oid'))
+ ) || (
+ m.top.has('is_partitioned') && m.top.get('is_partitioned')
+ ))
) {
return true;
}
@@ -228,6 +235,17 @@ define('pgadmin.node.column', [
return false;
}
+ // If table is partitioned table then disable
+ if (m.top && !_.isUndefined(m.top.get('is_partitioned')) &&
+ m.top.get('is_partitioned'))
+ {
+ setTimeout(function () {
+ m.set('is_primary_key', false);
+ }, 10);
+
+ return false;
+ }
+
if(!m.inSchemaWithColumnCheck.apply(this, [m]) &&
!_.isUndefined(name) && !_.isNull(name) && name !== '') {
return true;
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/tests/test_column_get.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/tests/test_column_get.py
index 713d9992b..4adfbef11 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/tests/test_column_get.py
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/column/tests/test_column_get.py
@@ -24,7 +24,7 @@ from . import utils as columns_utils
class ColumnGetTestCase(BaseTestGenerator):
"""This class will get column under table node."""
scenarios = [
- ('Fetch table Node URL', dict(url='/browser/column/obj/'))
+ ('Fetch columns under table node', dict(url='/browser/column/obj/'))
]
def setUp(self):
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js
index b12d9d300..85f62f719 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/templates/check_constraint/js/check_constraint.js
@@ -10,6 +10,7 @@ define('pgadmin.node.check_constraints', [
// Check Constraint Node
if (!pgBrowser.Nodes['check_constraints']) {
pgAdmin.Browser.Nodes['check_constraints'] = pgBrowser.Node.extend({
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'check_constraints',
label: gettext('Check'),
collection_type: 'coll-constraints',
@@ -18,7 +19,7 @@ define('pgadmin.node.check_constraints', [
dialogHelp: url_for('help.static', {'filename': 'check_dialog.html'}),
hasSQL: true,
hasDepends: true,
- parent_type: ['table'],
+ parent_type: ['table','partition'],
Init: function() {
// Avoid mulitple registration of menus
if (this.initialized)
@@ -137,6 +138,18 @@ define('pgadmin.node.check_constraints', [
'switch', cell: 'boolean', group: gettext('Definition'), mode:
['properties', 'create', 'edit'], min_version: 90200,
disabled: function(m) {
+ // Disabled if table is a partitioned table.
+ if ((_.has(m , 'top') && !_.isUndefined(m.top) && m.top.get('is_partitioned')) ||
+ (_.has(m, 'node_info') && _.has(m.node_info, 'table') &&
+ _.has(m.node_info.table, 'is_partitioned') && m.node_info.table.is_partitioned)
+ ){
+ setTimeout(function(){
+ m.set('connoinherit', false);
+ },10);
+
+ return true;
+ }
+
return ((_.has(m, 'handler') &&
!_.isUndefined(m.handler) &&
!_.isUndefined(m.get('oid'))) || (_.isFunction(m.isNew) && !m.isNew()));
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js
index 814edce50..605495d79 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/exclusion_constraint/templates/exclusion_constraint/js/exclusion_constraint.js
@@ -602,6 +602,7 @@ define('pgadmin.node.exclusion_constraint', [
// Extend the browser's node class for exclusion constraint node
if (!pgBrowser.Nodes['exclusion_constraint']) {
pgAdmin.Browser.Nodes['exclusion_constraint'] = pgBrowser.Node.extend({
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'exclusion_constraint',
label: gettext('Exclusion constraint'),
collection_type: 'coll-constraints',
@@ -609,7 +610,7 @@ define('pgadmin.node.exclusion_constraint', [
sqlCreateHelp: 'ddl-constraints.html',
dialogHelp: url_for('help.static', {'filename': 'exclusion_constraint_dialog.html'}),
hasSQL: true,
- parent_type: 'table',
+ parent_type: ['table','partition'],
canDrop: true,
canDropCascade: true,
hasDepends: true,
@@ -916,12 +917,22 @@ define('pgadmin.node.exclusion_constraint', [
if (data && data.check == false)
return true;
- var t = pgBrowser.tree, i = item, d = itemData, parents = [];
+ var t = pgBrowser.tree, i = item, d = itemData, parents = [],
+ immediate_parent_table_found = false,
+ is_immediate_parent_table_partitioned = false;
// To iterate over tree to check parent node
while (i) {
+ // If table is partitioned table then return false
+ if (!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) {
+ immediate_parent_table_found = true;
+ if ('is_partitioned' in d && d.is_partitioned) {
+ is_immediate_parent_table_partitioned = true;
+ }
+ }
+
// If it is schema then allow user to create table
if (_.indexOf(['schema'], d._type) > -1)
- return true;
+ return !is_immediate_parent_table_partitioned;
parents.push(d._type);
i = t.hasParent(i) ? t.parent(i) : null;
d = i ? t.itemData(i) : null;
@@ -930,7 +941,7 @@ define('pgadmin.node.exclusion_constraint', [
if (_.indexOf(parents, 'catalog') > -1) {
return false;
} else {
- return true;
+ return !is_immediate_parent_table_partitioned;
}
}
});
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js
index 95afa3ef9..a4bec09de 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/foreign_key/templates/foreign_key/js/foreign_key.js
@@ -602,6 +602,7 @@ define('pgadmin.node.foreign_key', [
// Extend the browser's node class for foreign key node
if (!pgBrowser.Nodes['foreign_key']) {
pgAdmin.Browser.Nodes['foreign_key'] = pgBrowser.Node.extend({
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'foreign_key',
label: gettext('Foreign key'),
collection_type: 'coll-constraints',
@@ -610,7 +611,7 @@ define('pgadmin.node.foreign_key', [
dialogHelp: url_for('help.static', {'filename': 'foreign_key_dialog.html'}),
hasSQL: true,
hasDepends: false,
- parent_type: 'table',
+ parent_type: ['table','partition'],
canDrop: true,
canDropCascade: true,
hasDepends: true,
@@ -1068,12 +1069,22 @@ define('pgadmin.node.foreign_key', [
if (data && data.check == false)
return true;
- var t = pgBrowser.tree, i = item, d = itemData, parents = [];
+ var t = pgBrowser.tree, i = item, d = itemData, parents = [],
+ immediate_parent_table_found = false,
+ is_immediate_parent_table_partitioned = false;
// To iterate over tree to check parent node
while (i) {
+ // If table is partitioned table then return false
+ if (!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) {
+ immediate_parent_table_found = true;
+ if ('is_partitioned' in d && d.is_partitioned) {
+ is_immediate_parent_table_partitioned = true;
+ }
+ }
+
// If it is schema then allow user to c reate table
if (_.indexOf(['schema'], d._type) > -1)
- return true;
+ return !is_immediate_parent_table_partitioned;
parents.push(d._type);
i = t.hasParent(i) ? t.parent(i) : null;
d = i ? t.itemData(i) : null;
@@ -1082,7 +1093,7 @@ define('pgadmin.node.foreign_key', [
if (_.indexOf(parents, 'catalog') > -1) {
return false;
} else {
- return true;
+ return !is_immediate_parent_table_partitioned;
}
}
});
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js
index 97a404a4d..9479acd8f 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/index_constraint/templates/index_constraint/js/index_constraint.js
@@ -6,6 +6,7 @@ define('pgadmin.node.{{node_type}}', [
// Extend the browser's node class for index constraint node
if (!pgBrowser.Nodes['{{node_type}}']) {
pgAdmin.Browser.Nodes['{{node_type}}'] = pgBrowser.Node.extend({
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: '{{node_type}}',
label: '{{ node_label }}',
collection_type: 'coll-constraints',
@@ -20,7 +21,7 @@ define('pgadmin.node.{{node_type}}', [
hasDepends: true,
hasStatistics: true,
statsPrettifyFields: ['Index size'],
- parent_type: 'table',
+ parent_type: ['table','partition'],
canDrop: true,
canDropCascade: true,
Init: function() {
@@ -45,12 +46,28 @@ define('pgadmin.node.{{node_type}}', [
if (data && data.check == false)
return true;
- var t = pgBrowser.tree, i = item, d = itemData, parents = [];
+ var t = pgBrowser.tree, i = item, d = itemData, parents = [],
+ immediate_parent_table_found = false,
+ is_immediate_parent_table_partitioned = false;
+
// To iterate over tree to check parent node
while (i) {
- // If it is schema then allow user to c reate table
+ // If table is partitioned table then return false
+ if (!immediate_parent_table_found && (d._type == 'table' || d._type == 'partition')) {
+ immediate_parent_table_found = true;
+ if ('is_partitioned' in d && d.is_partitioned) {
+ is_immediate_parent_table_partitioned = true;
+ }
+ }
+
+ // If it is schema then allow user to create table
if (_.indexOf(['schema'], d._type) > -1) {
{% if node_type == 'primary_key' %}
+
+ if (is_immediate_parent_table_partitioned) {
+ return false;
+ }
+
// There should be only one primary key per table.
var children = t.children(arguments[1], false),
primary_key_found = false;
@@ -63,7 +80,7 @@ define('pgadmin.node.{{node_type}}', [
});
return !primary_key_found;
{% else %}
- return true;
+ return !is_immediate_parent_table_partitioned;
{% endif %}
}
parents.push(d._type);
@@ -74,7 +91,7 @@ define('pgadmin.node.{{node_type}}', [
if (_.indexOf(parents, 'catalog') > -1) {
return false;
} else {
- return true;
+ return !is_immediate_parent_table_partitioned;
}
},
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/templates/constraints/js/constraints.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/templates/constraints/js/constraints.js
index a7346f1e5..e372b3ccb 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/templates/constraints/js/constraints.js
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/templates/constraints/js/constraints.js
@@ -9,16 +9,18 @@ define('pgadmin.node.constraints', [
node: 'constraints',
label: gettext('Constraints'),
type: 'coll-constraints',
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
columns: ['name', 'comment']
});
};
if (!pgBrowser.Nodes['constraints']) {
pgAdmin.Browser.Nodes['constraints'] = pgBrowser.Node.extend({
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
type: 'constraints',
label: gettext('Constraints'),
collection_type: 'coll-constraints',
- parent_type: ['table'],
+ parent_type: ['table','partition'],
Init: function() {
/* Avoid mulitple registration of menus */
if (this.initialized)
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py
index e52c15ba1..13f9d8648 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/__init__.py
@@ -72,12 +72,28 @@ class IndexesModule(CollectionNodeModule):
if super(IndexesModule, self).BackendSupported(manager, **kwargs):
conn = manager.connection(did=kwargs['did'])
+ # In case of partitioned table return false.
+ if 'tid' in kwargs and manager.version >= 100000:
+ partition_path = 'partition/sql/#{0}#'.format(manager.version)
+ SQL = render_template(
+ "/".join([partition_path, 'backend_support.sql']),
+ tid=kwargs['tid']
+ )
+ status, res = conn.execute_scalar(SQL)
+
+ # check if any errors
+ if not status:
+ return internal_server_error(errormsg=res)
+ return not res
+
if 'vid' not in kwargs:
return True
template_path = 'index/sql/#{0}#'.format(manager.version)
- SQL = render_template("/".join(
- [template_path, 'backend_support.sql']), vid=kwargs['vid'])
+ SQL = render_template(
+ "/".join([template_path, 'backend_support.sql']),
+ vid=kwargs['vid']
+ )
status, res = conn.execute_scalar(SQL)
# check if any errors
@@ -239,9 +255,10 @@ class IndexesView(PGChildNodeView):
# We need parent's name eg table name and schema name
# when we create new index in update we can fetch it using
# property sql
- SQL = render_template("/".join([self.template_path,
- 'get_parent.sql']),
- tid=kwargs['tid'])
+ SQL = render_template(
+ "/".join([self.template_path, 'get_parent.sql']),
+ tid=kwargs['tid']
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@@ -262,8 +279,9 @@ class IndexesView(PGChildNodeView):
"""
res = [{'label': '', 'value': ''}]
try:
- SQL = render_template("/".join([self.template_path,
- 'get_collations.sql']))
+ SQL = render_template(
+ "/".join([self.template_path, 'get_collations.sql'])
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -289,8 +307,7 @@ class IndexesView(PGChildNodeView):
"""
res = [{'label': '', 'value': ''}]
try:
- SQL = render_template("/".join([self.template_path,
- 'get_am.sql']))
+ SQL = render_template("/".join([self.template_path, 'get_am.sql']))
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -317,17 +334,17 @@ class IndexesView(PGChildNodeView):
res = dict()
try:
# Fetching all the access methods
- SQL = render_template("/".join([self.template_path,
- 'get_am.sql']))
+ SQL = render_template("/".join([self.template_path, 'get_am.sql']))
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
for row in rset['rows']:
# Fetching all the op_classes for each access method
- SQL = render_template("/".join([self.template_path,
- 'get_op_class.sql']),
- oid=row['oid'])
+ SQL = render_template(
+ "/".join([self.template_path, 'get_op_class.sql']),
+ oid=row['oid']
+ )
status, result = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -365,8 +382,9 @@ class IndexesView(PGChildNodeView):
JSON of available schema nodes
"""
- SQL = render_template("/".join([self.template_path,
- 'nodes.sql']), tid=tid)
+ SQL = render_template(
+ "/".join([self.template_path, 'nodes.sql']), tid=tid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
@@ -393,10 +411,10 @@ class IndexesView(PGChildNodeView):
Returns:
JSON of available schema child nodes
"""
- SQL = render_template("/".join([self.template_path,
- 'nodes.sql']),
- tid=tid,
- idx=idx)
+ SQL = render_template(
+ "/".join([self.template_path, 'nodes.sql']),
+ tid=tid, idx=idx
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@@ -433,8 +451,9 @@ class IndexesView(PGChildNodeView):
JSON of available schema child nodes
"""
res = []
- SQL = render_template("/".join([self.template_path,
- 'nodes.sql']), tid=tid)
+ SQL = render_template(
+ "/".join([self.template_path, 'nodes.sql']), tid=tid
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@@ -465,8 +484,9 @@ class IndexesView(PGChildNodeView):
Updated properties data with column details
"""
- SQL = render_template("/".join([self.template_path,
- 'column_details.sql']), idx=idx)
+ SQL = render_template(
+ "/".join([self.template_path, 'column_details.sql']), idx=idx
+ )
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
@@ -521,10 +541,10 @@ class IndexesView(PGChildNodeView):
JSON of selected schema node
"""
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, tid=tid, idx=idx,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.template_path, 'properties.sql']),
+ did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
@@ -595,9 +615,10 @@ class IndexesView(PGChildNodeView):
try:
# Start transaction.
self.conn.execute_scalar("BEGIN;")
- SQL = render_template("/".join([self.template_path,
- 'create.sql']),
- data=data, conn=self.conn, mode='create')
+ SQL = render_template(
+ "/".join([self.template_path, 'create.sql']),
+ data=data, conn=self.conn, mode='create'
+ )
status, res = self.conn.execute_scalar(SQL)
if not status:
# End transaction.
@@ -606,9 +627,10 @@ class IndexesView(PGChildNodeView):
# If user chooses concurrent index then we cannot run it along
# with other alter statements so we will separate alter index part
- SQL = render_template("/".join([self.template_path,
- 'alter.sql']),
- data=data, conn=self.conn)
+ SQL = render_template(
+ "/".join([self.template_path, 'alter.sql']),
+ data=data, conn=self.conn
+ )
SQL = SQL.strip('\n').strip(' ')
if SQL != '':
status, res = self.conn.execute_scalar(SQL)
@@ -618,9 +640,10 @@ class IndexesView(PGChildNodeView):
return internal_server_error(errormsg=res)
# we need oid to to add object in tree at browser
- SQL = render_template("/".join([self.template_path,
- 'get_oid.sql']),
- tid=tid, data=data)
+ SQL = render_template(
+ "/".join([self.template_path, 'get_oid.sql']),
+ tid=tid, data=data
+ )
status, idx = self.conn.execute_scalar(SQL)
if not status:
# End transaction.
@@ -665,10 +688,10 @@ class IndexesView(PGChildNodeView):
try:
# We will first fetch the index name for current request
# so that we create template for dropping index
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, tid=tid, idx=idx,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.template_path, 'properties.sql']),
+ did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
@@ -687,9 +710,10 @@ class IndexesView(PGChildNodeView):
data = dict(res['rows'][0])
- SQL = render_template("/".join([self.template_path,
- 'delete.sql']),
- data=data, conn=self.conn, cascade=cascade)
+ SQL = render_template(
+ "/".join([self.template_path, 'delete.sql']),
+ data=data, conn=self.conn, cascade=cascade
+ )
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
@@ -787,10 +811,10 @@ class IndexesView(PGChildNodeView):
This function will genrate sql from model data
"""
if idx is not None:
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, tid=tid, idx=idx,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.template_path, 'properties.sql']),
+ did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
@@ -826,11 +850,15 @@ class IndexesView(PGChildNodeView):
return gettext('-- definition incomplete')
# If the request for new object which do not have did
- SQL = render_template("/".join([self.template_path, 'create.sql']),
- data=data, conn=self.conn, mode=mode)
+ SQL = render_template(
+ "/".join([self.template_path, 'create.sql']),
+ data=data, conn=self.conn, mode=mode
+ )
SQL += "\n"
- SQL += render_template("/".join([self.template_path, 'alter.sql']),
- data=data, conn=self.conn)
+ SQL += render_template(
+ "/".join([self.template_path, 'alter.sql']),
+ data=data, conn=self.conn
+ )
return SQL, data['name'] if 'name' in data else old_data['name']
@@ -848,10 +876,10 @@ class IndexesView(PGChildNodeView):
idx: Index ID
"""
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, tid=tid, idx=idx,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.template_path, 'properties.sql']),
+ did=did, tid=tid, idx=idx, datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
@@ -872,9 +900,10 @@ class IndexesView(PGChildNodeView):
return SQL
sql_header = u"-- Index: {0}\n\n-- ".format(data['name'])
- sql_header += render_template("/".join([self.template_path,
- 'delete.sql']),
- data=data, conn=self.conn)
+ sql_header += render_template(
+ "/".join([self.template_path, 'delete.sql']),
+ data=data, conn=self.conn
+ )
SQL = sql_header + '\n\n' + SQL
@@ -959,10 +988,11 @@ class IndexesView(PGChildNodeView):
if is_pgstattuple:
# Fetch index details only if extended stats available
- SQL = render_template("/".join([self.template_path,
- 'properties.sql']),
- did=did, tid=tid, idx=idx,
- datlastsysoid=self.datlastsysoid)
+ SQL = render_template(
+ "/".join([self.template_path, 'properties.sql']),
+ did=did, tid=tid, idx=idx,
+ datlastsysoid=self.datlastsysoid
+ )
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/templates/index/js/index.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/templates/index/js/index.js
index 24f37f0ed..4fc1d54a6 100644
--- a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/templates/index/js/index.js
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/indexes/templates/index/js/index.js
@@ -10,6 +10,7 @@ define('pgadmin.node.index', [
node: 'index',
label: gettext('Indexes'),
type: 'coll-index',
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
sqlAlterHelp: 'sql-alterindex.html',
sqlCreateHelp: 'sql-createindex.html',
dialogHelp: url_for('help.static', {'filename': 'index_dialog.html'}),
@@ -210,8 +211,9 @@ define('pgadmin.node.index', [
});
if (!pgBrowser.Nodes['index']) {
- pgAdmin.Browser.Nodes['index'] = pgAdmin.Browser.Node.extend({
- parent_type: ['table', 'view', 'mview'],
+ pgAdmin.Browser.Nodes['index'] = pgBrowser.Node.extend({
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
+ parent_type: ['table', 'view', 'mview', 'partition'],
collection_type: ['coll-table', 'coll-view'],
sqlAlterHelp: 'sql-alterindex.html',
sqlCreateHelp: 'sql-createindex.html',
@@ -246,6 +248,12 @@ define('pgadmin.node.index', [
category: 'create', priority: 4, label: gettext('Index...'),
icon: 'wcTabIcon icon-index', data: {action: 'create', check: true},
enable: 'canCreate'
+ },{
+ name: 'create_index_onPartition', node: 'partition', module: this,
+ applies: ['object', 'context'], callback: 'show_obj_properties',
+ category: 'create', priority: 4, label: gettext('Index...'),
+ icon: 'wcTabIcon icon-index', data: {action: 'create', check: true},
+ enable: 'canCreate'
},{
name: 'create_index_onMatView', node: 'mview', module: this,
applies: ['object', 'context'], callback: 'show_obj_properties',
@@ -472,12 +480,23 @@ define('pgadmin.node.index', [
if (data && data.check == false)
return true;
- var t = pgBrowser.tree, i = item, d = itemData, parents = [];
+ var t = pgBrowser.tree, i = item, d = itemData, parents = [],
+ immediate_parent_table_found = false,
+ is_immediate_parent_table_partitioned = false;
// To iterate over tree to check parent node
while (i) {
- // If it is schema then allow user to c reate table
+ // Do not allow creating index on partitioned tables.
+ if (!immediate_parent_table_found &&
+ _.indexOf(['table', 'partition'], d._type) > -1) {
+ immediate_parent_table_found = true;
+ if ('is_partitioned' in d && d.is_partitioned) {
+ is_immediate_parent_table_partitioned = true;
+ }
+ }
+
+ // If it is schema then allow user to create index
if (_.indexOf(['schema'], d._type) > -1)
- return true;
+ return !is_immediate_parent_table_partitioned;
parents.push(d._type);
i = t.hasParent(i) ? t.parent(i) : null;
d = i ? t.itemData(i) : null;
@@ -486,7 +505,7 @@ define('pgadmin.node.index', [
if (_.indexOf(parents, 'catalog') > -1) {
return false;
} else {
- return true;
+ return !is_immediate_parent_table_partitioned;
}
}
});
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py
new file mode 100644
index 000000000..a1c010f8b
--- /dev/null
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/__init__.py
@@ -0,0 +1,583 @@
+##########################################################################
+#
+# pgAdmin 4 - PostgreSQL Tools
+#
+# Copyright (C) 2013 - 2017, The pgAdmin Development Team
+# This software is released under the PostgreSQL Licence
+#
+##########################################################################
+
+""" Implements Partitions Node """
+
+import re
+import simplejson as json
+import pgadmin.browser.server_groups.servers.databases.schemas as schema
+from flask import render_template, request
+from flask_babel import gettext
+from pgadmin.browser.server_groups.servers.databases.schemas.utils \
+ import DataTypeReader, VacuumSettings
+from pgadmin.utils.ajax import internal_server_error, \
+ make_response as ajax_response, gone
+from pgadmin.browser.server_groups.servers.databases.schemas.tables.utils \
+ import BaseTableView
+from pgadmin.browser.collection import CollectionNodeModule
+from pgadmin.utils.ajax import make_json_response, precondition_required
+from config import PG_DEFAULT_DRIVER
+from pgadmin.browser.utils import PGChildModule
+
+
+class PartitionsModule(CollectionNodeModule):
+ """
+ class PartitionsModule(CollectionNodeModule)
+
+ A module class for Partition node derived from CollectionNodeModule.
+
+ Methods:
+ -------
+ * __init__(*args, **kwargs)
+ - Method is used to initialize the Partition and it's base module.
+
+ * get_nodes(gid, sid, did, scid, tid)
+ - Method is used to generate the browser collection node.
+
+ * node_inode()
+ - Method is overridden from its base class to make the node a leaf node.
+
+ * script_load()
+ - Load the module script for schema, when any of the server node is
+ initialized.
+ """
+
+ NODE_TYPE = 'partition'
+ COLLECTION_LABEL = gettext("Partitions")
+
+ def __init__(self, *args, **kwargs):
+ """
+ Method is used to initialize the PartitionsModule and it's base module.
+
+ Args:
+ *args:
+ **kwargs:
+ """
+ super(PartitionsModule, self).__init__(*args, **kwargs)
+ self.min_ver = 100000
+ self.max_ver = None
+
+ def get_nodes(self, gid, sid, did, scid, **kwargs):
+ """
+ Generate the collection node
+ """
+ yield self.generate_browser_collection_node(kwargs['tid'])
+
+ @property
+ def script_load(self):
+ """
+ Load the module script for partitions, when any of the schema
+ nodes are initialized.
+ """
+ return schema.SchemaModule.NODE_TYPE
+
+ @property
+ def node_inode(self):
+ """
+ Load the module node as a leaf node
+ """
+ return True
+
+ def BackendSupported(self, manager, **kwargs):
+ """
+ Load this module if it is a partition table
+ """
+ if 'tid' in kwargs and CollectionNodeModule.BackendSupported(self, manager, **kwargs):
+ conn = manager.connection(did=kwargs['did'])
+
+ template_path = 'partition/sql/#{0}#'.format(manager.version)
+ SQL = render_template("/".join(
+ [template_path, 'backend_support.sql']), tid=kwargs['tid'])
+ status, res = conn.execute_scalar(SQL)
+
+ # check if any errors
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ return res
+
+ def register(self, app, options, first_registration=False):
+ """
+ Override the default register function to automatically register
+ sub-modules of table node under partition table node.
+ """
+
+ if first_registration:
+ self.submodules = list(app.find_submodules(self.import_name))
+
+ super(CollectionNodeModule, self).register(app, options, first_registration)
+
+ for module in self.submodules:
+ if first_registration:
+ module.parentmodules.append(self)
+ app.register_blueprint(module)
+
+ # Now add sub modules of table node to partition table node.
+ if first_registration:
+ # Skip 'partition' (avoids a cyclic import issue) and 'column' for now.
+ modules_to_skip = ['partition', 'column']
+ for parent in self.parentmodules:
+ if parent.NODE_TYPE == 'table':
+ self.submodules += [
+ submodule for submodule in parent.submodules
+ if submodule.NODE_TYPE not in modules_to_skip
+ ]
+
+ @property
+ def module_use_template_javascript(self):
+ """
+ Returns whether Jinja2 template is used for generating the javascript
+ module.
+ """
+ return False
+
+
+blueprint = PartitionsModule(__name__)
+
+
+class PartitionsView(BaseTableView, DataTypeReader, VacuumSettings):
+ """
+ This class is responsible for generating routes for Partition node
+
+ Methods:
+ -------
+
+ * list()
+ - This function is used to list all the Partition nodes within that
+ collection.
+
+ * nodes()
+ - This function is used to create all the child nodes within that
+ collection. Here it will create all the Partition nodes.
+
+ * properties(gid, sid, did, scid, tid, ptid)
+ - This function will show the properties of the selected Partition node
+
+ """
+
+ node_type = blueprint.node_type
+
+ parent_ids = [
+ {'type': 'int', 'id': 'gid'},
+ {'type': 'int', 'id': 'sid'},
+ {'type': 'int', 'id': 'did'},
+ {'type': 'int', 'id': 'scid'},
+ {'type': 'int', 'id': 'tid'}
+ ]
+ ids = [
+ {'type': 'int', 'id': 'ptid'}
+ ]
+
+ operations = dict({
+ 'obj': [
+ {'get': 'properties', 'delete': 'delete', 'put': 'update'},
+ {'get': 'list', 'post': 'create'}
+ ],
+ 'nodes': [{'get': 'nodes'}, {'get': 'nodes'}],
+ 'children': [{'get': 'children'}],
+ 'sql': [{'get': 'sql'}],
+ 'msql': [{'get': 'msql'}, {}],
+ 'detach': [{'put': 'detach'}],
+ 'truncate': [{'put': 'truncate'}]
+
+ })
+
+ def children(self, **kwargs):
+ """Build a list of treeview nodes from the child nodes."""
+
+ if 'sid' not in kwargs:
+ return precondition_required(
+ gettext('Required properties are missing.')
+ )
+
+ from pgadmin.utils.driver import get_driver
+ manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
+ sid=kwargs['sid']
+ )
+
+ did = None
+ if 'did' in kwargs:
+ did = kwargs['did']
+
+ conn = manager.connection(did=did)
+
+ if not conn.connected():
+ return precondition_required(
+ gettext(
+ "Connection to the server has been lost."
+ )
+ )
+
+ nodes = []
+ for module in self.blueprint.submodules:
+ if isinstance(module, PGChildModule):
+ if manager is not None and \
+ module.BackendSupported(manager, **kwargs):
+ # treat partition table as normal table.
+ # replace tid with ptid and pop ptid from kwargs
+ if 'ptid' in kwargs:
+ ptid = kwargs.pop('ptid')
+ kwargs['tid'] = ptid
+ nodes.extend(module.get_nodes(**kwargs))
+ else:
+ nodes.extend(module.get_nodes(**kwargs))
+
+ # Explicitly include 'partition' module as we had excluded it during
+ # registration.
+ nodes.extend(self.blueprint.get_nodes(**kwargs))
+
+ # Return sorted nodes based on label
+ return make_json_response(
+ data=sorted(
+ nodes, key=lambda c: c['label']
+ )
+ )
+
+ @BaseTableView.check_precondition
+ def list(self, gid, sid, did, scid, tid):
+ """
+ This function is used to list all the table nodes within that
+ collection.
+
+ Args:
+ gid: Server group ID
+ sid: Server ID
+ did: Database ID
+ scid: Schema ID
+ tid: Table ID
+
+ Returns:
+ JSON of available partition nodes
+ """
+ SQL = render_template("/".join([self.partition_template_path,
+ 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ datlastsysoid=self.datlastsysoid)
+ status, res = self.conn.execute_dict(SQL)
+
+ if not status:
+ return internal_server_error(errormsg=res)
+ return ajax_response(
+ response=res['rows'],
+ status=200
+ )
+
+ @BaseTableView.check_precondition
+ def nodes(self, gid, sid, did, scid, tid, ptid=None):
+ """
+ This function is used to list all the partition nodes within that
+ collection.
+
+ Args:
+ gid: Server group ID
+ sid: Server ID
+ did: Database ID
+ scid: Schema ID
+ tid: Parent Table ID
+ ptid: Partition Table ID
+
+ Returns:
+ JSON of available partition nodes
+ """
+ SQL = render_template(
+ "/".join([self.partition_template_path, 'nodes.sql']),
+ scid=scid, tid=tid
+ )
+ status, rset = self.conn.execute_2darray(SQL)
+ if not status:
+ return internal_server_error(errormsg=rset)
+
+ def browser_node(row):
+ return self.blueprint.generate_browser_node(
+ row['oid'],
+ tid,
+ row['name'],
+ icon="icon-partition",
+ tigger_count=row['triggercount'],
+ has_enable_triggers=row['has_enable_triggers'],
+ is_partitioned=row['is_partitioned'],
+ parent_schema_id=scid,
+ schema_id=row['schema_id'],
+ schema_name=row['schema_name']
+ )
+
+ if ptid is not None:
+ if len(rset['rows']) == 0:
+ return gone(gettext(
+ "The specified partitioned table could not be found."
+ ))
+
+ return make_json_response(
+ data=browser_node(rset['rows'][0]), status=200
+ )
+
+ res = []
+ for row in rset['rows']:
+ res.append(browser_node(row))
+
+ return make_json_response(
+ data=res,
+ status=200
+ )
+
+ @BaseTableView.check_precondition
+ def properties(self, gid, sid, did, scid, tid, ptid):
+ """
+ This function will show the properties of the selected table node.
+
+ Args:
+ gid: Server Group ID
+ sid: Server ID
+ did: Database ID
+ scid: Schema ID
+
+ tid: Table ID
+ ptid: Partition Table ID
+
+ Returns:
+ JSON of selected table node
+ """
+
+ SQL = render_template("/".join([self.partition_template_path,
+ 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ ptid=ptid, datlastsysoid=self.datlastsysoid)
+ status, res = self.conn.execute_dict(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ if len(res['rows']) == 0:
+ return gone(gettext(
+ "The specified partitioned table could not be found."))
+
+ return super(PartitionsView, self).properties(
+ gid, sid, did, scid, ptid, res)
+
+ @BaseTableView.check_precondition
+ def sql(self, gid, sid, did, scid, tid, ptid):
+ """
+ This function will create reverse engineered SQL for
+ the table object
+
+ Args:
+ gid: Server Group ID
+ sid: Server ID
+ did: Database ID
+ scid: Schema ID
+ tid: Table ID
+ ptid: Partition Table ID
+ """
+ main_sql = []
+
+ SQL = render_template("/".join([self.partition_template_path,
+ 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ ptid=ptid, datlastsysoid=self.datlastsysoid)
+ status, res = self.conn.execute_dict(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ if len(res['rows']) == 0:
+ return gone(gettext(
+ "The specified partitioned table could not be found."))
+
+ data = res['rows'][0]
+
+ return BaseTableView.get_reverse_engineered_sql(self, did, scid, ptid,
+ main_sql, data)
+
+ @BaseTableView.check_precondition
+ def detach(self, gid, sid, did, scid, tid, ptid):
+ """
+ This function will detach the partition from the parent table
+
+ Args:
+ gid: Server Group ID
+ sid: Server ID
+ did: Database ID
+ scid: Schema ID
+ tid: Table ID
+ ptid: Partition Table ID
+ """
+ # Fetch schema name
+ status, parent_schema = self.conn.execute_scalar(
+ render_template(
+ "/".join([self.table_template_path, 'get_schema.sql']),
+ conn=self.conn, scid=scid
+ )
+ )
+ if not status:
+ return internal_server_error(errormsg=parent_schema)
+
+ # Fetch Parent Table name
+ status, partitioned_table_name = self.conn.execute_scalar(
+ render_template(
+ "/".join([self.table_template_path, 'get_table.sql']),
+ conn=self.conn, scid=scid, tid=tid
+ )
+ )
+ if not status:
+ return internal_server_error(errormsg=partitioned_table_name)
+
+ # Get schema oid of partition
+ status, pscid = self.conn.execute_scalar(
+ render_template("/".join([self.table_template_path,
+ 'get_schema_oid.sql']), tid=ptid))
+ if not status:
+ return internal_server_error(errormsg=scid)
+
+ # Fetch schema name
+ status, partition_schema = self.conn.execute_scalar(
+ render_template("/".join([self.table_template_path,
+ 'get_schema.sql']), conn=self.conn,
+ scid=pscid)
+ )
+ if not status:
+ return internal_server_error(errormsg=partition_schema)
+
+ # Fetch Partition Table name
+ status, partition_name = self.conn.execute_scalar(
+ render_template(
+ "/".join([self.table_template_path, 'get_table.sql']),
+ conn=self.conn, scid=pscid, tid=ptid
+ )
+ )
+ if not status:
+ return internal_server_error(errormsg=partition_name)
+
+ try:
+ temp_data = dict()
+ temp_data['parent_schema'] = parent_schema
+ temp_data['partitioned_table_name'] = partitioned_table_name
+ temp_data['schema'] = partition_schema
+ temp_data['name'] = partition_name
+
+ SQL = render_template("/".join(
+ [self.partition_template_path, 'detach.sql']),
+ data=temp_data, conn=self.conn)
+
+ status, res = self.conn.execute_scalar(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ return make_json_response(
+ success=1,
+ info=gettext("Partition detached."),
+ data={
+ 'id': ptid,
+ 'scid': scid
+ }
+ )
+ except Exception as e:
+ return internal_server_error(errormsg=str(e))
+
+ @BaseTableView.check_precondition
+ def msql(self, gid, sid, did, scid, tid, ptid=None):
+ """
+ This function will create modified sql for table object
+
+ Args:
+ gid: Server Group ID
+ sid: Server ID
+ did: Database ID
+ scid: Schema ID
+ tid: Table ID
+ """
+ data = dict()
+ for k, v in request.args.items():
+ try:
+ data[k] = json.loads(v, encoding='utf-8')
+ except (ValueError, TypeError, KeyError):
+ data[k] = v
+
+ if ptid is not None:
+ SQL = render_template("/".join([self.partition_template_path,
+ 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ ptid=ptid, datlastsysoid=self.datlastsysoid)
+ status, res = self.conn.execute_dict(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ SQL, name = self.get_sql(did, scid, ptid, data, res)
+ SQL = re.sub('\n{2,}', '\n\n', SQL)
+ SQL = SQL.strip('\n')
+ if SQL == '':
+ SQL = "--modified SQL"
+ return make_json_response(
+ data=SQL,
+ status=200
+ )
+
+ @BaseTableView.check_precondition
+ def update(self, gid, sid, did, scid, tid, ptid):
+ """
+ This function will update an existing table object
+
+ Args:
+ gid: Server Group ID
+ sid: Server ID
+ did: Database ID
+ scid: Schema ID
+ tid: Table ID
+ ptid: Partition Table ID
+ """
+ data = request.form if request.form else json.loads(
+ request.data, encoding='utf-8'
+ )
+
+ for k, v in data.items():
+ try:
+ data[k] = json.loads(v, encoding='utf-8')
+ except (ValueError, TypeError, KeyError):
+ data[k] = v
+
+ try:
+ SQL = render_template("/".join([self.partition_template_path,
+ 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ ptid=ptid, datlastsysoid=self.datlastsysoid)
+ status, res = self.conn.execute_dict(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ return super(PartitionsView, self).update(
+ gid, sid, did, scid, ptid, data, res, parent_id=tid)
+ except Exception as e:
+ return internal_server_error(errormsg=str(e))
+
+ @BaseTableView.check_precondition
+ def truncate(self, gid, sid, did, scid, tid, ptid):
+ """
+ This function will truncate the table object
+
+ Args:
+ gid: Server Group ID
+ sid: Server ID
+ did: Database ID
+ scid: Schema ID
+ tid: Table ID
+ """
+
+ try:
+ SQL = render_template("/".join([self.partition_template_path,
+ 'properties.sql']),
+ did=did, scid=scid, tid=tid,
+ ptid=ptid, datlastsysoid=self.datlastsysoid)
+ status, res = self.conn.execute_dict(SQL)
+ if not status:
+ return internal_server_error(errormsg=res)
+
+ return super(PartitionsView, self).truncate(gid, sid, did, scid, ptid, res)
+
+ except Exception as e:
+ return internal_server_error(errormsg=str(e))
+
+
+PartitionsView.register_node_view(blueprint)
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/coll-partition.png b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/coll-partition.png
new file mode 100644
index 000000000..8536c66af
Binary files /dev/null and b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/coll-partition.png differ
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/partition.png b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/partition.png
new file mode 100644
index 000000000..4d6dd8b35
Binary files /dev/null and b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/img/partition.png differ
diff --git a/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/js/partition.js b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/js/partition.js
new file mode 100644
index 000000000..63adb5ec5
--- /dev/null
+++ b/web/pgadmin/browser/server_groups/servers/databases/schemas/tables/partitions/static/js/partition.js
@@ -0,0 +1,1276 @@
+define([
+ 'sources/gettext', 'sources/url_for', 'jquery', 'underscore',
+ 'underscore.string', 'pgadmin', 'pgadmin.browser', 'backform', 'alertify',
+ 'pgadmin.browser.collection', 'pgadmin.browser.table.partition.utils'
+],
+function(gettext, url_for, $, _, S, pgAdmin, pgBrowser, Backform, alertify) {
+
+ if (!pgBrowser.Nodes['coll-partition']) {
+ var databases = pgAdmin.Browser.Nodes['coll-partition'] =
+ pgAdmin.Browser.Collection.extend({
+ node: 'partition',
+ label: gettext('Partitions'),
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
+ type: 'coll-partition',
+ columns: [
+ 'name', 'schema', 'partition_value', 'is_partitioned', 'description'
+ ],
+ hasStatistics: true
+ });
+ };
+
+ if (!pgBrowser.Nodes['partition']) {
+ pgAdmin.Browser.Nodes['partition'] = pgBrowser.Node.extend({
+ getTreeNodeHierarchy: pgBrowser.tableChildTreeNodeHierarchy,
+ parent_type: 'table',
+ collection_type: 'coll-partition',
+ type: 'partition',
+ label: gettext('Partition'),
+ hasSQL: true,
+ hasDepends: true,
+ hasStatistics: true,
+ statsPrettifyFields: ['Size', 'Indexes size', 'Table size',
+ 'Toast table size', 'Tuple length',
+ 'Dead tuple length', 'Free space'],
+ sqlAlterHelp: 'sql-altertable.html',
+ sqlCreateHelp: 'sql-createtable.html',
+ dialogHelp: url_for('help.static', {'filename': 'table_dialog.html'}),
+ hasScriptTypes: ['create'],
+ height: '95%',
+ width: '85%',
+ Init: function() {
+        /* Avoid multiple registration of menus */
+ if (this.initialized)
+ return;
+
+ this.initialized = true;
+
+ pgBrowser.add_menus([{
+ name: 'truncate_table', node: 'partition', module: this,
+ applies: ['object', 'context'], callback: 'truncate_table',
+ category: 'Truncate', priority: 3, label: gettext('Truncate'),
+ icon: 'fa fa-eraser', enable : 'canCreate'
+ },{
+ name: 'truncate_table_cascade', node: 'partition', module: this,
+ applies: ['object', 'context'], callback: 'truncate_table_cascade',
+ category: 'Truncate', priority: 3, label: gettext('Truncate Cascade'),
+ icon: 'fa fa-eraser', enable : 'canCreate'
+ },{
+ // To enable/disable all triggers for the table
+ name: 'enable_all_triggers', node: 'partition', module: this,
+ applies: ['object', 'context'], callback: 'enable_triggers_on_table',
+ category: 'Trigger(s)', priority: 4, label: gettext('Enable All'),
+ icon: 'fa fa-check', enable : 'canCreate_with_trigger_enable'
+ },{
+ name: 'disable_all_triggers', node: 'partition', module: this,
+ applies: ['object', 'context'], callback: 'disable_triggers_on_table',
+ category: 'Trigger(s)', priority: 4, label: gettext('Disable All'),
+ icon: 'fa fa-times', enable : 'canCreate_with_trigger_disable'
+ },{
+ name: 'reset_table_stats', node: 'partition', module: this,
+ applies: ['object', 'context'], callback: 'reset_table_stats',
+ category: 'Reset', priority: 4, label: gettext('Reset Statistics'),
+ icon: 'fa fa-bar-chart', enable : 'canCreate'
+ },{
+ name: 'detach_partition', node: 'partition', module: this,
+ applies: ['object', 'context'], callback: 'detach_partition',
+ priority: 2, label: gettext('Detach Partition'),
+ icon: 'fa fa-remove'
+ }
+ ]);
+ },
+ getTreeNodeHierarchy: function(i) {
+ var idx = 0,
+ res = {},
+ t = pgBrowser.tree;
+
+ do {
+ d = t.itemData(i);
+ if (
+ d._type in pgBrowser.Nodes && pgBrowser.Nodes[d._type].hasId
+ ) {
+ if (d._type == 'partition' && 'partition' in res) {
+ if (!('table' in res)) {
+ res['table'] = _.extend({}, d, {'priority': idx});
+ idx -= 1;
+ }
+ } else if (d._type == 'table') {
+ if (!('table' in res)) {
+ res['table'] = _.extend({}, d, {'priority': idx});
+ idx -= 1;
+ }
+ } else {
+ res[d._type] = _.extend({}, d, {'priority': idx});
+ idx -= 1;
+ }
+ }
+ i = t.hasParent(i) ? t.parent(i) : null;
+ } while (i);
+
+ return res;
+ },
+ generate_url: function(item, type, d, with_id, info) {
+ if (_.indexOf([
+ 'stats', 'statistics', 'dependency', 'dependent', 'reset',
+ 'get_relations', 'get_oftype', 'get_attach_tables'
+ ], type) == -1) {
+ return pgBrowser.Node.generate_url.apply(this, arguments);
+ }
+
+ if (type == 'statistics') {
+ type = 'stats';
+ }
+
+ info = (_.isUndefined(item) || _.isNull(item)) ?
+ info || {} : this.getTreeNodeHierarchy(item);
+
+ return S('table/%s/%s/%s/%s/%s/%s').sprintf(
+ encodeURIComponent(type), encodeURIComponent(info['server-group']._id),
+ encodeURIComponent(info['server']._id),
+ encodeURIComponent(info['database']._id),
+ encodeURIComponent(info['partition'].schema_id),
+ encodeURIComponent(info['partition']._id)
+ ).value();
+ },
+ canDrop: pgBrowser.Nodes['schema'].canChildDrop,
+ canDropCascade: pgBrowser.Nodes['schema'].canChildDrop,
+ callbacks: {
+ /* Enable trigger(s) on table */
+ enable_triggers_on_table: function(args) {
+ var params = {'enable': true };
+ this.callbacks.set_triggers.apply(this, [args, params]);
+ },
+ /* Disable trigger(s) on table */
+ disable_triggers_on_table: function(args) {
+ var params = {'enable': false };
+ this.callbacks.set_triggers.apply(this, [args, params]);
+ },
+ set_triggers: function(args, params) {
+ // This function will send request to enable or
+ // disable triggers on table level
+ var input = args || {};
+ obj = this,
+ t = pgBrowser.tree,
+ i = input.item || t.selected(),
+ d = i && i.length == 1 ? t.itemData(i) : undefined;
+ if (!d)
+ return false;
+
+ $.ajax({
+ url: obj.generate_url(i, 'set_trigger' , d, true),
+ type:'PUT',
+ data: params,
+ dataType: "json",
+ success: function(res) {
+ if (res.success == 1) {
+ alertify.success(res.info);
+ t.unload(i);
+ t.setInode(i);
+ t.deselect(i);
+ setTimeout(function() {
+ t.select(i);
+ }, 10);
+ }
+ },
+ error: function(xhr, status, error) {
+ try {
+ var err = $.parseJSON(xhr.responseText);
+ if (err.success == 0) {
+ alertify.error(err.errormsg);
+ }
+ } catch (e) {}
+ t.unload(i);
+ }
+ });
+ },
+ /* Truncate table */
+ truncate_table: function(args) {
+ var params = {'cascade': false };
+ this.callbacks.truncate.apply(this, [args, params]);
+ },
+ /* Truncate table with cascade */
+ truncate_table_cascade: function(args) {
+ var params = {'cascade': true };
+ this.callbacks.truncate.apply(this, [args, params]);
+ },
+ truncate: function(args, params) {
+ var input = args || {};
+ obj = this,
+ t = pgBrowser.tree,
+ i = input.item || t.selected(),
+ d = i && i.length == 1 ? t.itemData(i) : undefined;
+
+ if (!d)
+ return false;
+
+ alertify.confirm(
+ gettext('Truncate Table'),
+ S(gettext('Are you sure you want to truncate table %s?')).sprintf(d.label).value(),
+ function (e) {
+ if (e) {
+ var data = d;
+ $.ajax({
+ url: obj.generate_url(i, 'truncate' , d, true),
+ type:'PUT',
+ data: params,
+ dataType: "json",
+ success: function(res) {
+ if (res.success == 1) {
+ alertify.success(res.info);
+ t.removeIcon(i);
+ data.icon = 'icon-table';
+ t.addIcon(i, {icon: data.icon});
+ t.unload(i);
+ t.setInode(i);
+ t.deselect(i);
+ // Fetch updated data from server
+ setTimeout(function() {
+ t.select(i);
+ }, 10);
+ }
+ },
+ error: function(xhr, status, error) {
+ try {
+ var err = $.parseJSON(xhr.responseText);
+ if (err.success == 0) {
+ alertify.error(err.errormsg);
+ }
+ } catch (e) {}
+ t.unload(i);
+ }
+ });
+ }},
+ function() {}
+ );
+ },
+ reset_table_stats: function(args) {
+ var input = args || {},
+ obj = this,
+ t = pgBrowser.tree,
+ i = input.item || t.selected(),
+ d = i && i.length == 1 ? t.itemData(i) : undefined;
+
+ if (!d)
+ return false;
+
+ alertify.confirm(
+ gettext('Reset statistics'),
+ S(gettext('Are you sure you want to reset the statistics for table %s?')).sprintf(d._label).value(),
+ function (e) {
+ if (e) {
+ var data = d;
+ $.ajax({
+ url: obj.generate_url(i, 'reset' , d, true),
+ type:'DELETE',
+ success: function(res) {
+ if (res.success == 1) {
+ alertify.success(res.info);
+ t.removeIcon(i);
+ data.icon = 'icon-table';
+ t.addIcon(i, {icon: data.icon});
+ t.unload(i);
+ t.setInode(i);
+ t.deselect(i);
+ // Fetch updated data from server
+ setTimeout(function() {
+ t.select(i);
+ }, 10);
+ }
+ },
+ error: function(xhr, status, error) {
+ try {
+ var err = $.parseJSON(xhr.responseText);
+ if (err.success == 0) {
+ alertify.error(err.errormsg);
+ }
+ } catch (e) {}
+ t.unload(i);
+ }
+ });
+ }
+ },
+ function() {}
+ );
+ },
+ detach_partition: function(args) {
+ var input = args || {},
+ obj = this,
+ t = pgBrowser.tree,
+ i = input.item || t.selected(),
+ d = i && i.length == 1 ? t.itemData(i) : undefined;
+
+ if (!d)
+ return false;
+
+ alertify.confirm(
+ gettext('Detach Partition'),
+ S(gettext('Are you sure you want to detach the partition %s?')).sprintf(d._label).value(),
+ function (e) {
+ if (e) {
+ var data = d;
+ $.ajax({
+ url: obj.generate_url(i, 'detach' , d, true),
+ type:'PUT',
+ success: function(res) {
+ if (res.success == 1) {
+ alertify.success(res.info);
+ var n = t.next(i);
+ if (!n || !n.length) {
+ n = t.prev(i);
+ if (!n || !n.length) {
+ n = t.parent(i);
+ t.setInode(n, true);
+ }
+ }
+ t.remove(i);
+ if (n.length) {
+ t.select(n);
+ }
+ }
+ },
+ error: function(xhr, status, error) {
+ try {
+ var err = $.parseJSON(xhr.responseText);
+ if (err.success == 0) {
+ alertify.error(err.errormsg);
+ }
+ } catch (e) {}
+ }
+ });
+ }
+ },
+ function() {}
+ );
+ }
+ },
+ model: pgBrowser.Node.Model.extend({
+ defaults: {
+ name: undefined,
+ oid: undefined,
+ spcoid: undefined,
+ spcname: undefined,
+ relowner: undefined,
+ relacl: undefined,
+ relhasoids: undefined,
+ relhassubclass: undefined,
+ reltuples: undefined,
+ description: undefined,
+ conname: undefined,
+ conkey: undefined,
+ isrepl: undefined,
+ triggercount: undefined,
+ relpersistence: undefined,
+ fillfactor: undefined,
+ reloftype: undefined,
+ typname: undefined,
+ labels: undefined,
+ providers: undefined,
+ is_sys_table: undefined,
+ coll_inherits: [],
+ hastoasttable: true,
+ toast_autovacuum_enabled: false,
+ autovacuum_enabled: false,
+ primary_key: [],
+ partitions: [],
+ partition_type: 'range',
+ is_partitioned: false,
+ partition_value: undefined
+ },
+ // Default values!
+ initialize: function(attrs, args) {
+ var self = this;
+
+ if (_.size(attrs) === 0) {
+ var userInfo = pgBrowser.serverInfo[args.node_info.server._id].user,
+ schemaInfo = args.node_info.schema;
+
+ this.set({
+ 'relowner': userInfo.name, 'schema': schemaInfo._label
+ }, {silent: true});
+ }
+ pgBrowser.Node.Model.prototype.initialize.apply(this, arguments);
+
+ },
+ schema: [{
+ id: 'name', label: gettext('Name'), type: 'text',
+ mode: ['properties', 'create', 'edit'], disabled: 'inSchema'
+ },{
+ id: 'oid', label: gettext('OID'), type: 'text', mode: ['properties']
+ },{
+ id: 'relowner', label: gettext('Owner'), type: 'text', node: 'role',
+ mode: ['properties', 'create', 'edit'], select2: {allowClear: false},
+ disabled: 'inSchema', control: 'node-list-by-name'
+ },{
+ id: 'schema', label: gettext('Schema'), type: 'text', node: 'schema',
+ control: 'node-list-by-name', mode: ['create', 'edit', 'properties'],
+ disabled: 'inSchema', filter: function(d) {
+          // If the schema name starts with pg_* then we need to exclude it
+ if(d && d.label.match(/^pg_/))
+ {
+ return false;
+ }
+ return true;
+ }, cache_node: 'database', cache_level: 'database'
+ },{
+ id: 'spcname', label: gettext('Tablespace'), node: 'tablespace',
+ type: 'text', control: 'node-list-by-name', disabled: 'inSchema',
+ mode: ['properties', 'create', 'edit'],
+ filter: function(d) {
+          // Exclude the tablespace if its name matches "pg_global"
+ return (!(d && d.label.match(/pg_global/)))
+ }
+ },{
+ id: 'partition', type: 'group', label: gettext('Partition'),
+ mode: ['edit', 'create'], min_version: 100000,
+ visible: function(m) {
+ // Always show in case of create mode
+ if (m.isNew() || m.get('is_partitioned'))
+ return true;
+ return false;
+ }
+ },{
+ id: 'is_partitioned', label:gettext('Partitioned Table?'), cell: 'switch',
+ type: 'switch', mode: ['properties', 'create', 'edit'],
+ visible: function(m) {
+ if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
+ && !_.isUndefined(m.node_info.server.version) &&
+ m.node_info.server.version >= 100000)
+ return true;
+
+ return false;
+ },
+ disabled: function(m) {
+ if (!m.isNew())
+ return true;
+ return false;
+ }
+ },{
+ id: 'description', label: gettext('Comment'), type: 'multiline',
+ mode: ['properties', 'create', 'edit'], disabled: 'inSchema'
+ },
+ {
+ id: 'partition_value', label:gettext('Partition Scheme'),
+ type: 'text', visible: false
+ },{
+ id: 'coll_inherits', label: gettext('Inherited from table(s)'),
+ type: 'text', group: gettext('Advanced'), mode: ['properties']
+ },{
+ id: 'Columns', type: 'group', label: gettext('Columns'),
+ mode: ['edit', 'create'], min_version: 100000,
+ visible: function(m) {
+ // Always hide in case of partition table.
+ return false;
+ }
+ },{
+ // Tab control for columns
+ id: 'columns', label: gettext('Columns'), type: 'collection',
+ group: gettext('Columns'),
+ model: pgBrowser.Nodes['column'].model,
+ subnode: pgBrowser.Nodes['column'].model,
+ mode: ['create', 'edit'],
+ disabled: function(m) {
+ // In case of partitioned table remove inherited columns
+ if (m.isNew() && m.get('is_partitioned')) {
+ setTimeout(function() {
+ var coll = m.get('columns');
+ coll.remove(coll.filter(function(model) {
+ if (_.isUndefined(model.get('inheritedfrom')))
+ return false;
+ return true;
+ }));
+ }, 10);
+ }
+
+ if(this.node_info && 'catalog' in this.node_info)
+ {
+ return true;
+ }
+ return false;
+ },
+ deps: ['typname', 'is_partitioned'],
+ canAdd: 'check_grid_add_condition',
+ canEdit: true, canDelete: true,
+ // For each row edit/delete button enable/disable
+ canEditRow: 'check_grid_row_edit_delete',
+ canDeleteRow: 'check_grid_row_edit_delete',
+ uniqueCol : ['name'],
+ columns : ['name' , 'cltype', 'attlen', 'attprecision', 'attnotnull', 'is_primary_key'],
+ control: Backform.UniqueColCollectionControl.extend({
+ initialize: function() {
+ Backform.UniqueColCollectionControl.prototype.initialize.apply(this, arguments);
+ var self = this,
+ collection = self.model.get(self.field.get('name'));
+
+ collection.on("change:is_primary_key", function(m) {
+ var primary_key_coll = self.model.get('primary_key'),
+ column_name = m.get('name'),
+ primary_key;
+
+ if(m.get('is_primary_key')) {
+ // Add column to primary key.
+ if (primary_key_coll.length < 1) {
+ primary_key = new (primary_key_coll.model)({}, {
+ top: self.model,
+ collection: primary_key_coll,
+ handler: primary_key_coll
+ });
+ primary_key_coll.add(primary_key);
+ } else {
+ primary_key = primary_key_coll.first();
+ }
+ // Do not alter existing primary key columns.
+ if (_.isUndefined(primary_key.get('oid'))) {
+ var primary_key_column_coll = primary_key.get('columns'),
+ primary_key_column_exist = primary_key_column_coll.where({column:column_name});
+
+ if (primary_key_column_exist.length == 0) {
+ var primary_key_column = new (primary_key_column_coll.model)(
+ {column: column_name}, { silent: true,
+ top: self.model,
+ collection: primary_key_coll,
+ handler: primary_key_coll
+ });
+
+ primary_key_column_coll.add(primary_key_column);
+ }
+
+ primary_key_column_coll.trigger('pgadmin:multicolumn:updated', primary_key_column_coll);
+ }
+
+ } else {
+ // remove column from primary key.
+ if (primary_key_coll.length > 0) {
+ var primary_key = primary_key_coll.first();
+ // Do not alter existing primary key columns.
+ if (!_.isUndefined(primary_key.get('oid'))) {
+ return;
+ }
+
+ var primary_key_column_coll = primary_key.get('columns'),
+ removedCols = primary_key_column_coll.where({column:column_name});
+ if (removedCols.length > 0) {
+ primary_key_column_coll.remove(removedCols);
+ _.each(removedCols, function(m) {
+ m.destroy();
+ })
+ if (primary_key_column_coll.length == 0) {
+ setTimeout(function () {
+                            // There will be only one primary key so remove the first one.
+ primary_key_coll.remove(primary_key_coll.first());
+ /* Ideally above line of code should be "primary_key_coll.reset()".
+ * But our custom DataCollection (extended from Backbone collection in datamodel.js)
+ * does not respond to reset event, it only supports add, remove, change events.
+ * And hence no custom event listeners/validators get called for reset event.
+ */
+ }, 10);
+ }
+ }
+ primary_key_column_coll.trigger('pgadmin:multicolumn:updated', primary_key_column_coll);
+ }
+ }
+ })
+ },
+ remove: function() {
+ var collection = this.model.get(this.field.get('name'));
+ if (collection) {
+ collection.off("change:is_primary_key");
+ }
+
+ Backform.UniqueColCollectionControl.prototype.remove.apply(this, arguments);
+ }
+ }),
+ allowMultipleEmptyRow: false
+ },{
+ id: 'inherited_tables_cnt', label: gettext('Inherited tables count'),
+ type: 'text', mode: ['properties'], group: gettext('Advanced'),
+ disabled: 'inSchema'
+ },{
+ // Here we will create tab control for constraints
+ type: 'nested', control: 'tab', group: gettext('Constraints'),
+ mode: ['edit', 'create'],
+ schema: [{
+ id: 'primary_key', label: gettext('Primary key'),
+ model: pgBrowser.Nodes['primary_key'].model,
+ subnode: pgBrowser.Nodes['primary_key'].model,
+ editable: false, type: 'collection',
+ group: gettext('Primary Key'), mode: ['edit', 'create'],
+ canEdit: true, canDelete: true, deps:['is_partitioned'],
+ control: 'unique-col-collection',
+ columns : ['name', 'columns'],
+ canAdd: function(m) {
+ if (m.get('is_partitioned')) {
+ setTimeout(function() {
+ var coll = m.get('primary_key');
+ coll.remove(coll.filter(function(model) {
+ return true;
+ }));
+ }, 10);
+ return false;
+ }
+
+ return true;
+ },
+ canAddRow: function(m) {
+ // User can only add one primary key
+ var columns = m.get('columns');
+
+ return (m.get('primary_key') &&
+ m.get('primary_key').length < 1 &&
+ _.some(columns.pluck('name')));
+ }
+ },{
+ id: 'foreign_key', label: gettext('Foreign key'),
+ model: pgBrowser.Nodes['foreign_key'].model,
+ subnode: pgBrowser.Nodes['foreign_key'].model,
+ editable: false, type: 'collection',
+ group: gettext('Foreign Key'), mode: ['edit', 'create'],
+ canEdit: true, canDelete: true, deps:['is_partitioned'],
+ control: 'unique-col-collection',
+ canAdd: function(m) {
+ if (m.get('is_partitioned')) {
+ setTimeout(function() {
+ var coll = m.get('foreign_key');
+ coll.remove(coll.filter(function(model) {
+ return true;
+ }));
+ }, 10);
+ return false;
+ }
+
+ return true;
+ },
+ columns : ['name', 'columns'],
+ canAddRow: function(m) {
+ // User can only add if there is at least one column with name.
+ var columns = m.get('columns');
+ return _.some(columns.pluck('name'));
+ }
+ },{
+ id: 'check_constraint', label: gettext('Check constraint'),
+ model: pgBrowser.Nodes['check_constraints'].model,
+ subnode: pgBrowser.Nodes['check_constraints'].model,
+ editable: false, type: 'collection',
+ group: gettext('Check'), mode: ['edit', 'create'],
+ canEdit: true, canDelete: true, deps:['is_partitioned'],
+ control: 'unique-col-collection',
+ canAdd: true,
+ columns : ['name', 'consrc']
+ },{
+ id: 'unique_constraint', label: gettext('Unique Constraint'),
+ model: pgBrowser.Nodes['unique_constraint'].model,
+ subnode: pgBrowser.Nodes['unique_constraint'].model,
+ editable: false, type: 'collection',
+ group: gettext('Unique'), mode: ['edit', 'create'],
+ canEdit: true, canDelete: true, deps:['is_partitioned'],
+ control: 'unique-col-collection',
+ columns : ['name', 'columns'],
+ canAdd: function(m) {
+ if (m.get('is_partitioned')) {
+ setTimeout(function() {
+ var coll = m.get('unique_constraint');
+ coll.remove(coll.filter(function(model) {
+ return true;
+ }));
+ }, 10);
+ return false;
+ }
+
+ return true;
+ },
+ canAddRow: function(m) {
+ // User can only add if there is at least one column with name.
+ var columns = m.get('columns');
+ return _.some(columns.pluck('name'));
+ }
+ },{
+ id: 'exclude_constraint', label: gettext('Exclude constraint'),
+ model: pgBrowser.Nodes['exclusion_constraint'].model,
+ subnode: pgBrowser.Nodes['exclusion_constraint'].model,
+ editable: false, type: 'collection',
+ group: gettext('Exclude'), mode: ['edit', 'create'],
+ canEdit: true, canDelete: true, deps:['is_partitioned'],
+ control: 'unique-col-collection',
+ columns : ['name', 'columns', 'constraint'],
+ canAdd: function(m) {
+ if (m.get('is_partitioned')) {
+ setTimeout(function() {
+ var coll = m.get('exclude_constraint');
+ coll.remove(coll.filter(function(model) {
+ return true;
+ }));
+ }, 10);
+ return false;
+ }
+
+ return true;
+ },
+ canAddRow: function(m) {
+ // User can only add if there is at least one column with name.
+ var columns = m.get('columns');
+ return _.some(columns.pluck('name'));
+ }
+ }]
+ },{
+ id: 'typname', label: gettext('Of type'), type: 'text',
+ control: 'node-ajax-options', mode: ['properties', 'create', 'edit'],
+ disabled: 'checkOfType', url: 'get_oftype', group: gettext('Advanced'),
+ deps: ['coll_inherits', 'is_partitioned'], transform: function(data, cell) {
+ var control = cell || this,
+ m = control.model;
+ m.of_types_tables = data;
+ return data;
+ },
+ control: Backform.NodeAjaxOptionsControl.extend({
+ // When of_types changes we need to clear columns collection
+ onChange: function() {
+ Backform.NodeAjaxOptionsControl.prototype.onChange.apply(this, arguments);
+ var self = this,
+ tbl_oid = undefined,
+ tbl_name = self.model.get('typname'),
+ data = undefined,
+ arg = undefined,
+ column_collection = self.model.get('columns');
+
+ if (!_.isUndefined(tbl_name) &&
+ tbl_name !== '' && column_collection.length !== 0) {
+ var msg = gettext('Changing of type table will clear columns collection');
+ alertify.confirm(msg, function (e) {
+ if (e) {
+ // User clicks Ok, lets clear columns collection
+ column_collection.reset();
+ } else {
+ return this;
+ }
+ });
+ } else if (!_.isUndefined(tbl_name) && tbl_name === '') {
+ column_collection.reset();
+ }
+
+ // Run Ajax now to fetch columns
+ if (!_.isUndefined(tbl_name) && tbl_name !== '') {
+ arg = { 'tname': tbl_name }
+ data = self.model.fetch_columns_ajax.apply(self, [arg]);
+ // Add into column collection
+ column_collection.set(data, { merge:false,remove:false });
+ }
+ }
+ })
+ },{
+ id: 'fillfactor', label: gettext('Fill factor'), type: 'int',
+ mode: ['create', 'edit'], min: 10, max: 100,
+ disabled: 'inSchema',group: gettext('Advanced')
+ },{
+ id: 'relhasoids', label: gettext('Has OIDs?'), cell: 'switch',
+ type: 'switch', mode: ['properties', 'create', 'edit'],
+ disabled: 'inSchema', group: gettext('Advanced')
+ },{
+ id: 'relpersistence', label: gettext('Unlogged?'), cell: 'switch',
+ type: 'switch', mode: ['properties', 'create', 'edit'],
+ disabled: 'inSchemaWithModelCheck',
+ group: gettext('Advanced')
+ },{
+ id: 'conname', label: gettext('Primary key'), cell: 'string',
+ type: 'text', mode: ['properties'], group: gettext('Advanced'),
+ disabled: 'inSchema'
+ },{
+ id: 'reltuples', label: gettext('Rows (estimated)'), cell: 'string',
+ type: 'text', mode: ['properties'], group: gettext('Advanced'),
+ disabled: 'inSchema'
+ },{
+ id: 'rows_cnt', label: gettext('Rows (counted)'), cell: 'string',
+ type: 'text', mode: ['properties'], group: gettext('Advanced'),
+ disabled: 'inSchema'
+ },{
+ id: 'relhassubclass', label: gettext('Inherits tables?'), cell: 'switch',
+ type: 'switch', mode: ['properties'], group: gettext('Advanced'),
+ disabled: 'inSchema'
+ },{
+ id: 'is_sys_table', label: gettext('System table?'), cell: 'switch',
+ type: 'switch', mode: ['properties'],
+ disabled: 'inSchema'
+ },{
+ type: 'nested', control: 'fieldset', label: gettext('Like'),
+ group: gettext('Advanced'),
+ schema:[{
+ id: 'like_relation', label: gettext('Relation'),
+ type: 'text', mode: ['create', 'edit'], deps: ['typname'],
+ control: 'node-ajax-options', url: 'get_relations',
+ disabled: 'isLikeDisable', group: gettext('Like')
+ },{
+ id: 'like_default_value', label: gettext('With default values?'),
+ type: 'switch', mode: ['create', 'edit'], deps: ['typname'],
+ disabled: 'isLikeDisable', group: gettext('Like')
+ },{
+ id: 'like_constraints', label: gettext('With constraints?'),
+ type: 'switch', mode: ['create', 'edit'], deps: ['typname'],
+ disabled: 'isLikeDisable', group: gettext('Like')
+ },{
+ id: 'like_indexes', label: gettext('With indexes?'),
+ type: 'switch', mode: ['create', 'edit'], deps: ['typname'],
+ disabled: 'isLikeDisable', group: gettext('Like')
+ },{
+ id: 'like_storage', label: gettext('With storage?'),
+ type: 'switch', mode: ['create', 'edit'], deps: ['typname'],
+ disabled: 'isLikeDisable', group: gettext('Like')
+ },{
+ id: 'like_comments', label: gettext('With comments?'),
+ type: 'switch', mode: ['create', 'edit'], deps: ['typname'],
+ disabled: 'isLikeDisable', group: gettext('Like')
+ }]
+ },{
+ id: 'partition_type', label:gettext('Partition Type'),
+ editable: false, type: 'select2', select2: {allowClear: false},
+ group: 'partition', deps: ['is_partitioned'],
+ options:[{
+ label: 'Range', value: 'range'
+ },{
+ label: 'List', value: 'list'
+ }],
+ mode:['create'],
+ visible: function(m) {
+ if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
+ && !_.isUndefined(m.node_info.server.version) &&
+ m.node_info.server.version >= 100000)
+ return true;
+
+ return false;
+ },
+ disabled: function(m) {
+ if (!m.isNew() || !m.get('is_partitioned'))
+ return true;
+ return false;
+ }
+ },{
+ id: 'partition_keys', label:gettext('Partition Keys'),
+ model: Backform.PartitionKeyModel,
+ subnode: Backform.PartitionKeyModel,
+ editable: true, type: 'collection',
+ group: 'partition', mode: ['create'],
+ deps: ['is_partitioned', 'partition_type'],
+ canEdit: false, canDelete: true,
+ control: 'sub-node-collection',
+ canAdd: function(m) {
+ if (m.isNew() && m.get('is_partitioned'))
+ return true;
+ return false;
+ },
+ canAddRow: function(m) {
+ var columns = m.get('columns');
+ var max_row_count = 1000;
+
+ if (m.get('partition_type') && m.get('partition_type') == 'list')
+ max_row_count = 1;
+
+ return (m.get('partition_keys') &&
+ m.get('partition_keys').length < max_row_count &&
+ _.some(columns.pluck('name'))
+ );
+ },
+ visible: function(m) {
+ if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
+ && !_.isUndefined(m.node_info.server.version) &&
+ m.node_info.server.version >= 100000)
+ return true;
+
+ return false;
+ },
+ disabled: function(m) {
+ if (m.get('partition_keys') && m.get('partition_keys').models.length > 0) {
+ setTimeout(function () {
+ var coll = m.get('partition_keys');
+ coll.remove(coll.filter(function(model) {
+ return true;
+ }));
+ }, 10);
+ }
+ }
+ },{
+ id: 'partition_scheme', label: gettext('Partition Scheme'),
+ type: 'note', group: 'partition', mode: ['edit'],
+ visible: function(m) {
+ if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
+ && !_.isUndefined(m.node_info.server.version) &&
+ m.node_info.server.version >= 100000)
+ return true;
+
+ return false;
+ },
+ disabled: function(m) {
+ if (!m.isNew()) {
+ this.text = m.get('partition_scheme');
+ }
+ }
+ },{
+ id: 'partitions', label:gettext('Partitions'),
+ model: Backform.PartitionsModel,
+ subnode: Backform.PartitionsModel,
+ editable: true, type: 'collection',
+ group: 'partition', mode: ['edit', 'create'],
+ deps: ['is_partitioned', 'partition_type'],
+ canEdit: false, canDelete: true,
+ customDeleteTitle: gettext('Detach Partition'),
+ customDeleteMsg: gettext('Are you sure you wish to detach this partition?'),
+ columns:['is_attach', 'partition_name', 'values_from', 'values_to', 'values_in'],
+ control: Backform.SubNodeCollectionControl.extend({
+ row: Backgrid.PartitionRow,
+ initialize: function() {
+ Backform.SubNodeCollectionControl.prototype.initialize.apply(this, arguments);
+ var self = this;
+ if (!this.model.isNew()) {
+ var node = this.field.get('schema_node'),
+ node_info = this.field.get('node_info');
+
+ // Make ajax call to get the tables to be attached
+ $.ajax({
+ url: node.generate_url.apply(
+ node, [
+ null, 'get_attach_tables', this.field.get('node_data'),
+ true, node_info
+ ]),
+
+ type: 'GET',
+ async: false,
+ success: function(res) {
+ if (res.success == 1) {
+ self.model.table_options = res.data;
+ }
+ else {
+ alertify.alert(
+ 'Error fetching tables to be attached', res.data.result
+ );
+ }
+ },
+ error: function(e) {
+ var errmsg = $.parseJSON(e.responseText);
+ alertify.alert('Error fetching tables to be attached.', errmsg.errormsg);
+ }
+ });
+ }
+ }
+ }
+ ),
+ canAdd: function(m) {
+ if (m.get('is_partitioned'))
+ return true;
+ return false;
+ },
+ visible: function(m) {
+ if(!_.isUndefined(m.node_info) && !_.isUndefined(m.node_info.server)
+ && !_.isUndefined(m.node_info.server.version) &&
+ m.node_info.server.version >= 100000)
+ return true;
+
+ return false;
+ },
+ disabled: function(m) {
+ if (m.isNew() && m.get('partitions') && m.get('partitions').models.length > 0) {
+ setTimeout(function () {
+ var coll = m.get('partitions');
+ coll.remove(coll.filter(function(model) {
+ return true;
+ }));
+ }, 10);
+ }
+ }
+ },{
+ id: 'partition_note', label: gettext('Partition'),
+ type: 'note', group: 'partition',
+ text: gettext('Above control will be used to Create/Attach/Detach partitions.
' +
+ '
', + gettext('Example'), + ':', + gettext("Let's say, we want to create a partition table based per year for the column 'saledate', having datatype 'date/timestamp', then we need to specify the expression as 'extract(YEAR from saledate)' as partition key."), + '