Fixed cognitive complexity issues reported by SonarQube.

pull/33/head
Nikhil Mohite 2020-07-14 15:33:54 +05:30 committed by Akshay Joshi
parent 1889c256e1
commit 0013a3b047
3 changed files with 567 additions and 445 deletions
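The change applied across all three files is a plain extract-method refactoring: deeply nested branches are pulled out of long view methods into small private helpers (often marked @staticmethod), so the cognitive-complexity score SonarQube assigns to the enclosing method drops while behaviour stays the same. A minimal, self-contained sketch of the idea, modelled on the _check_rlspolicy_support hunk below; the wrapper function names and the exact response shape are illustrative, not pgAdmin's API:

    # Illustrative sketch only; wrapper names are not pgAdmin's.
    # Before: the nested block sits inline, and every level of nesting
    # adds to the caller's cognitive-complexity score.
    def fetch_properties_before(res):
        if 'rlspolicy' in res['rows'][0]:
            if res['rows'][0]['rlspolicy'] == "true":
                res['rows'][0]['rlspolicy'] = True
            if res['rows'][0]['forcerlspolicy'] == "true":
                res['rows'][0]['forcerlspolicy'] = True
        return res

    # After: the nested block lives in a helper and the caller stays flat.
    def _check_rlspolicy_support(res):
        if 'rlspolicy' in res['rows'][0]:
            if res['rows'][0]['rlspolicy'] == "true":
                res['rows'][0]['rlspolicy'] = True
            if res['rows'][0]['forcerlspolicy'] == "true":
                res['rows'][0]['forcerlspolicy'] = True

    def fetch_properties_after(res):
        _check_rlspolicy_support(res)
        return res

    if __name__ == '__main__':
        sample = {'rows': [{'rlspolicy': "true", 'forcerlspolicy': "false"}]}
        print(fetch_properties_after(sample))  # 'rlspolicy' becomes True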


@ -600,6 +600,19 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
gid, sid, did, scid, tid, res=res
)
@staticmethod
def _check_rlspolicy_support(res):
# Check whether 'rlspolicy' is in the response as it is supported
# for version 9.5 and above
if 'rlspolicy' in res['rows'][0]:
# Set the value of rls policy
if res['rows'][0]['rlspolicy'] == "true":
res['rows'][0]['rlspolicy'] = True
# Set the value of force rls policy for table owner
if res['rows'][0]['forcerlspolicy'] == "true":
res['rows'][0]['forcerlspolicy'] = True
def _fetch_properties(self, did, scid, tid):
"""
This function is used to fetch the properties of the specified object
@ -608,16 +621,16 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
:param tid:
:return:
"""
SQL = render_template(
sql = render_template(
"/".join([self.table_template_path, 'properties.sql']),
did=did, scid=scid, tid=tid,
datlastsysoid=self.datlastsysoid
)
status, res = self.conn.execute_dict(SQL)
status, res = self.conn.execute_dict(sql)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
elif len(res['rows']) == 0:
return False, gone(
gettext("The specified table could not be found."))
@ -667,14 +680,7 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
# Check whether 'rlspolicy' is in the response as it is supported
# for version 9.5 and above
if 'rlspolicy' in res['rows'][0]:
# Set the value of rls policy
if res['rows'][0]['rlspolicy'] == "true":
res['rows'][0]['rlspolicy'] = True
# Set the value of force rls policy for table owner
if res['rows'][0]['forcerlspolicy'] == "true":
res['rows'][0]['forcerlspolicy'] = True
TableView._check_rlspolicy_support(res)
# If estimated rows are greater than threshold then
if estimated_row_count and \
@ -684,13 +690,13 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
# If estimated rows is lower than threshold then calculate the count
elif estimated_row_count and \
table_row_count_threshold >= estimated_row_count:
SQL = render_template(
sql = render_template(
"/".join(
[self.table_template_path, 'get_table_row_count.sql']
), data=res['rows'][0]
)
status, count = self.conn.execute_scalar(SQL)
status, count = self.conn.execute_scalar(sql)
if not status:
return False, internal_server_error(errormsg=count)
@ -921,6 +927,39 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
except Exception as e:
return internal_server_error(errormsg=str(e))
def _parser_data_input_from_client(self, data):
# Parse privilege data coming from client according to database format
if 'relacl' in data:
data['relacl'] = parse_priv_to_db(data['relacl'], self.acl)
# Parse & format columns
data = column_utils.parse_format_columns(data)
data = TableView.check_and_convert_name_to_string(data)
# 'coll_inherits' is Array but it comes as string from browser
# We will convert it again to list
if 'coll_inherits' in data and \
isinstance(data['coll_inherits'], str):
data['coll_inherits'] = json.loads(
data['coll_inherits'], encoding='utf-8'
)
if 'foreign_key' in data:
for c in data['foreign_key']:
schema, table = fkey_utils.get_parent(
self.conn, c['columns'][0]['references'])
c['remote_schema'] = schema
c['remote_table'] = table
def _check_for_table_partitions(self, data):
partitions_sql = ''
if self.is_table_partitioned(data):
data['relkind'] = 'p'
# create partition scheme
data['partition_scheme'] = self.get_partition_scheme(data)
partitions_sql = self.get_partitions_sql(data)
return partitions_sql
@BaseTableView.check_precondition
def create(self, gid, sid, did, scid):
"""
@ -962,50 +1001,25 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
)
# Parse privilege data coming from client according to database format
if 'relacl' in data:
data['relacl'] = parse_priv_to_db(data['relacl'], self.acl)
# Parse & format columns
data = column_utils.parse_format_columns(data)
data = TableView.check_and_convert_name_to_string(data)
# 'coll_inherits' is Array but it comes as string from browser
# We will convert it again to list
if 'coll_inherits' in data and \
isinstance(data['coll_inherits'], str):
data['coll_inherits'] = json.loads(
data['coll_inherits'], encoding='utf-8'
)
if 'foreign_key' in data:
for c in data['foreign_key']:
schema, table = fkey_utils.get_parent(
self.conn, c['columns'][0]['references'])
c['remote_schema'] = schema
c['remote_table'] = table
self._parser_data_input_from_client(data)
try:
partitions_sql = ''
if self.is_table_partitioned(data):
data['relkind'] = 'p'
# create partition scheme
data['partition_scheme'] = self.get_partition_scheme(data)
partitions_sql = self.get_partitions_sql(data)
partitions_sql = self._check_for_table_partitions(data)
# Update the vacuum table settings.
BaseTableView.update_vacuum_settings(self, 'vacuum_table', data)
# Update the vacuum toast table settings.
BaseTableView.update_vacuum_settings(self, 'vacuum_toast', data)
SQL = render_template(
sql = render_template(
"/".join([self.table_template_path, 'create.sql']),
data=data, conn=self.conn
)
# Append SQL for partitions
SQL += '\n' + partitions_sql
sql += '\n' + partitions_sql
status, res = self.conn.execute_scalar(SQL)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
@ -1018,22 +1032,22 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
data['name'] = data['name'][0:CONST_MAX_CHAR_COUNT]
# Get updated schema oid
SQL = render_template(
sql = render_template(
"/".join([self.table_template_path, 'get_schema_oid.sql']),
tname=data['name']
)
status, new_scid = self.conn.execute_scalar(SQL)
status, new_scid = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=new_scid)
# we need oid to add object in tree at browser
SQL = render_template(
sql = render_template(
"/".join([self.table_template_path, 'get_oid.sql']),
scid=new_scid, data=data
)
status, tid = self.conn.execute_scalar(SQL)
status, tid = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=tid)
@ -1690,9 +1704,9 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
else:
res = dict()
SQL = render_template("/".join([self.table_template_path,
sql = render_template("/".join([self.table_template_path,
'nodes.sql']), scid=scid)
status, tables = self.conn.execute_2darray(SQL)
status, tables = self.conn.execute_2darray(sql)
if not status:
current_app.logger.error(tables)
return False
@ -1706,6 +1720,16 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
return_ajax_response=False
)
# Get sub module data of a specified table for object
# comparison
self._get_sub_module_data_for_compare(sid, did, scid, data,
row, sub_modules)
res[row['name']] = data
return res
def _get_sub_module_data_for_compare(self, sid, did, scid, data,
row, sub_modules):
# Get sub module data of a specified table for object
# comparison
for module in sub_modules:
@ -1717,9 +1741,6 @@ class TableView(BaseTableView, DataTypeReader, VacuumSettings,
sid=sid, did=did, scid=scid, tid=row['oid'],
oid=None)
data[module] = sub_data
res[row['name']] = data
return res
SchemaDiffRegistry(blueprint.node_type, TableView)


@ -860,11 +860,10 @@ class IndexesView(PGChildNodeView, SchemaDiffObjectCompare):
elif diff_schema:
schema = diff_schema
sql = index_utils.get_reverse_engineered_sql(
self.conn, schema,
self.table, did, tid, idx,
self.datlastsysoid,
self.conn, schema=schema,
table=self.table, did=did, tid=tid, idx=idx,
datlastsysoid=self.datlastsysoid,
template_path=None, with_header=False)
drop_sql = ''
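The IndexesView hunk above switches the index_utils.get_reverse_engineered_sql call from positional to keyword arguments; with several integer ids (did, tid, idx) in a row, naming each value at the call site removes any chance of silently transposing them. A short sketch of the pattern, using an illustrative stand-in rather than the real index_utils signature:

    def reverse_engineer(conn, schema, table, did, tid, idx,
                         datlastsysoid, template_path=None, with_header=True):
        # Stand-in helper: just echoes what it received.
        return schema, table, did, tid, idx, with_header

    # Positional call: did/tid/idx are easy to swap without any error.
    reverse_engineer('conn', 'public', 'orders', 16384, 24576, 24580, 13000)

    # Keyword call: every argument is named where it is passed.
    reverse_engineer('conn', schema='public', table='orders', did=16384,
                     tid=24576, idx=24580, datlastsysoid=13000,
                     template_path=None, with_header=False)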


@ -170,9 +170,9 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
data['seclabels'] = seclabels
# We need to parse & convert ACL coming from database to json format
SQL = render_template("/".join([self.table_template_path, 'acl.sql']),
sql = render_template("/".join([self.table_template_path, 'acl.sql']),
tid=tid, scid=scid)
status, acl = self.conn.execute_dict(SQL)
status, acl = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=acl)
@ -196,11 +196,11 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
table_or_type = ''
# Get of_type table columns and add it into columns dict
if data['typoid']:
SQL = render_template("/".join([self.table_template_path,
sql = render_template("/".join([self.table_template_path,
'get_columns_for_table.sql']),
tid=data['typoid'])
status, res = self.conn.execute_dict(SQL)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
other_columns = res['rows']
@ -227,24 +227,24 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
def _get_inherited_tables(self, scid, data, other_columns):
# Return all tables which can be inherited & do not show
# system columns
SQL = render_template("/".join([self.table_template_path,
sql = render_template("/".join([self.table_template_path,
'get_inherits.sql']),
show_system_objects=False,
scid=scid
)
status, rset = self.conn.execute_2darray(SQL)
status, rset = self.conn.execute_2darray(sql)
if not status:
return True, rset
for row in rset['rows']:
if row['inherits'] in data['coll_inherits']:
# Fetch columns using inherited table OID
SQL = render_template("/".join(
sql = render_template("/".join(
[self.table_template_path,
'get_columns_for_table.sql']),
tid=row['oid']
)
status, res = self.conn.execute_dict(SQL)
status, res = self.conn.execute_dict(sql)
if not status:
return True, res
other_columns.extend(res['rows'][:])
@ -311,10 +311,10 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
)
# Specific sql to run against column to fetch dependents
SQL = render_template("/".join([self.table_template_path,
sql = render_template("/".join([self.table_template_path,
'depend.sql']), where=where)
status, res = self.conn.execute_dict(SQL)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
@ -429,36 +429,7 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
status=200
)
def get_reverse_engineered_sql(self, **kwargs):
"""
This function will create the reverse engineered sql for
the table object
Args:
kwargs
"""
did = kwargs.get('did')
scid = kwargs.get('scid')
tid = kwargs.get('tid')
main_sql = kwargs.get('main_sql')
data = kwargs.get('data')
json_resp = kwargs.get('json_resp', True)
diff_partition_sql = kwargs.get('diff_partition_sql', False)
"""
#####################################
# 1) Reverse engineered sql for TABLE
#####################################
"""
# Table & Schema declaration so that we can use them in child nodes
schema = data['schema']
table = data['name']
is_partitioned = 'is_partitioned' in data and data['is_partitioned']
sql_header = ''
data = self._formatter(did, scid, tid, data)
def _format_column_list(self, data):
# Now we have the full list of columns which we need
# to include in our create definition. Let's format them
if 'columns' in data:
@ -473,6 +444,17 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
c['cltype'], c['hasSqrBracket'] = \
column_utils.type_formatter(c['cltype'])
def _get_resql_for_table(self, did, scid, tid, data, json_resp, main_sql):
"""
#####################################
# Reverse engineered sql for TABLE
#####################################
"""
data = self._formatter(did, scid, tid, data)
# Format column list
self._format_column_list(data)
if json_resp:
sql_header = u"-- Table: {0}.{1}\n\n-- ".format(
data['schema'], data['name'])
@ -486,7 +468,6 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
# Add into main sql
main_sql.append(sql_header)
partition_main_sql = ""
# Parse privilege data
if 'relacl' in data:
@ -506,15 +487,17 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
table_sql = re.sub('\n{2,}', '\n\n', table_sql)
main_sql.append(table_sql.strip('\n'))
def _get_resql_for_index(self, did, tid, main_sql, json_resp, schema,
table):
"""
######################################
# 2) Reverse engineered sql for INDEX
# Reverse engineered sql for INDEX
######################################
"""
SQL = render_template("/".join([self.index_template_path,
sql = render_template("/".join([self.index_template_path,
'nodes.sql']), tid=tid)
status, rset = self.conn.execute_2darray(SQL)
status, rset = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=rset)
@ -533,22 +516,24 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
main_sql.append(index_sql.strip('\n'))
def _get_resql_for_row_security_policy(self, scid, tid, json_resp,
main_sql, schema, table):
"""
########################################################
# 2) Reverse engineered sql for ROW SECURITY POLICY
# Reverse engineered sql for ROW SECURITY POLICY
########################################################
"""
if self.manager.version >= 90500:
SQL = \
sql = \
render_template(
"/".join([self.row_security_policies_template_path,
'nodes.sql']), tid=tid)
status, rset = self.conn.execute_2darray(SQL)
status, rset = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
policy_sql = row_security_policies_utils.\
policy_sql = row_security_policies_utils. \
get_reverse_engineered_sql(
self.conn, schema=schema, table=table, scid=scid,
plid=row['oid'], datlastsysoid=self.datlastsysoid,
@ -560,14 +545,16 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
main_sql.append(policy_sql.strip('\n'))
def _get_resql_for_triggers(self, tid, json_resp, main_sql, schema,
table):
"""
########################################
# 3) Reverse engineered sql for TRIGGERS
# Reverse engineered sql for TRIGGERS
########################################
"""
SQL = render_template("/".join([self.trigger_template_path,
sql = render_template("/".join([self.trigger_template_path,
'nodes.sql']), tid=tid)
status, rset = self.conn.execute_2darray(SQL)
status, rset = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=rset)
@ -583,18 +570,19 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
trigger_sql = re.sub('\n{2,}', '\n\n', trigger_sql)
main_sql.append(trigger_sql)
def _get_resql_for_compound_triggers(self, tid, main_sql, schema, table):
"""
#################################################
# 4) Reverse engineered sql for COMPOUND TRIGGERS
# Reverse engineered sql for COMPOUND TRIGGERS
#################################################
"""
if self.manager.server_type == 'ppas' \
and self.manager.version >= 120000:
SQL = render_template("/".join(
sql = render_template("/".join(
[self.compound_trigger_template_path, 'nodes.sql']), tid=tid)
status, rset = self.conn.execute_2darray(SQL)
status, rset = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=rset)
@ -610,26 +598,27 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
re.sub('\n{2,}', '\n\n', compound_trigger_sql)
main_sql.append(compound_trigger_sql)
def _get_resql_for_rules(self, tid, main_sql, table, json_resp):
"""
#####################################
# 5) Reverse engineered sql for RULES
# Reverse engineered sql for RULES
#####################################
"""
SQL = render_template("/".join(
sql = render_template("/".join(
[self.rules_template_path, 'nodes.sql']), tid=tid)
status, rset = self.conn.execute_2darray(SQL)
status, rset = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
rules_sql = '\n'
SQL = render_template("/".join(
sql = render_template("/".join(
[self.rules_template_path, 'properties.sql']
), rid=row['oid'], datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
@ -649,19 +638,15 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
rules_sql = re.sub('\n{2,}', '\n\n', rules_sql)
main_sql.append(rules_sql)
def _get_resql_for_partitions(self, data, rset, json_resp,
diff_partition_sql, main_sql):
"""
##########################################
# 6) Reverse engineered sql for PARTITIONS
# Reverse engineered sql for PARTITIONS
##########################################
"""
if is_partitioned:
SQL = render_template("/".join([self.partition_template_path,
'nodes.sql']),
scid=scid, tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
sql_header = ''
if len(rset['rows']):
if json_resp:
sql_header = u"\n-- Partitions SQL"
@ -693,6 +678,59 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
sql_header + '\n\n' + partition_main_sql
)
def get_reverse_engineered_sql(self, **kwargs):
"""
This function will create the reverse engineered sql for
the table object
Args:
kwargs
"""
did = kwargs.get('did')
scid = kwargs.get('scid')
tid = kwargs.get('tid')
main_sql = kwargs.get('main_sql')
data = kwargs.get('data')
json_resp = kwargs.get('json_resp', True)
diff_partition_sql = kwargs.get('diff_partition_sql', False)
# Table & Schema declaration so that we can use them in child nodes
schema = data['schema']
table = data['name']
is_partitioned = 'is_partitioned' in data and data['is_partitioned']
# Get Reverse engineered sql for Table
self._get_resql_for_table(did, scid, tid, data, json_resp, main_sql)
# Get Reverse engineered sql for Index
self._get_resql_for_index(did, tid, main_sql, json_resp, schema,
table)
# Get Reverse engineered sql for ROW SECURITY POLICY
self._get_resql_for_row_security_policy(scid, tid, json_resp,
main_sql, schema, table)
# Get Reverse engineered sql for Triggers
self._get_resql_for_triggers(tid, json_resp, main_sql, schema, table)
# Get Reverse engineered sql for Compound Triggers
self._get_resql_for_compound_triggers(tid, main_sql, schema, table)
# Get Reverse engineered sql for Rules
self._get_resql_for_rules(tid, main_sql, table, json_resp)
# Get Reverse engineered sql for Partitions
partition_main_sql = ""
if is_partitioned:
sql = render_template("/".join([self.partition_template_path,
'nodes.sql']),
scid=scid, tid=tid)
status, rset = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=rset)
self._get_resql_for_partitions(data, rset, json_resp,
diff_partition_sql, main_sql)
sql = '\n'.join(main_sql)
if not json_resp:
@ -715,10 +753,10 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
# table exist
try:
SQL = render_template("/".join([self.table_template_path,
sql = render_template("/".join([self.table_template_path,
'reset_stats.sql']),
tid=tid)
status, res = self.conn.execute_scalar(SQL)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
@ -819,15 +857,7 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
return data
def get_sql(self, did, scid, tid, data, res):
"""
This function will generate create/update sql from model data
coming from client
"""
if tid is not None:
old_data = res['rows'][0]
old_data = self._formatter(did, scid, tid, old_data)
def _get_privileges_from_client(self, data):
# We will convert privileges coming from the client into the server side format
if 'relacl' in data:
for mode in ['added', 'changed', 'deleted']:
@ -836,16 +866,8 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
data['relacl'][mode], self.acl
)
# If name is not present in request data
if 'name' not in data:
data['name'] = old_data['name']
data = BaseTableView.check_and_convert_name_to_string(data)
# If schema is not present
if 'schema' not in data:
data['schema'] = old_data['schema']
@staticmethod
def _filter_new_tables(data, old_data):
# Filter out new tables from list, we will send complete list
# and not newly added tables in the list from client
# so we will filter new tables here
@ -877,26 +899,7 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
set(data['coll_inherits'])
)
# Update the vacuum table settings.
self.update_vacuum_settings('vacuum_table', old_data, data)
# Update the vacuum toast table settings.
self.update_vacuum_settings('vacuum_toast', old_data, data)
SQL = render_template(
"/".join([self.table_template_path, 'update.sql']),
o_data=old_data, data=data, conn=self.conn
)
# Removes trailing new lines
SQL = SQL.strip('\n') + '\n\n'
# Parse/Format columns & create sql
if 'columns' in data:
# Parse the data coming from client
data = column_utils.parse_format_columns(data, mode='edit')
columns = data['columns']
column_sql = '\n'
def _check_for_column_delete(self, columns, data, column_sql):
# If column(s) is/are deleted
if 'deleted' in columns:
for c in columns['deleted']:
@ -907,8 +910,9 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
column_sql += render_template("/".join(
[self.column_template_path, 'delete.sql']),
data=c, conn=self.conn).strip('\n') + '\n\n'
return column_sql
# If column(s) is/are changed
def _check_for_column_update(self, columns, data, column_sql, tid):
# Here we will be needing previous properties of column
# so that we can compare & update it
if 'changed' in columns:
@ -943,13 +947,15 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
old_col_data['cltype'])
# Sql for alter column
if 'inheritedfrom' not in c and\
if 'inheritedfrom' not in c and \
'inheritedfromtable' not in c:
column_sql += render_template("/".join(
[self.column_template_path, 'update.sql']),
data=c, o_data=old_col_data, conn=self.conn
).strip('\n') + '\n\n'
return column_sql
def _check_for_column_add(self, columns, data, column_sql):
# If column(s) is/are added
if 'added' in columns:
for c in columns['added']:
@ -958,15 +964,14 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
c = column_utils.convert_length_precision_to_string(c)
if 'inheritedfrom' not in c and\
if 'inheritedfrom' not in c and \
'inheritedfromtable' not in c:
column_sql += render_template("/".join(
[self.column_template_path, 'create.sql']),
data=c, conn=self.conn).strip('\n') + '\n\n'
return column_sql
# Combine all the SQL together
SQL += column_sql.strip('\n')
def _check_for_partitions_in_sql(self, data, old_data, sql):
# Check for partitions
if 'partitions' in data:
partitions = data['partitions']
@ -996,7 +1001,7 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
conn=self.conn).strip('\n') + '\n\n'
# If partition(s) is/are added
if 'added' in partitions and 'partition_scheme' in old_data\
if 'added' in partitions and 'partition_scheme' in old_data \
and old_data['partition_scheme'] != '':
temp_data = dict()
temp_data['schema'] = data['schema']
@ -1010,34 +1015,28 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
self.get_partitions_sql(temp_data).strip('\n') + '\n\n'
# Combine all the SQL together
SQL += '\n' + partitions_sql.strip('\n')
sql += '\n' + partitions_sql.strip('\n')
return sql
data['columns_to_be_dropped'] = []
if 'columns' in data and 'deleted' in data['columns']:
data['columns_to_be_dropped'] = list(map(
lambda d: d['name'], data['columns']['deleted']))
# Check if index constraints are added/changed/deleted
index_constraint_sql = \
idxcons_utils.get_index_constraint_sql(
self.conn, did, tid, data)
def _check_for_constraints(self, index_constraint_sql, data, did,
tid, sql):
# If we have index constraint sql then add it in main sql
if index_constraint_sql is not None:
SQL += '\n' + index_constraint_sql
sql += '\n' + index_constraint_sql
# Check if foreign key(s) is/are added/changed/deleted
foreign_key_sql = fkey_utils.get_foreign_key_sql(
self.conn, tid, data)
# If we have foreign key sql then add it in main sql
if foreign_key_sql is not None:
SQL += '\n' + foreign_key_sql
sql += '\n' + foreign_key_sql
# Check if check constraint(s) is/are added/changed/deleted
check_constraint_sql = check_utils.get_check_constraint_sql(
self.conn, tid, data)
# If we have check constraint sql then add it in main sql
if check_constraint_sql is not None:
SQL += '\n' + check_constraint_sql
sql += '\n' + check_constraint_sql
# Check if exclusion constraint(s) is/are added/changed/deleted
exclusion_constraint_sql = \
@ -1045,18 +1044,27 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
self.conn, did, tid, data)
# If we have exclusion constraint sql then add it in main sql
if exclusion_constraint_sql is not None:
SQL += '\n' + exclusion_constraint_sql
sql += '\n' + exclusion_constraint_sql
return sql
else:
res = None
required_args = [
'name'
]
def _check_for_foreign_key(self, data):
if 'foreign_key' in data:
for c in data['foreign_key']:
schema, table = fkey_utils.get_parent(
self.conn, c['columns'][0]['references'])
c['remote_schema'] = schema
c['remote_table'] = table
for arg in required_args:
if arg not in data:
return gettext('-- definition incomplete')
def _check_for_partitioned(self, data):
partitions_sql = ''
if 'is_partitioned' in data and data['is_partitioned']:
data['relkind'] = 'p'
# create partition scheme
data['partition_scheme'] = self.get_partition_scheme(data)
partitions_sql = self.get_partitions_sql(data)
return partitions_sql
def _validate_constraint_data(self, data):
# validate constraint data.
for key in ['primary_key', 'unique_constraint',
'foreign_key', 'check_constraint',
@ -1068,45 +1076,130 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
'-- definition incomplete for {0}'.format(key)
)
@staticmethod
def _check_for_create_sql(data):
required_args = [
'name'
]
for arg in required_args:
if arg not in data:
return True, '-- definition incomplete'
return False, ''
def _convert_privilege_to_server_format(self, data):
# We will convert privileges coming from the client into the
# server side format
if 'relacl' in data:
data['relacl'] = parse_priv_to_db(data['relacl'], self.acl)
def get_sql(self, did, scid, tid, data, res):
"""
This function will generate create/update sql from model data
coming from client
"""
if tid is not None:
old_data = res['rows'][0]
old_data = self._formatter(did, scid, tid, old_data)
self._get_privileges_from_client(data)
# If name is not present in request data
if 'name' not in data:
data['name'] = old_data['name']
data = BaseTableView.check_and_convert_name_to_string(data)
# If schema is not present
if 'schema' not in data:
data['schema'] = old_data['schema']
self._filter_new_tables(data, old_data)
# Update the vacuum table settings.
self.update_vacuum_settings('vacuum_table', old_data, data)
# Update the vacuum toast table settings.
self.update_vacuum_settings('vacuum_toast', old_data, data)
sql = render_template(
"/".join([self.table_template_path, 'update.sql']),
o_data=old_data, data=data, conn=self.conn
)
# Removes trailing new lines
sql = sql.strip('\n') + '\n\n'
# Parse/Format columns & create sql
if 'columns' in data:
# Parse the data coming from client
data = column_utils.parse_format_columns(data, mode='edit')
columns = data['columns']
column_sql = '\n'
# If column(s) is/are deleted
column_sql = self._check_for_column_delete(columns, data,
column_sql)
# If column(s) is/are changed
column_sql = self._check_for_column_update(columns, data,
column_sql, tid)
# If column(s) is/are added
column_sql = self._check_for_column_add(columns, data,
column_sql)
# Combine all the SQL together
sql += column_sql.strip('\n')
# Check for partitions
sql = self._check_for_partitions_in_sql(data, old_data, sql)
data['columns_to_be_dropped'] = []
if 'columns' in data and 'deleted' in data['columns']:
data['columns_to_be_dropped'] = list(map(
lambda d: d['name'], data['columns']['deleted']))
# Check if index constraints are added/changed/deleted
index_constraint_sql = \
idxcons_utils.get_index_constraint_sql(
self.conn, did, tid, data)
sql = self._check_for_constraints(index_constraint_sql, data, did,
tid, sql)
else:
error, errmsg = BaseTableView._check_for_create_sql(data)
if error:
return gettext('-- definition incomplete')
# validate constraint data.
self._validate_constraint_data(data)
# We will convert privileges coming from the client into the
# server side format
self._convert_privilege_to_server_format(data)
# Parse & format columns
data = column_utils.parse_format_columns(data)
data = BaseTableView.check_and_convert_name_to_string(data)
if 'foreign_key' in data:
for c in data['foreign_key']:
schema, table = fkey_utils.get_parent(
self.conn, c['columns'][0]['references'])
c['remote_schema'] = schema
c['remote_table'] = table
self._check_for_foreign_key(data)
partitions_sql = ''
if 'is_partitioned' in data and data['is_partitioned']:
data['relkind'] = 'p'
# create partition scheme
data['partition_scheme'] = self.get_partition_scheme(data)
partitions_sql = self.get_partitions_sql(data)
partitions_sql = self._check_for_partitioned(data)
# Update the vacuum table settings.
self.update_vacuum_settings('vacuum_table', data)
# Update the vacuum toast table settings.
self.update_vacuum_settings('vacuum_toast', data)
SQL = render_template("/".join([self.table_template_path,
sql = render_template("/".join([self.table_template_path,
'create.sql']),
data=data, conn=self.conn)
# Append SQL for partitions
SQL += '\n' + partitions_sql
sql += '\n' + partitions_sql
SQL = re.sub('\n{2,}', '\n\n', SQL)
SQL = SQL.strip('\n')
sql = re.sub('\n{2,}', '\n\n', sql)
sql = sql.strip('\n')
return SQL, data['name'] if 'name' in data else old_data['name']
return sql, data['name'] if 'name' in data else old_data['name']
def update(self, gid, sid, did, scid, tid, **kwargs):
"""
@ -1135,16 +1228,16 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
# table exists
try:
SQL, name = self.get_sql(did, scid, tid, data, res)
sql, name = self.get_sql(did, scid, tid, data, res)
SQL = SQL.strip('\n').strip(' ')
status, rest = self.conn.execute_scalar(SQL)
sql = sql.strip('\n').strip(' ')
status, rest = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=rest)
SQL = render_template("/".join([self.table_template_path,
sql = render_template("/".join([self.table_template_path,
'get_schema_oid.sql']), tid=tid)
status, rest = self.conn.execute_2darray(SQL)
status, rest = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=rest)
@ -1153,6 +1246,34 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
# Check for partitions
partitions_oid = dict()
is_partitioned = self._check_for_partitions(data, partitions_oid,
res, scid)
# If partitioned_table_name in result set then get partition
# icon css class else table icon.
if 'partitioned_table_name' in res['rows'][0]:
res['rows'][0]['is_sub_partitioned'] = is_partitioned
icon = self.get_partition_icon_css_class(res['rows'][0])
else:
icon = self.get_icon_css_class(res['rows'][0])
return jsonify(
node=self.blueprint.generate_browser_node(
tid,
parent_id,
name,
icon=icon,
is_partitioned=is_partitioned,
parent_schema_id=scid,
schema_id=rest['rows'][0]['scid'],
schema_name=rest['rows'][0]['nspname'],
affected_partitions=partitions_oid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def _check_for_partitions(self, data, partitions_oid, res, scid):
if 'partitions' in data:
# Fetch oid of schema for all detached partitions
if 'deleted' in data['partitions']:
@ -1175,6 +1296,16 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
)
partitions_oid['detached'] = detached
self._fetch_oid_schema_iod(data, scid, partitions_oid)
if 'is_partitioned' in res['rows'][0]:
is_partitioned = res['rows'][0]['is_partitioned']
else:
is_partitioned = False
return is_partitioned
def _fetch_oid_schema_iod(self, data, scid, partitions_oid):
# Fetch oid and schema oid for all created/attached partitions
if 'added' in data['partitions']:
created = []
@ -1201,14 +1332,14 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
else:
tmp_data = dict()
tmp_data['name'] = row['partition_name']
SQL = render_template(
sql = render_template(
"/".join([
self.table_template_path, 'get_oid.sql'
]),
scid=scid, data=tmp_data
)
status, ptid = self.conn.execute_scalar(SQL)
status, ptid = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=ptid)
@ -1220,35 +1351,6 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
partitions_oid['created'] = created
partitions_oid['attached'] = attached
if 'is_partitioned' in res['rows'][0]:
is_partitioned = res['rows'][0]['is_partitioned']
else:
is_partitioned = False
# If partitioned_table_name in result set then get partition
# icon css class else table icon.
if 'partitioned_table_name' in res['rows'][0]:
res['rows'][0]['is_sub_partitioned'] = is_partitioned
icon = self.get_partition_icon_css_class(res['rows'][0])
else:
icon = self.get_icon_css_class(res['rows'][0])
return jsonify(
node=self.blueprint.generate_browser_node(
tid,
parent_id,
name,
icon=icon,
is_partitioned=is_partitioned,
parent_schema_id=scid,
schema_id=rest['rows'][0]['scid'],
schema_name=rest['rows'][0]['nspname'],
affected_partitions=partitions_oid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def properties(self, gid, sid, did, scid, tid, **kwargs):
"""
This function will show the properties of the selected table node.
@ -1293,10 +1395,10 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
data['partition_scheme'].split()[0].lower()
partitions = []
SQL = render_template("/".join([self.partition_template_path,
sql = render_template("/".join([self.partition_template_path,
'nodes.sql']),
scid=scid, tid=tid)
status, rset = self.conn.execute_2darray(SQL)
status, rset = self.conn.execute_2darray(sql)
if not status:
return internal_server_error(errormsg=rset)
@ -1494,10 +1596,10 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
data = res['rows'][0]
SQL = render_template("/".join([self.table_template_path,
sql = render_template("/".join([self.table_template_path,
'truncate.sql']),
data=data, cascade=is_cascade)
status, res = self.conn.execute_scalar(SQL)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
@ -1538,9 +1640,9 @@ class BaseTableView(PGChildNodeView, BasePartitionTable):
tid: Table ID
"""
SQL = self.get_delete_sql(res)
sql = self.get_delete_sql(res)
status, res = self.conn.execute_scalar(SQL)
status, res = self.conn.execute_scalar(sql)
if not status:
return status, res