diff --git a/superset/connectors/base/models.py b/superset/connectors/base/models.py
index 406dbd0431139..3808391f880df 100644
--- a/superset/connectors/base/models.py
+++ b/superset/connectors/base/models.py
@@ -219,7 +219,7 @@ def __repr__(self):

     num_types = (
         'DOUBLE', 'FLOAT', 'INT', 'BIGINT',
-        'LONG', 'REAL', 'NUMERIC', 'DECIMAL'
+        'LONG', 'REAL', 'NUMERIC', 'DECIMAL',
     )
     date_types = ('DATE', 'TIME', 'DATETIME')
     str_types = ('VARCHAR', 'STRING', 'CHAR')
diff --git a/superset/connectors/connector_registry.py b/superset/connectors/connector_registry.py
index 9c58f48076467..5924797f9a4ff 100644
--- a/superset/connectors/connector_registry.py
+++ b/superset/connectors/connector_registry.py
@@ -61,7 +61,7 @@ def get_eager_datasource(cls, session, datasource_type, datasource_id):
             session.query(datasource_class)
             .options(
                 subqueryload(datasource_class.columns),
-                subqueryload(datasource_class.metrics)
+                subqueryload(datasource_class.metrics),
             )
             .filter_by(id=datasource_id)
             .one()
diff --git a/superset/connectors/druid/models.py b/superset/connectors/druid/models.py
index 1f724e7a5549f..1f5a04047eb1d 100644
--- a/superset/connectors/druid/models.py
+++ b/superset/connectors/druid/models.py
@@ -232,7 +232,7 @@ class DruidColumn(Model, BaseColumn):
     export_fields = (
         'datasource_name', 'column_name', 'is_active', 'type', 'groupby',
         'count_distinct', 'sum', 'avg', 'max', 'min', 'filterable',
-        'description', 'dimension_spec_json'
+        'description', 'dimension_spec_json',
     )

     def __repr__(self):
@@ -253,7 +253,7 @@ def get_metrics(self):
             metric_name='count',
             verbose_name='COUNT(*)',
             metric_type='count',
-            json=json.dumps({'type': 'count', 'name': 'count'})
+            json=json.dumps({'type': 'count', 'name': 'count'}),
         )
         # Somehow we need to reassign this for UDAFs
         if self.type in ('DOUBLE', 'FLOAT'):
@@ -269,7 +269,7 @@ def get_metrics(self):
                 metric_type='sum',
                 verbose_name='SUM({})'.format(self.column_name),
                 json=json.dumps({
-                    'type': mt, 'name': name, 'fieldName': self.column_name})
+                    'type': mt, 'name': name, 'fieldName': self.column_name}),
             )

         if self.avg and self.is_num:
@@ -280,7 +280,7 @@ def get_metrics(self):
                 metric_type='avg',
                 verbose_name='AVG({})'.format(self.column_name),
                 json=json.dumps({
-                    'type': mt, 'name': name, 'fieldName': self.column_name})
+                    'type': mt, 'name': name, 'fieldName': self.column_name}),
             )

         if self.min and self.is_num:
@@ -291,7 +291,7 @@ def get_metrics(self):
                 metric_type='min',
                 verbose_name='MIN({})'.format(self.column_name),
                 json=json.dumps({
-                    'type': mt, 'name': name, 'fieldName': self.column_name})
+                    'type': mt, 'name': name, 'fieldName': self.column_name}),
             )
         if self.max and self.is_num:
             mt = corrected_type.lower() + 'Max'
@@ -301,7 +301,7 @@ def get_metrics(self):
                 metric_type='max',
                 verbose_name='MAX({})'.format(self.column_name),
                 json=json.dumps({
-                    'type': mt, 'name': name, 'fieldName': self.column_name})
+                    'type': mt, 'name': name, 'fieldName': self.column_name}),
             )
         if self.count_distinct:
             name = 'count_distinct__' + self.column_name
@@ -313,8 +313,8 @@ def get_metrics(self):
                     json=json.dumps({
                         'type': self.type,
                         'name': name,
-                        'fieldName': self.column_name
-                    })
+                        'fieldName': self.column_name,
+                    }),
                 )
             else:
                 metrics[name] = DruidMetric(
@@ -324,7 +324,7 @@ def get_metrics(self):
                     json=json.dumps({
                         'type': 'cardinality',
                         'name': name,
-                        'fieldNames': [self.column_name]})
+                        'fieldNames': [self.column_name]}),
                 )
         return metrics

@@ -372,7 +372,7 @@ class DruidMetric(Model, BaseMetric):

     export_fields = (
         'metric_name', 'verbose_name', 'metric_type', 'datasource_name',
-        'json', 'description', 'is_restricted', 'd3format'
+        'json', 'description', 'is_restricted', 'd3format',
     )

     @property
@@ -392,7 +392,7 @@ def perm(self):
         return (
             "{parent_name}.[{obj.metric_name}](id:{obj.id})"
         ).format(obj=self,
-                 parent_name=self.datasource.full_name
+                 parent_name=self.datasource.full_name,
                  ) if self.datasource else None

     @classmethod
@@ -434,7 +434,7 @@ class DruidDatasource(Model, BaseDatasource):

     export_fields = (
         'datasource_name', 'is_hidden', 'description', 'default_endpoint',
-        'cluster_name', 'offset', 'cache_timeout', 'params'
+        'cluster_name', 'offset', 'cache_timeout', 'params',
     )

     @property
@@ -491,7 +491,7 @@ def time_column_grains(self):
                 'week', 'week_starting_sunday', 'week_ending_saturday',
                 'month',
             ],
-            "time_grains": ['now']
+            "time_grains": ['now'],
         }

     def __repr__(self):
@@ -815,11 +815,11 @@ def recursive_get_fields(_conf):
             elif mconf.get('type') == 'constant':
                 post_aggs[metric_name] = Const(
                     mconf.get('value'),
-                    output_name=mconf.get('name', '')
+                    output_name=mconf.get('name', ''),
                 )
             elif mconf.get('type') == 'hyperUniqueCardinality':
                 post_aggs[metric_name] = HyperUniqueCardinality(
-                    mconf.get('name')
+                    mconf.get('name'),
                 )
             elif mconf.get('type') == 'arithmetic':
                 post_aggs[metric_name] = Postaggregator(
@@ -936,7 +936,7 @@ def run_query(  # noqa / druid

         if rejected_metrics:
             raise MetricPermException(
-                "Access to the metrics denied: " + ', '.join(rejected_metrics)
+                "Access to the metrics denied: " + ', '.join(rejected_metrics),
             )

         # the dimensions list with dimensionSpecs expanded
@@ -1155,18 +1155,18 @@ def get_filters(raw_filters, num_cols):  # noqa
             elif op == '>':
                 cond = Bound(
                     col, eq, None,
-                    lowerStrict=True, alphaNumeric=is_numeric_col
+                    lowerStrict=True, alphaNumeric=is_numeric_col,
                 )
             elif op == '<':
                 cond = Bound(
                     col, None, eq,
-                    upperStrict=True, alphaNumeric=is_numeric_col
+                    upperStrict=True, alphaNumeric=is_numeric_col,
                 )

             if filters:
                 filters = Filter(type="and", fields=[
                     cond,
-                    filters
+                    filters,
                 ])
             else:
                 filters = cond
@@ -1192,7 +1192,7 @@ def get_having_filters(self, raw_filters):
         reversed_op_map = {
             '!=': '==',
             '>=': '<',
-            '<=': '>'
+            '<=': '>',
         }

         for flt in raw_filters:
diff --git a/superset/connectors/druid/views.py b/superset/connectors/druid/views.py
index d3e0fa56e082c..ade28c754aefb 100644
--- a/superset/connectors/druid/views.py
+++ b/superset/connectors/druid/views.py
@@ -14,7 +14,7 @@
 from superset.views.base import (
     BaseSupersetView, DatasourceFilter, DeleteMixin,
     get_datasource_exist_error_mgs, ListWidgetWithCheckboxes, SupersetModelView,
-    validate_json
+    validate_json,
 )

 from . import models
@@ -184,7 +184,7 @@ class DruidDatasourceModelView(DatasourceModelView, DeleteMixin):  # noqa
         'filter_select_enabled', 'fetch_values_from',
         'default_endpoint', 'offset', 'cache_timeout']
     search_columns = (
-        'datasource_name', 'cluster', 'description', 'owner'
+        'datasource_name', 'cluster', 'description', 'owner',
     )
     add_columns = edit_columns
     show_columns = add_columns + ['perm']
diff --git a/superset/connectors/sqla/models.py b/superset/connectors/sqla/models.py
index 975cce9390fa5..67734114e2d3f 100644
--- a/superset/connectors/sqla/models.py
+++ b/superset/connectors/sqla/models.py
@@ -44,7 +44,7 @@ class TableColumn(Model, BaseColumn):
         'table_id', 'column_name', 'verbose_name', 'is_dttm', 'is_active',
         'type', 'groupby', 'count_distinct', 'sum', 'avg', 'max', 'min',
         'filterable', 'expression', 'description', 'python_date_format',
-        'database_expression'
+        'database_expression',
     )

     @property
@@ -262,7 +262,7 @@ def sql_url(self):
     def time_column_grains(self):
         return {
             "time_columns": self.dttm_cols,
-            "time_grains": [grain.name for grain in self.database.grains()]
+            "time_grains": [grain.name for grain in self.database.grains()],
         }

     def get_col(self, col_name):
@@ -322,8 +322,8 @@ def get_query_str(self, query_obj):
         sql = str(
             qry.compile(
                 engine,
-                compile_kwargs={"literal_binds": True}
-            )
+                compile_kwargs={"literal_binds": True},
+            ),
         )
         logging.info(sql)
         sql = sqlparse.format(sql, reindent=True)
@@ -622,35 +622,35 @@ def fetch_metadata(self):
                     metric_name='sum__' + dbcol.column_name,
                     verbose_name='sum__' + dbcol.column_name,
                     metric_type='sum',
-                    expression="SUM({})".format(quoted)
+                    expression="SUM({})".format(quoted),
                 ))
             if dbcol.avg:
                 metrics.append(M(
                     metric_name='avg__' + dbcol.column_name,
                     verbose_name='avg__' + dbcol.column_name,
                     metric_type='avg',
-                    expression="AVG({})".format(quoted)
+                    expression="AVG({})".format(quoted),
                 ))
             if dbcol.max:
                 metrics.append(M(
                     metric_name='max__' + dbcol.column_name,
                     verbose_name='max__' + dbcol.column_name,
                     metric_type='max',
-                    expression="MAX({})".format(quoted)
+                    expression="MAX({})".format(quoted),
                 ))
             if dbcol.min:
                 metrics.append(M(
                     metric_name='min__' + dbcol.column_name,
                     verbose_name='min__' + dbcol.column_name,
                     metric_type='min',
-                    expression="MIN({})".format(quoted)
+                    expression="MIN({})".format(quoted),
                 ))
             if dbcol.count_distinct:
                 metrics.append(M(
                     metric_name='count_distinct__' + dbcol.column_name,
                     verbose_name='count_distinct__' + dbcol.column_name,
                     metric_type='count_distinct',
-                    expression="COUNT(DISTINCT {})".format(quoted)
+                    expression="COUNT(DISTINCT {})".format(quoted),
                 ))
             dbcol.type = datatype

@@ -658,7 +658,7 @@ def fetch_metadata(self):
             metric_name='count',
             verbose_name='COUNT(*)',
             metric_type='count',
-            expression="COUNT(*)"
+            expression="COUNT(*)",
         ))

         dbmetrics = db.session.query(M).filter(M.table_id == self.id).filter(
diff --git a/superset/connectors/sqla/views.py b/superset/connectors/sqla/views.py
index 495c98d8c858d..513dd5a4f2fff 100644
--- a/superset/connectors/sqla/views.py
+++ b/superset/connectors/sqla/views.py
@@ -117,7 +117,7 @@ class SqlMetricInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
             "(https://github.com/d3/d3-format/blob/master/README.md#format). "
             "For instance, this default formatting applies in the Table "
             "visualization and allow for different metric to use different "
-            "formats", True
+            "formats", True,
         ),
     }
     add_columns = edit_columns
@@ -189,13 +189,13 @@ class TableModelView(DatasourceModelView, DeleteMixin):  # noqa
             "markdown"),
         'sql': _(
             "This fields acts a Superset view, meaning that Superset will "
-            "run a query against this string as a subquery."
+            "run a query against this string as a subquery.",
         ),
         'fetch_values_predicate': _(
             "Predicate applied when fetching distinct value to "
             "populate the filter control component. Supports "
             "jinja template syntax. Applies only when "
-            "`Enable Filter Select` is on."
+            "`Enable Filter Select` is on.",
         ),
         'default_endpoint': _(
             "Redirects to this endpoint when clicking on the table "
diff --git a/superset/data/__init__.py b/superset/data/__init__.py
index 3b5689ed9964f..a96749a7b5cbd 100644
--- a/superset/data/__init__.py
+++ b/superset/data/__init__.py
@@ -93,7 +93,7 @@ def load_energy():
             "viz_type": "sankey",
             "where": ""
         }
-        """)
+        """),
     )
     misc_dash_slices.append(slc.slice_name)
     merge_slice(slc)
@@ -119,7 +119,7 @@ def load_energy():
             "viz_type": "directed_force",
             "where": ""
         }
-        """)
+        """),
     )
     misc_dash_slices.append(slc.slice_name)
     merge_slice(slc)
@@ -145,7 +145,7 @@ def load_energy():
             "xscale_interval": "1",
             "yscale_interval": "1"
         }
-        """)
+        """),
     )
     misc_dash_slices.append(slc.slice_name)
     merge_slice(slc)
@@ -971,7 +971,7 @@ def load_country_map_data():
             '2012': BigInteger,
             '2013': BigInteger,
             '2014': BigInteger,
-            'date': Date()
+            'date': Date(),
         },
         index=False)
     print("Done loading table!")
diff --git a/superset/dataframe.py b/superset/dataframe.py
index 1e56f2bb99dff..1d74fdde194d8 100644
--- a/superset/dataframe.py
+++ b/superset/dataframe.py
@@ -139,7 +139,7 @@ def columns(self):
                 column.update({
                     'is_date': True,
                     'is_dim': False,
-                    'agg': None
+                    'agg': None,
                 })
             # 'agg' is optional attribute
             if not column['agg']:
diff --git a/superset/db_engine_specs.py b/superset/db_engine_specs.py
index c287a7eef0c22..1b71a75e04ff1 100644
--- a/superset/db_engine_specs.py
+++ b/superset/db_engine_specs.py
@@ -486,7 +486,7 @@ def extra_table_metadata(cls, database, table_name, schema_name):
                 'cols': cols,
                 'latest': {col_name: latest_part},
                 'partitionQuery': pql,
-            }
+            },
         }

     @classmethod
diff --git a/superset/db_engines/hive.py b/superset/db_engines/hive.py
index bf3566fac11ec..1a1f51350b9e5 100644
--- a/superset/db_engines/hive.py
+++ b/superset/db_engines/hive.py
@@ -30,7 +30,7 @@ def fetch_logs(self, max_rows=1024,
         operationHandle=self._operationHandle,
         orientation=ttypes.TFetchOrientation.FETCH_NEXT,
         maxRows=self.arraysize,
-        fetchType=1  # 0: results, 1: logs
+        fetchType=1,  # 0: results, 1: logs
     )
     response = self._connection.client.FetchResults(req)
     hive._check_status(response)
diff --git a/superset/models/core.py b/superset/models/core.py
index 2db2af6f8c796..03582091e5b2a 100644
--- a/superset/models/core.py
+++ b/superset/models/core.py
@@ -199,7 +199,7 @@ def form_data(self):
         form_data.update({
             'slice_id': self.id,
             'viz_type': self.viz_type,
-            'datasource': str(self.datasource_id) + '__' + self.datasource_type
+            'datasource': str(self.datasource_id) + '__' + self.datasource_type,
         })
         if self.cache_timeout:
             form_data['cache_timeout'] = self.cache_timeout
@@ -301,7 +301,7 @@ def import_obj(cls, slc_to_import, import_time=None):
     'dashboard_user', metadata,
     Column('id', Integer, primary_key=True),
     Column('user_id', Integer, ForeignKey('ab_user.id')),
-    Column('dashboard_id', Integer, ForeignKey('dashboards.id'))
+    Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
 )


@@ -687,7 +687,7 @@ def wrap_sql_limit(self, sql, limit=1000):
             select('*')
             .select_from(
                 TextAsFrom(text(sql), ['*'])
-                .alias('inner_qry')
+                .alias('inner_qry'),
             ).limit(limit)
         )
         return self.compile_sqla_query(qry)
diff --git a/superset/models/helpers.py b/superset/models/helpers.py
index 929976045737c..6cc21fc482ee6 100644
--- a/superset/models/helpers.py
+++ b/superset/models/helpers.py
@@ -134,13 +134,13 @@ def merge_perm(sm, permission_name, view_menu_name, connection):
         permission_table = sm.permission_model.__table__
         connection.execute(
             permission_table.insert()
-            .values(name=permission_name)
+            .values(name=permission_name),
         )
     if not view_menu:
         view_menu_table = sm.viewmenu_model.__table__
         connection.execute(
             view_menu_table.insert()
-            .values(name=view_menu_name)
+            .values(name=view_menu_name),
         )

     permission = sm.find_permission(permission_name)
@@ -155,8 +155,8 @@ def merge_perm(sm, permission_name, view_menu_name, connection):
             permission_view_table.insert()
             .values(
                 permission_id=permission.id,
-                view_menu_id=view_menu.id
-            )
+                view_menu_id=view_menu.id,
+            ),
         )


@@ -167,7 +167,7 @@ def set_perm(mapper, connection, target):  # noqa
         connection.execute(
             link_table.update()
             .where(link_table.c.id == target.id)
-            .values(perm=target.get_perm())
+            .values(perm=target.get_perm()),
         )

     # add to view menu if not already exists
diff --git a/superset/security.py b/superset/security.py
index b1274f05f065b..8f96acbd7be94 100644
--- a/superset/security.py
+++ b/superset/security.py
@@ -133,13 +133,13 @@ def is_gamma_pvm(pvm):

 def is_sql_lab_pvm(pvm):
     return pvm.view_menu.name in {'SQL Lab'} or pvm.permission.name in {
-        'can_sql_json', 'can_csv', 'can_search_queries'
+        'can_sql_json', 'can_csv', 'can_search_queries',
     }


 def is_granter_pvm(pvm):
     return pvm.permission.name in {
-        'can_override_role_permissions', 'can_approve'
+        'can_override_role_permissions', 'can_approve',
     }

diff --git a/superset/utils.py b/superset/utils.py
index 0bdf30d0b04e5..74d8dfbb63dc2 100644
--- a/superset/utils.py
+++ b/superset/utils.py
@@ -163,7 +163,7 @@ def __init__(self, **args):
                 'type': 'dimSelector',
                 'dimension': args['dimension'],
                 'value': args['value'],
-            }
+            },
         }

diff --git a/superset/views/base.py b/superset/views/base.py
index cba4591b18ff3..cf0b2b96338b2 100644
--- a/superset/views/base.py
+++ b/superset/views/base.py
@@ -270,7 +270,7 @@ def _delete(self, pk):
         __("Delete"),
         __("Delete all Really?"),
         "fa-trash",
-        single=False
+        single=False,
     )
     def muldelete(self, items):
         if not items:
diff --git a/superset/views/core.py b/superset/views/core.py
index 1c56f00428d28..ab2e3d434e96d 100755
--- a/superset/views/core.py
+++ b/superset/views/core.py
@@ -151,8 +151,8 @@ def apply(self, query, func):  # noqa
                 db.session.query(Dash.id)
                 .distinct()
                 .join(Dash.slices)
-                .filter(Slice.id.in_(slice_ids_qry))
-            )
+                .filter(Slice.id.in_(slice_ids_qry)),
+            ),
         )
         return query

@@ -179,7 +179,7 @@ class DatabaseView(SupersetModelView, DeleteMixin):  # noqa
         'allow_dml', 'creator', 'modified']
     order_columns = [
         'database_name', 'allow_run_sync', 'allow_run_async', 'allow_dml',
-        'modified'
+        'modified',
     ]
     add_columns = [
         'database_name', 'sqlalchemy_uri', 'cache_timeout', 'extra',
@@ -256,7 +256,7 @@ class DatabaseView(SupersetModelView, DeleteMixin):  # noqa
         'extra': _("Extra"),
         'allow_run_sync': _("Allow Run Sync"),
         'allow_run_async': _("Allow Run Async"),
-        'impersonate_user': _("Impersonate the logged on user")
+        'impersonate_user': _("Impersonate the logged on user"),
     }

     def pre_add(self, db):
@@ -365,10 +365,10 @@ class SliceModelView(SupersetModelView, DeleteMixin):  # noqa
             "These parameters are generated dynamically when clicking "
             "the save or overwrite button in the explore view. This JSON "
             "object is exposed here for reference and for power users who may "
-            "want to alter specific parameters."),
-        'cache_timeout': _(
-            "Duration (in seconds) of the caching timeout for this slice."
+            "want to alter specific parameters.",
         ),
+        'cache_timeout': _(
+            "Duration (in seconds) of the caching timeout for this slice."),
     }
     base_filters = [['id', SliceFilter, lambda: []]]
     label_columns = {
@@ -532,7 +532,7 @@ def download_dashboards(self):
                 mimetype="application/text")
         return self.render_template(
             'superset/export_dashboards.html',
-            dashboards_url='/dashboardmodelview/list'
+            dashboards_url='/dashboardmodelview/list',
         )

@@ -770,7 +770,7 @@ def override_role_permissions(self):
         db.session.commit()
         return self.json_response({
             'granted': granted_perms,
-            'requested': list(db_ds_names)
+            'requested': list(db_ds_names),
         }, status=201)

     @log_this
@@ -1455,7 +1455,7 @@ def testconn(self):
                 configuration.update(
                     db_engine.get_configuration_for_impersonation(uri,
                                                                   impersonate_user,
-                                                                  username)
+                                                                  username),
                 )

             connect_args = (
@@ -1486,17 +1486,17 @@ def recent_activity(self, user_id):
             db.session.query(M.Log, M.Dashboard, M.Slice)
             .outerjoin(
                 M.Dashboard,
-                M.Dashboard.id == M.Log.dashboard_id
+                M.Dashboard.id == M.Log.dashboard_id,
             )
             .outerjoin(
                 M.Slice,
-                M.Slice.id == M.Log.slice_id
+                M.Slice.id == M.Log.slice_id,
             )
             .filter(
                 sqla.and_(
                     ~M.Log.action.in_(('queries', 'shortner', 'sql_json')),
                     M.Log.user_id == user_id,
-                )
+                ),
             )
             .order_by(M.Log.dttm.desc())
             .limit(1000)
@@ -1553,10 +1553,10 @@ def fave_dashboards(self, user_id):
                     models.FavStar.user_id == int(user_id),
                     models.FavStar.class_name == 'Dashboard',
                     models.Dashboard.id == models.FavStar.obj_id,
-                )
+                ),
             )
             .order_by(
-                models.FavStar.dttm.desc()
+                models.FavStar.dttm.desc(),
             )
         )
         payload = []
@@ -1590,10 +1590,10 @@ def created_dashboards(self, user_id):
                 sqla.or_(
                     Dash.created_by_fk == user_id,
                     Dash.changed_by_fk == user_id,
-                )
+                ),
             )
             .order_by(
-                Dash.changed_on.desc()
+                Dash.changed_on.desc(),
             )
         )
         payload = [{
@@ -1618,7 +1618,7 @@ def created_slices(self, user_id):
                 sqla.or_(
                     Slice.created_by_fk == user_id,
                     Slice.changed_by_fk == user_id,
-                )
+                ),
             )
             .order_by(Slice.changed_on.desc())
         )
@@ -1647,10 +1647,10 @@ def fave_slices(self, user_id):
                     models.FavStar.user_id == int(user_id),
                     models.FavStar.class_name == 'slice',
                     models.Slice.id == models.FavStar.obj_id,
-                )
+                ),
             )
             .order_by(
-                models.FavStar.dttm.desc()
+                models.FavStar.dttm.desc(),
             )
         )
         payload = []
@@ -1733,8 +1733,8 @@ def favstar(self, class_name, obj_id, action):
                     class_name=class_name,
                     obj_id=obj_id,
                     user_id=g.user.get_id(),
-                    dttm=datetime.now()
-                )
+                    dttm=datetime.now(),
+                ),
             )
             count = 1
         elif action == 'unselect':
@@ -1993,7 +1993,7 @@ def select_star(self, database_id, table_name):
             models.Database).filter_by(id=database_id).first()
         return self.render_template(
             "superset/ajah.html",
-            content=mydb.select_star(table_name, show_cols=True)
+            content=mydb.select_star(table_name, show_cols=True),
         )

     @expose("/theme/")
@@ -2023,7 +2023,7 @@ def results(self, key):
             return json_error_response(
                 "Data could not be retrieved. "
                 "You may want to re-run the query.",
-                status=410
+                status=410,
             )

         query = db.session.query(Query).filter_by(results_key=key).one()
@@ -2085,7 +2085,7 @@ def sql_json(self):
         if select_as_cta and mydb.force_ctas_schema:
             tmp_table_name = '{}.{}'.format(
                 mydb.force_ctas_schema,
-                tmp_table_name
+                tmp_table_name,
             )

         query = Query(
@@ -2326,7 +2326,7 @@ def profile(self, username):
             for perm in role.permissions:
                 if perm.permission and perm.view_menu:
                     perms.add(
-                        (perm.permission.name, perm.view_menu.name)
+                        (perm.permission.name, perm.view_menu.name),
                     )
                     if perm.permission.name in ('datasource_access', 'database_access'):
                         permissions[perm.permission.name].add(perm.view_menu.name)
@@ -2354,7 +2354,7 @@ def profile(self, username):
             title=user.username + "'s profile",
             navbar_container=True,
             entry='profile',
-            bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser)
+            bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
         )

     @has_access
@@ -2368,7 +2368,7 @@ def sqllab(self):
         return self.render_template(
             'superset/basic.html',
             entry='sqllab',
-            bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser)
+            bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser),
         )

     @api
diff --git a/superset/viz.py b/superset/viz.py
index 4a3d0489ce1fe..ab66401f8a425 100644
--- a/superset/viz.py
+++ b/superset/viz.py
@@ -197,7 +197,7 @@ def query_obj(self):
             'extras': extras,
             'timeseries_limit_metric': timeseries_limit_metric,
             'form_data': form_data,
-            'order_desc': order_desc
+            'order_desc': order_desc,
         }
         return d

@@ -387,7 +387,7 @@ def query_obj(self):
         if 'percent_metrics' in fd:
             d['metrics'] = d['metrics'] + list(filter(
                 lambda m: m not in d['metrics'],
-                fd['percent_metrics']
+                fd['percent_metrics'],
             ))

         d['is_timeseries'] = self.should_be_timeseries()
@@ -416,7 +416,7 @@ def get_data(self, df):
             # Remove metrics that are not in the main metrics list
             for m in filter(
                 lambda m: m not in fd['metrics'] and m in df.columns,
-                percent_metrics
+                percent_metrics,
             ):
                 del df[m]

@@ -766,7 +766,7 @@ def query_obj(self):
         form_data = self.form_data
         d = super(BubbleViz, self).query_obj()
         d['groupby'] = [
-            form_data.get('entity')
+            form_data.get('entity'),
         ]
         if form_data.get('series'):
             d['groupby'].append(form_data.get('series'))
@@ -1090,7 +1090,7 @@ def to_series(self, df, classed=''):
         chart_data = []
         metrics = [
             self.form_data.get('metric'),
-            self.form_data.get('metric_2')
+            self.form_data.get('metric_2'),
         ]
         for i, m in enumerate(metrics):
             ys = series[m]
@@ -1105,7 +1105,7 @@ def to_series(self, df, classed=''):
                     for ds in df.index
                 ],
                 "yAxis": i+1,
-                "type": "line"
+                "type": "line",
             }
             chart_data.append(d)
         return chart_data
@@ -1702,14 +1702,14 @@ def get_data(self, df):
                     "geometry": {
                         "type": "Point",
                         "coordinates": [lon, lat],
-                    }
+                    },
                 }
                 for lon, lat, metric, point_radius in zip(
                     df[fd.get('all_columns_x')],
                     df[fd.get('all_columns_y')],
                     metric_col, point_radius_col)
-            ]
+            ],
         }

         return {
@@ -1912,7 +1912,7 @@ def nest_values(self, levels, level=0, metric=None, dims=()):
                 'name': i,
                 'val': levels[level][metric][dims][i],
                 'children': self.nest_values(
-                    levels, level + 1, metric, dims + (i,)
+                    levels, level + 1, metric, dims + (i,),
                 ),
             } for i in levels[level][metric][dims].index]

@@ -1933,7 +1933,7 @@ def nest_procs(self, procs, level=-1, dims=(), time=None):
         return [{
             'name': i,
             'val': procs[level][dims][i][time],
-            'children': self.nest_procs(procs, level + 1, dims + (i,), time)
+            'children': self.nest_procs(procs, level + 1, dims + (i,), time),
         } for i in procs[level][dims].columns]

     def get_data(self, df):
diff --git a/tests/access_tests.py b/tests/access_tests.py
index 2e81fc01f2d68..1b950b5175734 100644
--- a/tests/access_tests.py
+++ b/tests/access_tests.py
@@ -23,9 +23,9 @@
         'name': 'main',
         'schema': [{
             'name': '',
-            'datasources': ['birth_names']
-        }]
-    }]
+            'datasources': ['birth_names'],
+        }],
+    }],
 }

 ROLE_ALL_PERM_DATA = {
@@ -35,17 +35,17 @@
         'name': 'main',
         'schema': [{
             'name': '',
-            'datasources': ['birth_names']
-        }]
+            'datasources': ['birth_names'],
+        }],
     }, {
         'datasource_type': 'druid',
         'name': 'druid_test',
         'schema': [{
             'name': '',
-            'datasources': ['druid_ds_1', 'druid_ds_2']
-        }]
-    }
-    ]
+            'datasources': ['druid_ds_1', 'druid_ds_2'],
+        }],
+    },
+    ],
 }

 EXTEND_ROLE_REQUEST = (
@@ -172,7 +172,7 @@ def test_override_role_permissions_drops_absent_perms(self):
         override_me.permissions.append(
             sm.find_permission_view_menu(
                 view_menu_name=self.get_table_by_name('long_lat').perm,
-                permission_name='datasource_access')
+                permission_name='datasource_access'),
         )
         db.session.flush()

@@ -550,11 +550,11 @@ def test_update_role(self):
                     'username': 'gamma',
                     'first_name': 'Gamma',
                     'last_name': 'Gamma',
-                    'email': 'gamma@superset.com'
+                    'email': 'gamma@superset.com',
                 }],
-                'role_name': update_role_str
+                'role_name': update_role_str,
             }),
-            follow_redirects=True
+            follow_redirects=True,
         )
         update_role = sm.find_role(update_role_str)
         self.assertEquals(
@@ -568,16 +568,16 @@ def test_update_role(self):
                     'username': 'alpha',
                     'first_name': 'Alpha',
                     'last_name': 'Alpha',
-                    'email': 'alpha@superset.com'
+                    'email': 'alpha@superset.com',
                 }, {
                     'username': 'unknown',
                     'first_name': 'Unknown1',
                     'last_name': 'Unknown2',
-                    'email': 'unknown@superset.com'
+                    'email': 'unknown@superset.com',
                 }],
-                'role_name': update_role_str
+                'role_name': update_role_str,
             }),
-            follow_redirects=True
+            follow_redirects=True,
         )
         self.assertEquals(resp.status_code, 201)
         update_role = sm.find_role(update_role_str)
diff --git a/tests/base_tests.py b/tests/base_tests.py
index 0b76277fc49fd..af94596091919 100644
--- a/tests/base_tests.py
+++ b/tests/base_tests.py
@@ -102,12 +102,12 @@ def __init__(self, *args, **kwargs):

             druid_datasource1 = DruidDatasource(
                 datasource_name='druid_ds_1',
-                cluster_name='druid_test'
+                cluster_name='druid_test',
             )
             session.add(druid_datasource1)
             druid_datasource2 = DruidDatasource(
                 datasource_name='druid_ds_2',
-                cluster_name='druid_test'
+                cluster_name='druid_test',
             )
             session.add(druid_datasource2)
             session.commit()
diff --git a/tests/celery_tests.py b/tests/celery_tests.py
index c76341c097528..8763ec1d3e7a2 100644
--- a/tests/celery_tests.py
+++ b/tests/celery_tests.py
@@ -63,7 +63,7 @@ def test_create_table_as(self):
         self.assertEqual(
             "CREATE TABLE tmp AS \nSELECT * FROM planets WHERE\n"
             "Luke_Father = 'Darth Vader'",
-            q.as_create_table("tmp")
+            q.as_create_table("tmp"),
         )

@@ -113,12 +113,12 @@ def setUpClass(cls):
     def tearDownClass(cls):
         subprocess.call(
             "ps auxww | grep 'celeryd' | awk '{print $2}' | xargs kill -9",
-            shell=True
+            shell=True,
         )
         subprocess.call(
             "ps auxww | grep 'superset worker' | awk '{print $2}' | "
             "xargs kill -9",
-            shell=True
+            shell=True,
         )

     def run_sql(self, db_id, sql, client_id, cta='false', tmp_table='tmp',
@@ -148,7 +148,7 @@ def test_add_limit_to_the_query(self):
         # In addition some of the engines do not include OFFSET 0.
         self.assertTrue(
             "SELECT * FROM (SELECT * FROM outer_space;) AS inner_qry "
-            "LIMIT 100" in ' '.join(updated_select_query.split())
+            "LIMIT 100" in ' '.join(updated_select_query.split()),
         )

         select_query_no_semicolon = "SELECT * FROM outer_space"
@@ -157,7 +157,7 @@ def test_add_limit_to_the_query(self):
         self.assertTrue(
             "SELECT * FROM (SELECT * FROM outer_space) AS inner_qry "
             "LIMIT 100" in
-            ' '.join(updated_select_query_no_semicolon.split())
+            ' '.join(updated_select_query_no_semicolon.split()),
         )

         multi_line_query = (
@@ -167,7 +167,7 @@ def test_add_limit_to_the_query(self):
         self.assertTrue(
             "SELECT * FROM (SELECT * FROM planets WHERE "
             "Luke_Father = 'Darth Vader';) AS inner_qry LIMIT 100" in
-            ' '.join(updated_multi_line_query.split())
+            ' '.join(updated_multi_line_query.split()),
         )

     def test_run_sync_query_dont_exist(self):
@@ -276,7 +276,7 @@ def test_get_columns(self):
                 'is_dim': False},
                {'is_date': False, 'type': 'STRING',
                 'name': 'string3', 'is_dim': True}], 'name')
-                , cols
+                , cols,
             )
         else:
             self.assertEqual(self.dictify_list_of_dicts([
@@ -296,7 +296,7 @@ def test_get_columns(self):
                 'is_dim': False},
                {'is_date': False, 'type': 'STRING',
                 'name': 'string3', 'is_dim': True}], 'name')
-                , cols
+                , cols,
             )

diff --git a/tests/core_tests.py b/tests/core_tests.py
index 062da5e721ef4..c7c2ff9a1625e 100644
--- a/tests/core_tests.py
+++ b/tests/core_tests.py
@@ -153,8 +153,8 @@ def test_save_slice(self):
                 tbl_id,
                 copy_name,
                 'saveas',
-                json.dumps(form_data)
-            )
+                json.dumps(form_data),
+            ),
         )
         slices = db.session.query(models.Slice) \
             .filter_by(slice_name=copy_name).all()
@@ -174,8 +174,8 @@ def test_save_slice(self):
                 tbl_id,
                 new_slice_name,
                 'overwrite',
-                json.dumps(form_data)
-            )
+                json.dumps(form_data),
+            ),
         )
         slc = db.session.query(models.Slice).filter_by(id=new_slice_id).first()
         assert slc.slice_name == new_slice_name
@@ -281,7 +281,7 @@ def test_testconn(self, username='admin'):
         data = json.dumps({
             'uri': database.safe_sqlalchemy_uri(),
             'name': 'main',
-            'impersonate_user': False
+            'impersonate_user': False,
         })
         response = self.client.post('/superset/testconn', data=data, content_type='application/json')
         assert response.status_code == 200
@@ -291,7 +291,7 @@ def test_testconn(self, username='admin'):
         data = json.dumps({
             'uri': database.sqlalchemy_uri_decrypted,
             'name': 'main',
-            'impersonate_user': False
+            'impersonate_user': False,
         })
         response = self.client.post('/superset/testconn', data=data, content_type='application/json')
         assert response.status_code == 200
@@ -389,7 +389,7 @@ def test_save_dash(self, username='admin'):
             'css': '',
             'expanded_slices': {},
             'positions': positions,
-            'dashboard_title': dash.dashboard_title
+            'dashboard_title': dash.dashboard_title,
         }
         url = '/superset/save_dash/{}/'.format(dash.id)
         resp = self.get_resp(url, data=dict(data=json.dumps(data)))
@@ -416,7 +416,7 @@ def test_save_dash_with_filter(self, username='admin'):
             'expanded_slices': {},
             'positions': positions,
             'dashboard_title': dash.dashboard_title,
-            'default_filters': default_filters
+            'default_filters': default_filters,
         }

         url = '/superset/save_dash/{}/'.format(dash.id)
@@ -452,7 +452,7 @@ def test_save_dash_with_dashboard_title(self, username='admin'):
             'css': '',
             'expanded_slices': {},
             'positions': positions,
-            'dashboard_title': 'new title'
+            'dashboard_title': 'new title',
         }
         url = '/superset/save_dash/{}/'.format(dash.id)
         self.get_resp(url, data=dict(data=json.dumps(data)))
@@ -513,7 +513,7 @@ def test_add_slices(self, username='admin'):
             slice_name="Name Cloud").first()
         data = {
             "slice_ids": [new_slice.data["slice_id"],
-                          existing_slice.data["slice_id"]]
+                          existing_slice.data["slice_id"]],
         }
         url = '/superset/add_slices/{}/'.format(dash.id)
         resp = self.client.post(url, data=dict(data=json.dumps(data)))
@@ -774,7 +774,7 @@ def test_viz_get_fillna_for_columns(self):
         fillna_columns = slc.viz.get_fillna_for_columns(results.df.columns)
         self.assertDictEqual(
             fillna_columns,
-            {'name': ' NULL', 'sum__num': 0}
+            {'name': ' NULL', 'sum__num': 0},
         )

diff --git a/tests/druid_tests.py b/tests/druid_tests.py
index e51475123f036..276a010822f32 100644
--- a/tests/druid_tests.py
+++ b/tests/druid_tests.py
@@ -36,33 +36,33 @@ def __reduce__(self):
             "size": 100000, "cardinality": 1504, "errorMessage": None},
         "metric1": {
             "type": "FLOAT", "hasMultipleValues": False,
-            "size": 100000, "cardinality": None, "errorMessage": None}
+            "size": 100000, "cardinality": None, "errorMessage": None},
     },
     "aggregators": {
         "metric1": {
             "type": "longSum",
             "name": "metric1",
             "fieldName": "metric1"},
     },
     "size": 300000,
-    "numRows": 5000000
+    "numRows": 5000000,
 }]

 GB_RESULT_SET = [
     {
         "version": "v1",
         "timestamp": "2012-01-01T00:00:00.000Z",
         "event": {
             "dim1": 'Canada',
             "metric1": 12345678,
-        }
+        },
     },
     {
         "version": "v1",
         "timestamp": "2012-01-01T00:00:00.000Z",
         "event": {
             "dim1": 'USA',
             "metric1": 12345678 / 2,
-        }
+        },
     },
 ]
@@ -195,10 +195,10 @@ def test_druid_sync_from_config(self):
                     "ts_column": "d",
                     "sources": [{
                         "table": "clicks",
-                        "partition": "d='{{ ds }}'"
-                    }]
-                }
-            }
+                        "partition": "d='{{ ds }}'",
+                    }],
+                },
+            },
         }

         def check():
@@ -227,9 +227,9 @@ def check():
                 "dimensions": ["affiliate_id", "second_seen"],
                 "metrics_spec": [
                     {"type": "bla", "name": "sum"},
-                    {"type": "unique", "name": "unique"}
+                    {"type": "unique", "name": "unique"},
                 ],
-            }
+            },
         }
         resp = self.client.post('/superset/sync_druid/', data=json.dumps(cfg))
         druid_ds = db.session.query(DruidDatasource).filter_by(
@@ -308,7 +308,7 @@ def test_sync_druid_perm(self, PyDruid):
         db.session.add(cluster)

         cluster.get_datasources = PickableMock(
-            return_value=['test_datasource']
+            return_value=['test_datasource'],
         )
         cluster.get_druid_version = PickableMock(return_value='0.9.1')
@@ -349,14 +349,14 @@ def test_metrics_and_post_aggs(self):
                 verbose_name='APPROXIMATE_HISTOGRAM(*)',
                 metric_type='approxHistogramFold',
                 json=json.dumps(
-                    {'type': 'approxHistogramFold', 'name': 'a_histogram'})
+                    {'type': 'approxHistogramFold', 'name': 'a_histogram'}),
             ),
             'aCustomMetric': DruidMetric(
                 metric_name='aCustomMetric',
                 verbose_name='MY_AWESOME_METRIC(*)',
                 metric_type='aCustomType',
                 json=json.dumps(
-                    {'type': 'customMetric', 'name': 'aCustomMetric'})
+                    {'type': 'customMetric', 'name': 'aCustomMetric'}),
             ),
             'quantile_p95': DruidMetric(
                 metric_name='quantile_p95',
@@ -424,7 +424,7 @@ def test_get_filters_constructs_filter_not_in(self):
         self.assertIn('field', res.filter['filter'])
         self.assertEqual(
             3,
-            len(res.filter['filter']['field'].filter['filter']['fields'])
+            len(res.filter['filter']['field'].filter['filter']['fields']),
         )

     def test_get_filters_constructs_filter_equals(self):
@@ -440,7 +440,7 @@ def test_get_filters_constructs_filter_not_equals(self):
         self.assertEqual('not', res.filter['filter']['type'])
         self.assertEqual(
             'h',
-            res.filter['filter']['field'].filter['filter']['value']
+            res.filter['filter']['field'].filter['filter']['value'],
         )

     def test_get_filters_constructs_bounds_filter(self):
diff --git a/tests/import_export_tests.py b/tests/import_export_tests.py
index 83ae46b98550f..d0b8c101c8b56 100644
--- a/tests/import_export_tests.py
+++ b/tests/import_export_tests.py
@@ -77,7 +77,7 @@ def create_slice(self, name, ds_id=None, id=None, db_name='main',
             viz_type='bubble',
             params=json.dumps(params),
             datasource_id=ds_id,
-            id=id
+            id=id,
         )

     def create_dashboard(self, title, id=0, slcs=[]):
@@ -88,7 +88,7 @@ def create_dashboard(self, title, id=0, slcs=[]):
             slices=slcs,
             position_json='{"size_y": 2, "size_x": 2}',
             slug='{}_imported'.format(title.lower()),
-            json_metadata=json.dumps(json_metadata)
+            json_metadata=json.dumps(json_metadata),
         )

     def create_table(
@@ -98,7 +98,7 @@ def create_table(
             id=id,
             schema=schema,
             table_name=name,
-            params=json.dumps(params)
+            params=json.dumps(params),
         )
         for col_name in cols_names:
             table.columns.append(
@@ -114,7 +114,7 @@ def create_druid_datasource(
             id=id,
             datasource_name=name,
             cluster_name='druid_test',
-            params=json.dumps(params)
+            params=json.dumps(params),
         )
         for col_name in cols_names:
             datasource.columns.append(
@@ -229,13 +229,13 @@ def test_export_2_dashboards(self):
         self.assert_dash_equals(birth_dash, exported_dashboards[0])
         self.assertEquals(
             birth_dash.id,
-            json.loads(exported_dashboards[0].json_metadata)['remote_id']
+            json.loads(exported_dashboards[0].json_metadata)['remote_id'],
         )

         self.assert_dash_equals(world_health_dash, exported_dashboards[1])
         self.assertEquals(
             world_health_dash.id,
-            json.loads(exported_dashboards[1].json_metadata)['remote_id']
+            json.loads(exported_dashboards[1].json_metadata)['remote_id'],
         )

         exported_tables = sorted(
@@ -337,8 +337,8 @@ def test_import_dashboard_2_slices(self):
             "filter_immune_slices": ["{}".format(e_slc.id)],
             "expanded_slices": {
                 "{}".format(e_slc.id): True,
-                "{}".format(b_slc.id): False
-            }
+                "{}".format(b_slc.id): False,
+            },
         })

         imported_dash_id = models.Dashboard.import_obj(
@@ -358,8 +358,8 @@ def test_import_dashboard_2_slices(self):
             "filter_immune_slices": ["{}".format(i_e_slc.id)],
             "expanded_slices": {
                 '{}'.format(i_e_slc.id): True,
-                '{}'.format(i_b_slc.id): False
-            }
+                '{}'.format(i_b_slc.id): False,
+            },
         }
         self.assertEquals(expected_json_metadata,
                           json.loads(imported_dash.json_metadata))
diff --git a/tests/utils_tests.py b/tests/utils_tests.py
index 1aea191c511a7..0ed1b71bc21c0 100644
--- a/tests/utils_tests.py
+++ b/tests/utils_tests.py
@@ -68,18 +68,18 @@ def test_merge_extra_filters(self):
         # copy over extra filters into empty filters
         form_data = {'extra_filters': [
             {'col': 'a', 'op': 'in', 'val': 'someval'},
-            {'col': 'B', 'op': '==', 'val': ['c1', 'c2']}
+            {'col': 'B', 'op': '==', 'val': ['c1', 'c2']},
         ]}
         expected = {'filters': [
             {'col': 'a', 'op': 'in', 'val': 'someval'},
-            {'col': 'B', 'op': '==', 'val': ['c1', 'c2']}
+            {'col': 'B', 'op': '==', 'val': ['c1', 'c2']},
         ]}
         merge_extra_filters(form_data)
         self.assertEquals(form_data, expected)
         # adds extra filters to existing filters
         form_data = {'extra_filters': [
             {'col': 'a', 'op': 'in', 'val': 'someval'},
-            {'col': 'B', 'op': '==', 'val': ['c1', 'c2']}
+            {'col': 'B', 'op': '==', 'val': ['c1', 'c2']},
         ], 'filters': [{'col': 'D', 'op': '!=', 'val': ['G1', 'g2']}]}
         expected = {'filters': [
             {'col': 'D', 'op': '!=', 'val': ['G1', 'g2']},
diff --git a/tests/viz_tests.py b/tests/viz_tests.py
index 111c86c19f9cf..0aa0b6cc23b0c 100644
--- a/tests/viz_tests.py
+++ b/tests/viz_tests.py
@@ -24,7 +24,7 @@ def test_get_fillna_returns_default_on_null_columns(self):
         test_viz = viz.BaseViz(datasource, form_data);
         self.assertEqual(
             test_viz.default_fillna,
-            test_viz.get_fillna_for_columns()
+            test_viz.get_fillna_for_columns(),
         )

     def test_get_df_returns_empty_df(self):
@@ -164,13 +164,13 @@ def test_query_obj_merges_percent_metrics(self, super_query_obj):
         }
         test_viz = viz.TableViz(datasource, form_data)
         f_query_obj = {
-            'metrics': form_data['metrics']
+            'metrics': form_data['metrics'],
         }
         super_query_obj.return_value = f_query_obj
         query_obj = test_viz.query_obj()
         self.assertEqual([
             'sum__A', 'count', 'avg__C',
-            'avg__B', 'max__Y'
+            'avg__B', 'max__Y',
         ], query_obj['metrics'])

     @patch('superset.viz.BaseViz.query_obj')
@@ -195,7 +195,7 @@ def test_query_obj_merges_all_columns(self, super_query_obj):
         datasource = Mock()
         form_data = {
             'all_columns': ['colA', 'colB', 'colC'],
-            'order_by_cols': ['["colA", "colB"]', '["colC"]']
+            'order_by_cols': ['["colA", "colB"]', '["colC"]'],
         }
         super_query_obj.return_value = {
             'columns': ['colD', 'colC'],
@@ -212,18 +212,18 @@ def test_query_obj_uses_sortby(self, super_query_obj):
         datasource = Mock()
         form_data = {
             'timeseries_limit_metric': '__time__',
-            'order_desc': False
+            'order_desc': False,
         }
         super_query_obj.return_value = {
-            'metrics': ['colA', 'colB']
+            'metrics': ['colA', 'colB'],
         }
         test_viz = viz.TableViz(datasource, form_data)
         query_obj = test_viz.query_obj()
         self.assertEqual([
-            'colA', 'colB', '__time__'
+            'colA', 'colB', '__time__',
         ], query_obj['metrics'])
         self.assertEqual([(
-            '__time__', True
+            '__time__', True,
         )], query_obj['orderby'])

     def test_should_be_timeseries_raises_when_no_granularity(self):
@@ -238,7 +238,7 @@ class PairedTTestTestCase(unittest.TestCase):
     def test_get_data_transforms_dataframe(self):
         form_data = {
             'groupby': ['groupA', 'groupB', 'groupC'],
-            'metrics': ['metric1', 'metric2', 'metric3']
+            'metrics': ['metric1', 'metric2', 'metric3'],
         }
         datasource = {'type': 'table'}
         # Test data
@@ -330,7 +330,7 @@ def test_get_data_transforms_dataframe(self):
     def test_get_data_empty_null_keys(self):
         form_data = {
             'groupby': [],
-            'metrics': ['', None]
+            'metrics': ['', None],
         }
         datasource = {'type': 'table'}
         # Test data
@@ -548,7 +548,7 @@ def test_nest_procs_returns_hierarchy(self):
             len(nest[0]['children']
                 [0]['children']
                 [0]['children']
-                [0]['children'])
+                [0]['children']),
         )

     def test_get_data_calls_correct_method(self):
diff --git a/tox.ini b/tox.ini
index e94a5e772c38b..b711dce38fd15 100644
--- a/tox.ini
+++ b/tox.ini
@@ -13,10 +13,10 @@ exclude =
     .tox
    docs
    superset/assets
+    superset/data
    superset/migrations
    superset/templates
 ignore =
-    C812
     E111
     E114
     E116
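
Context for the tox.ini hunk above: dropping C812 from the flake8 ignore list enables the missing-trailing-comma check from the flake8-commas plugin, which is what all of the one-character edits in this patch exist to satisfy. Below is a minimal sketch of the style the check enforces; it is illustrative only (the field names echo attributes touched above but the snippet is not part of the patch):

    # C812 fires when the last element of a multi-line call, list, dict,
    # or tuple is not followed by a comma before the closing bracket.
    export_fields = (
        'datasource_name', 'is_hidden', 'description', 'default_endpoint',
        'cluster_name', 'offset', 'cache_timeout', 'params',  # comma required
    )

    # One-line literals are unaffected; no trailing comma is needed here.
    date_types = ('DATE', 'TIME', 'DATETIME')

The practical payoff is diff hygiene: with the trailing comma already in place, appending an element later touches one line instead of two.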