X-Git-Url: https://git.jsancho.org/?p=datasette-pytables.git;a=blobdiff_plain;f=datasette_pytables%2F__init__.py;h=4c1cc0f9837aff45375c1f6c1654b8e71d359094;hp=2e6920eceab3f96c3f28e17a566962023534fb44;hb=29e72871ce6657fd858dddb1689de8b64d7b6f54;hpb=db48f57c02549d132c28e0d382ffbd47c6d980a9

diff --git a/datasette_pytables/__init__.py b/datasette_pytables/__init__.py
index 2e6920e..4c1cc0f 100644
--- a/datasette_pytables/__init__.py
+++ b/datasette_pytables/__init__.py
@@ -1,90 +1,138 @@
-from moz_sql_parser import parse
-import re
 import tables
+import datasette_connectors as dc
+from .utils import parse_sql
 
-_connector_type = 'pytables'
 
-def inspect(path):
-    "Open file and return tables info"
-    h5tables = {}
-    views = []
-    h5file = tables.open_file(path)
+class PyTablesConnection(dc.Connection):
+    def __init__(self, path, connector):
+        super().__init__(path, connector)
+        self.h5file = tables.open_file(path)
+
 
-    for table in filter(lambda node: not(isinstance(node, tables.group.Group)), h5file):
+class PyTablesConnector(dc.Connector):
+    connector_type = 'pytables'
+    connection_class = PyTablesConnection
+
+    operators = {
+        'eq': '==',
+        'neq': '!=',
+        'gt': '>',
+        'gte': '>=',
+        'lt': '<',
+        'lte': '<=',
+        'and': '&',
+        'or': '|',
+        'binary_and': '&',
+        'binary_or': '|',
+    }
+
+    def _serialize_table_name(self, table_name):
+        return table_name.replace('/', '%')
+
+    def _deserialize_table_name(self, table_name):
+        return table_name.replace('%', '/')
+
+    def table_names(self):
+        return [
+            self._serialize_table_name(node._v_pathname)
+            for node in self.conn.h5file
+            if not(isinstance(node, tables.group.Group))
+        ]
+
+    def table_count(self, table_name):
+        table = self.conn.h5file.get_node(self._deserialize_table_name(table_name))
+        return int(table.nrows)
+
+    def table_info(self, table_name):
+        table = self.conn.h5file.get_node(self._deserialize_table_name(table_name))
+        columns = [
+            {
+                'name': 'value',
+                'type': table.dtype.name,
+            }
+        ]
+        if isinstance(table, tables.table.Table):
+            columns = [
+                {
+                    'name': colname,
+                    'type': table.coltypes[colname],
+                }
+                for colname in table.colnames
+            ]
+
+        return [
+            {
+                'cid': cid,
+                'name': column['name'],
+                'type': column['type'],
+                'notnull': True,
+                'default_value': None,
+                'is_pk': False,
+            }
+            for cid, column in enumerate(columns)
+        ]
+
+    def hidden_table_names(self):
+        return []
+
+    def detect_spatialite(self):
+        return False
+
+    def view_names(self):
+        return []
+
+    def detect_fts(self, table_name):
+        return False
+
+    def foreign_keys(self, table_name):
+        return []
+
+    def table_exists(self, table_name):
+        try:
+            self.conn.h5file.get_node(self._deserialize_table_name(table_name))
+            return True
+        except:
+            return False
+
+    def table_definition(self, table_type, table_name):
+        table_name = self._deserialize_table_name(table_name)
+        table = self.conn.h5file.get_node(table_name)
         colnames = ['value']
         if isinstance(table, tables.table.Table):
             colnames = table.colnames
-        h5tables[table._v_pathname] = {
-            'name': table._v_pathname,
-            'columns': colnames,
-            'primary_keys': [],
-            'count': int(table.nrows),
-            'label_column': None,
-            'hidden': False,
-            'fts_table': None,
-            'foreign_keys': {'incoming': [], 'outgoing': []},
-        }
-
-    h5file.close()
-    return h5tables, views, _connector_type
-
-def _parse_sql(sql, params):
-    # Table name
-    sql = re.sub('(?i)from \[(.*)]', 'from "\g<1>"', sql)
-    # Params
-    for param in params:
-        sql = sql.replace(":" + param, param)
-
-    try:
-        parsed = parse(sql)
-    except:
-        # Propably it's a PyTables expression
-        for token in ['group by', 'order by', 'limit', '']:
-            res = re.search('(?i)where (.*)' + token, sql)
-            if res:
-                modified_sql = re.sub('(?i)where (.*)(' + token + ')', '\g<2>', sql)
-                parsed = parse(modified_sql)
-                parsed['where'] = res.group(1).strip()
-                break
-
-    # Always a list of fields
-    if type(parsed['select']) is not list:
-        parsed['select'] = [parsed['select']]
-
-    return parsed
-
-_operators = {
-    'eq': '==',
-    'neq': '!=',
-    'gt': '>',
-    'gte': '>=',
-    'lt': '<',
-    'lte': '<=',
-    'and': '&',
-    'or': '|',
-}
-
-class Connection:
-    def __init__(self, path):
-        self.path = path
-        self.h5file = tables.open_file(path)
-
-    def execute(self, sql, params=None, truncate=False, page_size=None, max_returned_rows=None):
-        if params is None:
-            params = {}
-        rows = []
+        return 'CREATE TABLE {} ({})'.format(
+            table_name,
+            ', '.join(colnames),
+        )
+
+    def indices_definition(self, table_name):
+        return []
+
+    def execute(
+        self,
+        sql,
+        params=None,
+        truncate=False,
+        custom_time_limit=None,
+        page_size=None,
+        log_sql_errors=True,
+    ):
+        results = []
         truncated = False
-        description = []
+        description = ()
+
+        # Some Datasette queries uses glob operand, not supported by Pytables
+        if ' glob ' in sql:
+            return results, truncated, description
 
-        parsed_sql = _parse_sql(sql, params)
+        parsed_sql = parse_sql(sql, params)
 
-        if parsed_sql['from'] == 'sqlite_master':
-            rows = self._execute_datasette_query(sql, params)
-            description = (('value',))
-            return rows, truncated, description
+        while isinstance(parsed_sql['from'], dict):
+            # Pytables does not support subqueries
+            parsed_sql['from'] = parsed_sql['from']['value']['from']
 
-        table = self.h5file.get_node(parsed_sql['from'])
+        table = self.conn.h5file.get_node(self._deserialize_table_name(parsed_sql['from']))
         table_rows = []
         fields = parsed_sql['select']
         colnames = ['value']
@@ -95,12 +143,16 @@ class Connection:
         start = 0
         end = table.nrows
 
-        # Use 'where' statement or get all the rows
-        def _cast_param(field, pname):
-            # Cast value to the column type
+        def _get_field_type(field):
             coltype = table.dtype.name
             if type(table) is tables.table.Table:
                 coltype = table.coltypes[field]
+            return coltype
+
+        # Use 'where' statement or get all the rows
+        def _cast_param(field, pname):
+            # Cast value to the column type
+            coltype = _get_field_type(field)
             fcast = None
             if coltype == 'string':
                 fcast = str
@@ -121,7 +173,7 @@ class Connection:
                 subexpr = [_translate_where(e) for e in where[operator]]
                 subexpr = filter(lambda e: e, subexpr)
                 subexpr = ["({})".format(e) for e in subexpr]
-                expr = " {} ".format(_operators[operator]).join(subexpr)
+                expr = " {} ".format(self.operators[operator]).join(subexpr)
             elif operator == 'exists':
                 pass
             elif where == {'eq': ['rowid', 'p0']}:
@@ -131,12 +183,22 @@ class Connection:
                 start = int(params['p0']) + 1
             else:
                 left, right = where[operator]
-                if left in params:
+
+                if isinstance(left, dict):
+                    left = "(" + _translate_where(left) + ")"
+                elif left in params:
                     _cast_param(right, left)
+
+                if isinstance(right, dict):
+                    right = "(" + _translate_where(right) + ")"
                 elif right in params:
                     _cast_param(left, right)
 
-                expr = "{left} {operator} {right}".format(left=left, operator=_operators.get(operator, operator), right=right)
+                expr = "{left} {operator} {right}".format(
+                    left=left,
+                    operator=self.operators.get(operator, operator),
+                    right=right,
+                )
 
             return expr
 
@@ -161,6 +223,11 @@ class Connection:
         if 'limit' in parsed_sql:
             limit = int(parsed_sql['limit'])
 
+        # Offset
+        offset = None
+        if 'offset' in parsed_sql:
+            offset = int(parsed_sql['offset'])
+
         # Truncate if needed
         if page_size and max_returned_rows and truncate:
             if max_returned_rows == page_size:
@@ -204,89 +271,49 @@ class Connection:
                     return row
             return get_row_value
 
-        if len(fields) == 1 and type(fields[0]['value']) is dict and \
-           fields[0]['value'].get('count') == '*':
-            rows.append(Row({'count(*)': int(table.nrows)}))
-        else:
-            get_rowid = make_get_rowid()
-            get_row_value = make_get_row_value()
-            count = 0
-            for table_row in table_rows:
-                count += 1
-                if limit and count > limit:
-                    break
-                if truncate and max_returned_rows and count > max_returned_rows:
-                    truncated = True
-                    break
-                row = Row()
-                for field in fields:
+        # Get results
+        get_rowid = make_get_rowid()
+        get_row_value = make_get_row_value()
+        if offset:
+            table_rows = table_rows[offset:]
+        count = 0
+        for table_row in table_rows:
+            count += 1
+            if limit is not None and count > limit:
+                break
+            if truncate and max_returned_rows and count > max_returned_rows:
+                truncated = True
+                break
+            row = {}
+            for field in fields:
+                field_name = field
+                if isinstance(field, dict):
                     field_name = field['value']
-                    if type(field_name) is dict and 'distinct' in field_name:
-                        field_name = field_name['distinct']
-                    if field_name == 'rowid':
-                        row['rowid'] = get_rowid(table_row)
-                    elif field_name == '*':
-                        for col in colnames:
-                            row[col] = normalize_field_value(get_row_value(table_row, col))
+                    if isinstance(field_name, dict) and 'distinct' in field_name:
+                        field_name = field_name['distinct']
+                if field_name == 'rowid':
+                    row['rowid'] = get_rowid(table_row)
+                elif field_name == '*':
+                    for col in colnames:
+                        row[col] = normalize_field_value(get_row_value(table_row, col))
+                elif isinstance(field_name, dict):
+                    if field_name.get('count') == '*':
+                        row['count(*)'] = int(table.nrows)
+                    elif field_name.get('json_type'):
+                        field_name = field_name.get('json_type')
+                        row['json_type(' + field_name + ')'] = _get_field_type(field_name)
                     else:
-                        row[field_name] = normalize_field_value(get_row_value(table_row, field_name))
-                rows.append(row)
+                        raise Exception("Function not recognized")
+                else:
+                    row[field_name] = normalize_field_value(get_row_value(table_row, field_name))
+            results.append(row)
 
         # Prepare query description
-        for field in [f['value'] for f in fields]:
+        for field in [f['value'] if isinstance(f, dict) else f for f in fields]:
             if field == '*':
                 for col in colnames:
-                    description.append((col,))
-            else:
-                description.append((field,))
-
-        # Return the rows
-        return rows, truncated, tuple(description)
-
-    def _execute_datasette_query(self, sql, params):
-        "Datasette special queries for getting tables info"
-        if sql == "SELECT count(*) from sqlite_master WHERE type = 'view' and name=:n":
-            row = Row()
-            row['count(*)'] = 0
-            return [row]
-        elif sql == 'select sql from sqlite_master where name = :n and type="table"':
-            try:
-                table = self.h5file.get_node(params['n'])
-                colnames = ['value']
-                if type(table) is tables.table.Table:
-                    colnames = table.colnames
-                row = Row()
-                row['sql'] = 'CREATE TABLE {} ({})'.format(params['n'], ", ".join(colnames))
-                return [row]
-            except:
-                return []
-        else:
-            raise Exception("SQLite queries cannot be executed with this connector")
-
-
-class Row(list):
-    def __init__(self, values=None):
-        self.labels = []
-        self.values = []
-        if values:
-            for idx in values:
-                self.__setitem__(idx, values[idx])
-
-    def __setitem__(self, idx, value):
-        if type(idx) is str:
-            if idx in self.labels:
-                self.values[self.labels.index(idx)] = value
+                    description += ((col,),)
             else:
-                self.labels.append(idx)
-                self.values.append(value)
-        else:
-            self.values[idx] = value
-
-    def __getitem__(self, idx):
-        if type(idx) is str:
-            return self.values[self.labels.index(idx)]
-        else:
-            return self.values[idx]
+                description += ((field,),)
 
-    def __iter__(self):
-        return self.values.__iter__()
+        return results, truncated, description
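
The commit above moves the plugin onto datasette-connectors: the old standalone Connection/Row machinery is replaced by a PyTablesConnector whose operators map turns parsed SQL comparisons into PyTables condition strings such as "(energy >= 3) & (energy < 7)". The snippet below is an illustrative aside, not part of the patch: a stripped-down translate() helper stands in for the patch's _translate_where(), the operator table is a subset copied from the connector, and the HDF5 file, table and column names (demo.h5, particles, energy) are invented for the example. It only demonstrates that a condition string built with this mapping is something PyTables' documented Table.read_where() can evaluate directly.

    # Illustrative sketch only -- not part of the patch above.
    import tables

    # Subset of PyTablesConnector.operators
    OPERATORS = {'eq': '==', 'neq': '!=', 'gt': '>', 'gte': '>=',
                 'lt': '<', 'lte': '<=', 'and': '&', 'or': '|'}

    def translate(where):
        # Each node has one key: a boolean combinator or a comparison operator.
        op, args = next(iter(where.items()))
        if op in ('and', 'or'):
            return ' {} '.format(OPERATORS[op]).join(
                '({})'.format(translate(sub)) for sub in args)
        left, right = args
        return '{} {} {}'.format(left, OPERATORS[op], right)

    class Particle(tables.IsDescription):
        name = tables.StringCol(16)
        energy = tables.Float64Col()

    with tables.open_file('demo.h5', mode='w') as h5file:
        # Build a small hypothetical table to query.
        table = h5file.create_table('/', 'particles', Particle)
        row = table.row
        for i in range(10):
            row['name'] = 'p{}'.format(i).encode()
            row['energy'] = float(i)
            row.append()
        table.flush()

        condition = translate({'and': [{'gte': ['energy', 3]},
                                       {'lt': ['energy', 7]}]})
        # condition == '(energy >= 3) & (energy < 7)'
        print(table.read_where(condition))  # rows with 3 <= energy < 7

Running the sketch prints the rows whose energy falls in [3, 7); this is the same kind of expression that the unchanged parts of execute() presumably hand to PyTables' in-kernel query machinery.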