diff --git a/datasette_pytables/__init__.py b/datasette_pytables/__init__.py
index 4a7c5c8..2e6920e 100644
--- a/datasette_pytables/__init__.py
+++ b/datasette_pytables/__init__.py
@@ -1,4 +1,3 @@
-from collections import OrderedDict
 from moz_sql_parser import parse
 import re
 import tables
@@ -12,7 +11,7 @@ def inspect(path):
     h5file = tables.open_file(path)
 
     for table in filter(lambda node: not(isinstance(node, tables.group.Group)), h5file):
-        colnames = []
+        colnames = ['value']
         if isinstance(table, tables.table.Table):
             colnames = table.colnames
 
@@ -41,12 +40,12 @@ def _parse_sql(sql, params):
         parsed = parse(sql)
     except:
         # Probably it's a PyTables expression
-        for token in ['group by', 'order by', 'limit']:
+        for token in ['group by', 'order by', 'limit', '']:
             res = re.search('(?i)where (.*)' + token, sql)
             if res:
                 modified_sql = re.sub('(?i)where (.*)(' + token + ')', r'\g<2>', sql)
                 parsed = parse(modified_sql)
-                parsed['where'] = res.group(1)
+                parsed['where'] = res.group(1).strip()
                 break
 
     # Always a list of fields
@@ -71,7 +70,7 @@ class Connection:
         self.path = path
         self.h5file = tables.open_file(path)
 
-    def execute(self, sql, params=None, truncate=False):
+    def execute(self, sql, params=None, truncate=False, page_size=None, max_returned_rows=None):
         if params is None:
             params = {}
         rows = []
@@ -79,9 +78,18 @@ class Connection:
         description = []
 
         parsed_sql = _parse_sql(sql, params)
+
+        if parsed_sql['from'] == 'sqlite_master':
+            rows = self._execute_datasette_query(sql, params)
+            description = (('value',),)
+            return rows, truncated, description
+
         table = self.h5file.get_node(parsed_sql['from'])
         table_rows = []
         fields = parsed_sql['select']
+        colnames = ['value']
+        if type(table) is tables.table.Table:
+            colnames = table.colnames
 
         query = ''
         start = 0
@@ -90,7 +98,9 @@ class Connection:
         # Use 'where' statement or get all the rows
         def _cast_param(field, pname):
             # Cast value to the column type
-            coltype = table.coltypes[field]
+            coltype = table.dtype.name
+            if type(table) is tables.table.Table:
+                coltype = table.coltypes[field]
             fcast = None
             if coltype == 'string':
                 fcast = str
@@ -103,6 +113,7 @@ class Connection:
 
         def _translate_where(where):
             # Translate SQL to PyTables expression
+            nonlocal start, end
             expr = ''
             operator = list(where)[0]
 
@@ -114,9 +125,10 @@ class Connection:
             elif operator == 'exists':
                 pass
             elif where == {'eq': ['rowid', 'p0']}:
-                nonlocal start, end
                 start = int(params['p0'])
                 end = start + 1
+            elif where == {'gt': ['rowid', 'p0']}:
+                start = int(params['p0']) + 1
             else:
                 left, right = where[operator]
                 if left in params:
@@ -134,83 +146,147 @@ class Connection:
         else:
             query = parsed_sql['where']
 
+        # Sort by column
+        orderby = ''
+        if 'orderby' in parsed_sql:
+            orderby = parsed_sql['orderby']
+            if type(orderby) is list:
+                orderby = orderby[0]
+            orderby = orderby['value']
+            if orderby == 'rowid':
+                orderby = ''
+
         # Limit number of rows
+        limit = None
         if 'limit' in parsed_sql:
-            max_rows = int(parsed_sql['limit'])
-            if end - start > max_rows:
-                end = start + max_rows
+            limit = int(parsed_sql['limit'])
+
+        # Truncate if needed
+        if page_size and max_returned_rows and truncate:
+            if max_returned_rows == page_size:
+                max_returned_rows += 1
 
         # Execute query
         if query:
             table_rows = table.where(query, params, start, end)
+        elif orderby:
+            table_rows = table.itersorted(orderby, start=start, stop=end)
         else:
             table_rows = table.iterrows(start, end)
 
         # Prepare rows
-        if len(fields) == 1 and type(fields[0]['value']) is dict and \
-           fields[0]['value'].get('count') == '*':
-            rows.append(Row({'count(*)': int(table.nrows)}))
-        else:
+        def normalize_field_value(value):
+            if type(value) is bytes:
+                return value.decode('utf-8')
+            elif not type(value) in (int, float, complex):
+                return str(value)
+            else:
+                return value
+
+        def make_get_rowid():
             if type(table) is tables.table.Table:
-                for table_row in table_rows:
-                    row = Row()
-                    for field in fields:
-                        field_name = field['value']
-                        if type(field_name) is dict and 'distinct' in field_name:
-                            field_name = field_name['distinct']
-                        if field_name == 'rowid':
-                            row['rowid'] = int(table_row.nrow)
-                        elif field_name == '*':
-                            for col in table.colnames:
-                                value = table_row[col]
-                                if type(value) is bytes:
-                                    value = value.decode('utf-8')
-                                row[col] = value
-                        else:
-                            row[field_name] = table_row[field_name]
-                    rows.append(row)
+                def get_rowid(row):
+                    return int(row.nrow)
             else:
-                # Any kind of array
                 rowid = start - 1
-                for table_row in table_rows:
-                    row = Row()
+                def get_rowid(row):
+                    nonlocal rowid
                     rowid += 1
-                    for field in fields:
-                        field_name = field['value']
-                        if type(field_name) is dict and 'distinct' in field_name:
-                            field_name = field_name['distinct']
-                        if field_name == 'rowid':
-                            row['rowid'] = rowid
-                        else:
-                            value = table_row
-                            if type(value) is bytes:
-                                value = value.decode('utf-8')
-                            row['value'] = value
-                    rows.append(row)
+                    return rowid
+            return get_rowid
+
+        def make_get_row_value():
+            if type(table) is tables.table.Table:
+                def get_row_value(row, field):
+                    return row[field]
+            else:
+                def get_row_value(row, field):
+                    return row
+            return get_row_value
+
+        if len(fields) == 1 and type(fields[0]['value']) is dict and \
+           fields[0]['value'].get('count') == '*':
+            rows.append(Row({'count(*)': int(table.nrows)}))
+        else:
+            get_rowid = make_get_rowid()
+            get_row_value = make_get_row_value()
+            count = 0
+            for table_row in table_rows:
+                count += 1
+                if limit and count > limit:
+                    break
+                if truncate and max_returned_rows and count > max_returned_rows:
+                    truncated = True
+                    break
+                row = Row()
+                for field in fields:
+                    field_name = field['value']
+                    if type(field_name) is dict and 'distinct' in field_name:
+                        field_name = field_name['distinct']
+                    if field_name == 'rowid':
+                        row['rowid'] = get_rowid(table_row)
+                    elif field_name == '*':
+                        for col in colnames:
+                            row[col] = normalize_field_value(get_row_value(table_row, col))
+                    else:
+                        row[field_name] = normalize_field_value(get_row_value(table_row, field_name))
+                rows.append(row)
 
         # Prepare query description
         for field in [f['value'] for f in fields]:
             if field == '*':
-                if type(table) is tables.table.Table:
-                    for col in table.colnames:
-                        description.append((col,))
-                else:
-                    description.append(('value',))
+                for col in colnames:
+                    description.append((col,))
             else:
                 description.append((field,))
 
         # Return the rows
-        if truncate:
-            return rows, truncated, tuple(description)
+        return rows, truncated, tuple(description)
+
+    def _execute_datasette_query(self, sql, params):
+        "Datasette special queries for getting tables info"
+        if sql == "SELECT count(*) from sqlite_master WHERE type = 'view' and name=:n":
+            row = Row()
+            row['count(*)'] = 0
+            return [row]
+        elif sql == 'select sql from sqlite_master where name = :n and type="table"':
+            try:
+                table = self.h5file.get_node(params['n'])
+                colnames = ['value']
+                if type(table) is tables.table.Table:
+                    colnames = table.colnames
+                row = Row()
+                row['sql'] = 'CREATE TABLE {} ({})'.format(params['n'], ", ".join(colnames))
+                return [row]
+            except:
+                return []
+        else:
+            raise Exception("SQLite queries cannot be executed with this connector")
+
+
+class Row(list):
+    def __init__(self, values=None):
+        self.labels = []
+        self.values = []
+        if values:
+            for idx in values:
+                self.__setitem__(idx, values[idx])
+
+    def __setitem__(self, idx, value):
+        if type(idx) is str:
+            if idx in self.labels:
+                self.values[self.labels.index(idx)] = value
+            else:
+                self.labels.append(idx)
+                self.values.append(value)
         else:
-            return rows
+            self.values[idx] = value
 
-class Row(OrderedDict):
-    def __getitem__(self, label):
-        if type(label) is int:
-            return super(OrderedDict, self).__getitem__(list(self.keys())[label])
+    def __getitem__(self, idx):
+        if type(idx) is str:
+            return self.values[self.labels.index(idx)]
         else:
-            return super(OrderedDict, self).__getitem__(label)
+            return self.values[idx]
 
     def __iter__(self):
-        return self.values().__iter__()
+        return self.values.__iter__()
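
Usage sketch (editor's illustration, not part of the patch): a minimal example of the patched execute() signature and the new list-backed Row class. The file name example.h5, the /mytable node, and its schema are assumptions made up for this sketch, and the double-quoted table path is only one plausible spelling; exactly which SQL forms are accepted depends on moz_sql_parser and on the parts of _parse_sql the diff does not touch.

import tables
from datasette_pytables import Connection

# Build a tiny HDF5 file so the sketch is self-contained
# (hypothetical schema, not taken from the patch).
class Particle(tables.IsDescription):
    name = tables.StringCol(16)
    energy = tables.Float64Col()

with tables.open_file('example.h5', 'w') as h5f:
    t = h5f.create_table('/', 'mytable', Particle)
    r = t.row
    for i in range(5):
        r['name'] = 'p%d' % i
        r['energy'] = float(i)
        r.append()
    t.flush()

conn = Connection('example.h5')

# The new keyword arguments mirror what Datasette passes in. When
# page_size == max_returned_rows, the connector bumps the limit by one
# internally so it can flag that more rows exist; here 5 rows > 2, so
# truncated comes back True. Adjust the table-path quoting to whatever
# your moz_sql_parser version accepts.
rows, truncated, description = conn.execute(
    'SELECT * FROM "/mytable"',
    truncate=True, page_size=2, max_returned_rows=2)

# Row allows both label- and position-based access, and iterating it
# yields values only, mimicking the sqlite3 rows Datasette expects.
first = rows[0]
print(first['name'], first[1])
print(list(first))
print(truncated, [col[0] for col in description])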