- def _build_select(self, table, kvfilter=None, kvout=None, columns=None):
- SELECT = "SELECT %(cols)s FROM %(table)s %(where)s"
- cols = "*"
- if columns:
- cols = ",".join(columns)
- where = ""
- if kvfilter is not None:
- where = self._build_where(kvfilter, kvout)
- return SELECT % {'table': table, 'cols': cols, 'where': where}
-
- def _select(self, cursor, table, kvfilter=None, columns=None):
- kv = dict()
- select = self._build_select(table, kvfilter, kv, columns)
- cursor.execute(select, kv)
- return cursor.fetchall()
-
- def _create(self, cursor, table, columns):
- CREATE = "CREATE TABLE IF NOT EXISTS %(table)s(%(cols)s)"
- cols = ",".join(columns)
- create = CREATE % {'table': table, 'cols': cols}
- cursor.execute(create)
-
- def _update(self, cursor, table, values, kvfilter):
- UPDATE = "UPDATE %(table)s SET %(setval)s %(where)s"
- kv = dict()
-
- setval = ""
- sep = ""
- for k in values:
- mk = "setval_%s" % k
- kv[mk] = values[k]
- setval += "%s%s=:%s" % (sep, k, mk)
- sep = " , "
-
- where = self._build_where(kvfilter, kv)
-
- update = UPDATE % {'table': table, 'setval': setval, 'where': where}
- cursor.execute(update, kv)
-
- def _insert(self, cursor, table, values):
- INSERT = "INSERT INTO %(table)s VALUES(%(values)s)"
- vals = ""
- sep = ""
- for _ in values:
- vals += "%s?" % sep
- sep = ","
- insert = INSERT % {'table': table, 'values': vals}
- cursor.execute(insert, values)
-
- def _delete(self, cursor, table, kvfilter):
- DELETE = "DELETE FROM %(table)s %(where)s"
- kv = dict()
- where = self._build_where(kvfilter, kv)
- delete = DELETE % {'table': table, 'where': where}
- cursor.execute(delete, kv)
+ def _columns(self, columns=None):
+ cols = None
+ if columns is not None:
+ cols = []
+ for c in columns:
+ cols.append(self._table.columns[c])
+ else:
+ cols = self._table.columns
+ return cols
+
    def rollback(self):
        """Roll back the transaction this query participates in."""
        self._trans.rollback()
+
    def commit(self):
        """Commit the transaction this query participates in."""
        self._trans.commit()
+
    def create(self):
        """Create the backing table; a no-op if it already exists."""
        self._table.create(checkfirst=True)
+
    def drop(self):
        """Drop the backing table; a no-op if it does not exist."""
        self._table.drop(checkfirst=True)
+
+ def select(self, kvfilter=None, columns=None):
+ return self._con.execute(select(self._columns(columns),
+ self._where(kvfilter)))
+
+ def insert(self, values):
+ self._con.execute(self._table.insert(values))
+
+ def update(self, values, kvfilter):
+ self._con.execute(self._table.update(self._where(kvfilter), values))
+
+ def delete(self, kvfilter):
+ self._con.execute(self._table.delete(self._where(kvfilter)))
+
+
class FileStore(BaseStore):
    """Read-only store backed by an ini-style configuration file."""

    def __init__(self, name):
        self._filename = name
        self.is_readonly = True
        # mtime of the file when it was last parsed (None = never parsed)
        self._timestamp = None
        # cached RawConfigParser instance
        self._config = None

    def get_config(self):
        """Return the parsed configuration, re-reading it if the file changed.

        :returns: a ConfigParser.RawConfigParser for the store's file.
        :raises OSError: if the file cannot be stat'ed; the cached config
                         is invalidated before re-raising.
        """
        try:
            stat = os.stat(self._filename)
        except OSError as e:
            self.error("Unable to check config file %s: [%s]" % (
                self._filename, e))
            self._config = None
            raise
        timestamp = stat.st_mtime
        if self._config is None or timestamp > self._timestamp:
            self._config = ConfigParser.RawConfigParser()
            # Keep option names case-sensitive (default lower-cases them)
            self._config.optionxform = str
            self._config.read(self._filename)
            # BUGFIX: remember when we parsed the file; previously
            # _timestamp was never set, so the mtime check above could
            # never succeed and the file was re-parsed on every call.
            self._timestamp = timestamp
        return self._config

    def add_constraint(self, table):
        # Constraints cannot be expressed in a plain config file
        raise NotImplementedError()

    def add_index(self, index):
        # Indexes are meaningless for a config-file store
        raise NotImplementedError()
+
+
class FileQuery(Log):
    """Read-only query interface over one section of a FileStore.

    A table maps to a config-file section.  For a two-column table each
    option name is the first column and the option value the second.
    For a three-column table the option name holds the first two columns
    separated by a space ("<col1> <col2>") and the option value is the
    third column.  The last column must always be named 'value'.
    """

    def __init__(self, fstore, table, table_def, trans=True):
        # We don't need indexes in a FileQuery, so drop that info
        if isinstance(table_def, dict):
            columns = table_def['columns']
        else:
            columns = table_def
        self._fstore = fstore
        self._config = fstore.get_config()
        self._section = table
        if len(columns) > 3 or columns[-1] != 'value':
            raise ValueError('Unsupported configuration format')
        self._columns = columns

    def rollback(self):
        # Config files are read-only; nothing to roll back
        return

    def commit(self):
        # Config files are read-only; nothing to commit
        return

    def create(self):
        raise NotImplementedError

    def drop(self):
        raise NotImplementedError

    def select(self, kvfilter=None, columns=None):
        """Return matching rows from the section as lists of values.

        :param kvfilter: optional dict of column/value equality filters.
        :param columns: optional list of column names to project; the
                        full row is returned when omitted.
        :returns: list of rows (each a list of values); empty when the
                  section does not exist.
        """
        if self._section not in self._config.sections():
            return []

        # BUGFIX: kvfilter defaults to None but was previously used
        # directly in `in` tests, raising TypeError; treat None as
        # "no filtering".
        if kvfilter is None:
            kvfilter = {}

        opts = self._config.options(self._section)

        # NOTE(review): falsy filter values ('' / 0 / None) are treated
        # as absent by the `if prefix and ...` style tests below —
        # preserved as-is for compatibility.
        prefix = None
        prefix_ = ''
        if self._columns[0] in kvfilter:
            prefix = kvfilter[self._columns[0]]
            prefix_ = prefix + ' '

        name = None
        if len(self._columns) == 3 and self._columns[1] in kvfilter:
            name = kvfilter[self._columns[1]]

        value = None
        if self._columns[-1] in kvfilter:
            value = kvfilter[self._columns[-1]]

        res = []
        for o in opts:
            if len(self._columns) == 3:
                # 3 cols: option name is "<col1> <col2>", value is col3
                if prefix and not o.startswith(prefix_):
                    continue

                col1, col2 = o.split(' ', 1)
                if name and col2 != name:
                    continue

                col3 = self._config.get(self._section, o)
                if value and col3 != value:
                    continue

                r = [col1, col2, col3]
            else:
                # 2 cols: option name is col1, value is col2
                if prefix and o != prefix:
                    continue
                r = [o, self._config.get(self._section, o)]

            if columns:
                # Project only the requested columns, in request order
                s = []
                for c in columns:
                    s.append(r[self._columns.index(c)])
                res.append(s)
            else:
                res.append(r)

        self.debug('SELECT(%s, %s, %s) -> %s' % (self._section,
                                                 repr(kvfilter),
                                                 repr(columns),
                                                 repr(res)))
        return res

    def insert(self, values):
        raise NotImplementedError

    def update(self, values, kvfilter):
        raise NotImplementedError

    def delete(self, kvfilter):
        raise NotImplementedError
+
+
+class Store(Log):
+ # Static, Store-level variables
+ _is_upgrade = False
+ __cleanups = {}
+
+ # Static, class-level variables
    # Either set this to False or implement _cleanup in child classes
+ _should_cleanup = True
+
+ def __init__(self, config_name=None, database_url=None):
+ if config_name is None and database_url is None:
+ raise ValueError('config_name or database_url must be provided')
+ if config_name:
+ if config_name not in cherrypy.config:
+ raise NameError('Unknown database %s' % config_name)
+ name = cherrypy.config[config_name]
+ else:
+ name = database_url
+ if name.startswith('configfile://'):
+ _, filename = name.split('://')
+ self._db = FileStore(filename)
+ self._query = FileQuery
+ else:
+ self._db = SqlStore.get_connection(name)
+ self._query = SqlQuery
+
+ if not self._is_upgrade:
+ self._check_database()
+ if self._should_cleanup:
+ self._schedule_cleanup()
+
    def _schedule_cleanup(self):
        """Start a background task that periodically considers cleanup.

        Scheduled at most once per Store subclass (tracked in the
        class-level __cleanups dict); readonly databases are skipped
        since they cannot be modified.
        """
        store_name = self.__class__.__name__
        if self.is_readonly:
            # No use in cleanups on a readonly database
            self.debug('Not scheduling cleanup for %s due to readonly' %
                       store_name)
            return
        if store_name in Store.__cleanups:
            # This class was already scheduled, skip
            return
        self.debug('Scheduling cleanups for %s' % store_name)
        # Check once every minute whether we need to clean
        task = cherrypy.process.plugins.BackgroundTask(
            60, self._maybe_run_cleanup)
        task.start()
        Store.__cleanups[store_name] = task
+
+ def _maybe_run_cleanup(self):
+ # Let's see if we need to do cleanup
+ last_clean = self.load_options('dbinfo').get('%s_last_clean' %
+ self.__class__.__name__,
+ {})
+ time_diff = cherrypy.config.get('cleanup_interval', 30) * 60
+ next_ts = int(time.time()) - time_diff
+ self.debug('Considering cleanup for %s: %s. Next at: %s'
+ % (self.__class__.__name__, last_clean, next_ts))
+ if ('timestamp' not in last_clean or
+ int(last_clean['timestamp']) <= next_ts):
+ # First store the current time so that other servers don't start
+ self.save_options('dbinfo', '%s_last_clean'
+ % self.__class__.__name__,
+ {'timestamp': int(time.time()),
+ 'removed_entries': -1})
+
+ # Cleanup has been long enough ago, let's run
+ self.debug('Cleaning up for %s' % self.__class__.__name__)
+ removed_entries = self._cleanup()
+ self.debug('Cleaned up %i entries for %s' %
+ (removed_entries, self.__class__.__name__))
+ self.save_options('dbinfo', '%s_last_clean'
+ % self.__class__.__name__,
+ {'timestamp': int(time.time()),
+ 'removed_entries': removed_entries})
+
    def _cleanup(self):
        """Default cleanup hook; subclasses override to purge stale rows.

        :returns: the number of rows cleaned up (0 here).
        """
        # The default cleanup is to do nothing
        # This function should return the number of rows it cleaned up.
        # This information may be used to automatically tune the clean period.
        self.error('Cleanup for %s not implemented' %
                   self.__class__.__name__)
        return 0
+
    def _code_schema_version(self):
        """Return the schema version the current code expects."""
        # This function makes it possible for separate plugins to have
        # different schema versions. We default to the global schema
        # version.
        return CURRENT_SCHEMA_VERSION
+
+ def _get_schema_version(self):
+ # We are storing multiple versions: one per class
+ # That way, we can support plugins with differing schema versions from
+ # the main codebase, and even in the same database.
+ q = self._query(self._db, 'dbinfo', OPTIONS_TABLE, trans=False)
+ q.create()
+ q._con.close() # pylint: disable=protected-access
+ cls_name = self.__class__.__name__
+ current_version = self.load_options('dbinfo').get('%s_schema'
+ % cls_name, {})
+ if 'version' in current_version:
+ return int(current_version['version'])
+ else:
+ # Also try the old table name.
+ # "scheme" was a typo, but we need to retain that now for compat
+ fallback_version = self.load_options('dbinfo').get('scheme',
+ {})
+ if 'version' in fallback_version:
+ # Explanation for this is in def upgrade_database(self)
+ return -1
+ else:
+ return None
+
+ def _check_database(self):
+ if self.is_readonly:
+ # If the database is readonly, we cannot do anything to the
+ # schema. Let's just return, and assume people checked the
+ # upgrade notes
+ return
+
+ current_version = self._get_schema_version()
+ if current_version is None:
+ self.error('Database initialization required! ' +
+ 'Please run ipsilon-upgrade-database')
+ raise DatabaseError('Database initialization required for %s' %
+ self.__class__.__name__)
+ if current_version != self._code_schema_version():
+ self.error('Database upgrade required! ' +
+ 'Please run ipsilon-upgrade-database')
+ raise DatabaseError('Database upgrade required for %s' %
+ self.__class__.__name__)
+
+ def _store_new_schema_version(self, new_version):
+ cls_name = self.__class__.__name__
+ self.save_options('dbinfo', '%s_schema' % cls_name,
+ {'version': new_version})
+
    def _initialize_schema(self):
        """Create this store's tables at the current schema version.

        Must be implemented by subclasses.
        """
        raise NotImplementedError()
+
    def _upgrade_schema(self, old_version):
        """Upgrade the schema one or more steps from old_version.

        Must be implemented by subclasses; see notes below.
        """
        # Datastores need to figure out what to do with bigger old_versions
        # themselves.
        # They might implement downgrading if that's feasible, or just throw
        # NotImplementedError
        # Should return the new schema version
        raise NotImplementedError()
+
+ def upgrade_database(self):
+ # Do whatever is needed to get schema to current version
+ old_schema_version = self._get_schema_version()
+ if old_schema_version is None:
+ # Just initialize a new schema
+ self._initialize_schema()
+ self._store_new_schema_version(self._code_schema_version())
+ elif old_schema_version == -1:
+ # This is a special-case from 1.0: we only created tables at the
+ # first time they were actually used, but the upgrade code assumes
+ # that the tables exist. So let's fix this.
+ self._initialize_schema()
+ # The old version was schema version 1
+ self._store_new_schema_version(1)
+ self.upgrade_database()
+ elif old_schema_version != self._code_schema_version():
+ # Upgrade from old_schema_version to code_schema_version
+ self.debug('Upgrading from schema version %i' % old_schema_version)
+ new_version = self._upgrade_schema(old_schema_version)
+ if not new_version:
+ error = ('Schema upgrade error: %s did not provide a ' +
+ 'new schema version number!' %
+ self.__class__.__name__)
+ self.error(error)
+ raise Exception(error)
+ self._store_new_schema_version(new_version)
+ # Check if we are now up-to-date
+ self.upgrade_database()
+
    @property
    def is_readonly(self):
        """Whether the underlying database store is read-only."""
        return self._db.is_readonly