1 # Copyright (C) 2013 Ipsilon project Contributors, for license see COPYING
5 from ipsilon.util.log import Log
6 from sqlalchemy import create_engine
7 from sqlalchemy import MetaData, Table, Column, Text
8 from sqlalchemy.pool import QueuePool, SingletonThreadPool
9 from sqlalchemy.schema import (PrimaryKeyConstraint, Index, AddConstraint,
11 from sqlalchemy.sql import select, and_
# Version of the on-disk schema this code expects (checked in _check_database).
19 CURRENT_SCHEMA_VERSION = 2
# Layout of key/value option tables: rows are (name, option, value) keyed by
# (name, option).  NOTE(review): the closing brace of this dict literal is on
# a line elided from this excerpt.
20 OPTIONS_TABLE = {'columns': ['name', 'option', 'value'],
21 'primary_key': ('name', 'option'),
22 'indexes': [('name',)]
# Layout of uuid-keyed data tables: rows are (uuid, name, value) keyed by
# (uuid, name).  NOTE(review): closing brace elided here as well.
24 UNIQUE_DATA_TABLE = {'columns': ['uuid', 'name', 'value'],
25 'primary_key': ('uuid', 'name'),
26 'indexes': [('uuid',)]
# Raised when the database needs initialization or a schema upgrade.
30 class DatabaseError(Exception):
# NOTE(review): lines are elided between the exception above and the stubs
# below; the stubs appear to belong to a base store class whose header is
# not visible in this excerpt.
35 # Some helper functions used for upgrades
# Abstract hook: concrete stores add a primary-key/unique constraint.
36 def add_constraint(self, table):
37 raise NotImplementedError()
# Abstract hook: concrete stores create an index.
39 def add_index(self, index):
40 raise NotImplementedError()
# SQL-backed store wrapping a SQLAlchemy engine, one shared instance per
# configured database name.
43 class SqlStore(BaseStore):
# Return (and cache) the SqlStore for this database name.  Presumably a
# classmethod; its decorator line is elided from this excerpt.
47 def get_connection(cls, name):
48 if name not in cls.__instances:
49 if cherrypy.config.get('db.conn.log', False):
50 logging.debug('SqlStore new: %s', name)
51 cls.__instances[name] = SqlStore(name)
52 return cls.__instances[name]
54 def __init__(self, name):
# Optional per-connection logging, controlled by cherrypy config.
55 self.db_conn_log = cherrypy.config.get('db.conn.log', False)
56 self.debug('SqlStore init: %s' % name)
# Bare filesystem paths are treated as SQLite database files.
59 if '://' not in engine_name:
60 engine_name = 'sqlite:///' + engine_name
61 # This pool size is per configured database. The minimum needed,
62 # determined by binary search, is 23. We're using 25 so we have a bit
63 # more playroom, and then the overflow should make sure things don't
64 # break when we suddenly need more.
65 pool_args = {'poolclass': QueuePool,
68 if engine_name.startswith('sqlite://'):
69 # It's not possible to share connections for SQLite between
70 # threads, so let's use the SingletonThreadPool for them
71 pool_args = {'poolclass': SingletonThreadPool}
72 self._dbengine = create_engine(engine_name, **pool_args)
73 self.is_readonly = False
# Emit ALTER TABLE ADD CONSTRAINT, except on SQLite which cannot do it.
75 def add_constraint(self, constraint):
76 if self._dbengine.dialect.name != 'sqlite':
77 # It is impossible to add constraints to a pre-existing table for
79 # source: http://www.sqlite.org/omitted.html
80 create_constraint = AddConstraint(constraint, bind=self._dbengine)
81 create_constraint.execute()
# Create the given index on the engine; the execute() line is elided here.
83 def add_index(self, index):
84 add_index = CreateIndex(index, bind=self._dbengine)
# Log only when db.conn.log was enabled (guard line elided here).
87 def debug(self, fact):
89 super(SqlStore, self).debug(fact)
# Open a new engine connection and attach a cherrypy end-of-request hook so
# it is cleaned up when the request finishes.
95 self.debug('SqlStore connect: %s' % self.name)
96 conn = self._dbengine.connect()
98 def cleanup_connection():
99 self.debug('SqlStore cleanup: %s' % self.name)
101 cherrypy.request.hooks.attach('on_end_request', cleanup_connection)
# Query helper bound to one table of a SqlStore.  Part of a SqlQuery class
# whose header line is elided from this excerpt.
107 def __init__(self, db_obj, table, table_def, trans=True):
109 self._con = self._db.connection()
# Open a transaction unless the caller requested autocommit behavior.
110 self._trans = self._con.begin() if trans else None
111 self._table = self._get_table(table, table_def)
# Build the SQLAlchemy Table object from a table_def dict (or plain column
# list); several lines of this method are elided from this excerpt.
113 def _get_table(self, name, table_def):
# A bare list of column names is normalized to the dict form.
114 if isinstance(table_def, list):
115 table_def = {'columns': table_def,
# Every column is created as Text; keys and indexes come from table_def.
119 for col_name in table_def['columns']:
120 table_creation.append(Column(col_name, Text()))
121 if table_def['primary_key']:
122 table_creation.append(PrimaryKeyConstraint(
123 *table_def['primary_key']))
124 for index in table_def['indexes']:
125 idx_name = 'idx_%s_%s' % (name, '_'.join(index))
126 table_creation.append(Index(idx_name, *index))
127 table = Table(name, MetaData(self._db.engine()), *table_creation)
# Build a where-clause from a {column: value} equality filter; the loop and
# combination logic of this method are partly elided from this excerpt.
130 def _where(self, kvfilter):
132 if kvfilter is not None:
134 w = self._table.columns[k] == kvfilter[k]
# Map requested column names to Column objects; defaults to all columns.
# Parts of this method are elided from this excerpt.
141 def _columns(self, columns=None):
143 if columns is not None:
146 cols.append(self._table.columns[c])
148 cols = self._table.columns
# NOTE(review): the lines below are bodies of rollback/create/drop helpers
# whose def lines are elided from this excerpt.
152 self._trans.rollback()
158 self._table.create(checkfirst=True)
161 self._table.drop(checkfirst=True)
def select(self, kvfilter=None, columns=None):
    """Run a SELECT over this query's table.

    kvfilter -- optional {column: value} equality filter.
    columns -- optional list of column names to fetch (default: all).
    Returns the SQLAlchemy result proxy from the connection.
    """
    stmt = select(self._columns(columns), self._where(kvfilter))
    return self._con.execute(stmt)
def insert(self, values):
    """INSERT one row; values are given in table column order."""
    stmt = self._table.insert(values)
    self._con.execute(stmt)
def update(self, values, kvfilter):
    """UPDATE rows matching kvfilter, setting the given column values."""
    where_clause = self._where(kvfilter)
    stmt = self._table.update(where_clause, values)
    self._con.execute(stmt)
def delete(self, kvfilter):
    """DELETE rows matching the {column: value} equality filter."""
    stmt = self._table.delete(self._where(kvfilter))
    self._con.execute(stmt)
# Read-only store backed by an ini-style configuration file.
177 class FileStore(BaseStore):
179 def __init__(self, name):
180 self._filename = name
181 self.is_readonly = True
# mtime of the file at last parse, used for cache invalidation below.
182 self._timestamp = None
# Reparse the config file only when its mtime is newer than the cache;
# parts of this method (try/except structure, return) are elided here.
185 def get_config(self):
187 stat = os.stat(self._filename)
189 self.error("Unable to check config file %s: [%s]" % (
193 timestamp = stat.st_mtime
194 if self._config is None or timestamp > self._timestamp:
195 self._config = ConfigParser.RawConfigParser()
# Keep option names case-sensitive (default optionxform lowercases).
196 self._config.optionxform = str
197 self._config.read(self._filename)
# Schema-modification hooks make no sense on a read-only file store.
200 def add_constraint(self, table):
201 raise NotImplementedError()
203 def add_index(self, index):
204 raise NotImplementedError()
# Read-only query object over one section of a FileStore config file,
# mimicking the SqlQuery interface for select().
207 class FileQuery(Log):
209 def __init__(self, fstore, table, table_def, trans=True):
210 # We don't need indexes in a FileQuery, so drop that info
211 if isinstance(table_def, dict):
212 columns = table_def['columns']
215 self._fstore = fstore
216 self._config = fstore.get_config()
217 self._section = table
# Only (name, value) or (name, option, value) layouts are supported.
218 if len(columns) > 3 or columns[-1] != 'value':
219 raise ValueError('Unsupported configuration format')
220 self._columns = columns
# NOTE(review): the lines below are bodies of write-style helpers whose
# def lines are elided; file-backed stores do not support them.
229 raise NotImplementedError
232 raise NotImplementedError
234 def select(self, kvfilter=None, columns=None):
235 if self._section not in self._config.sections():
238 opts = self._config.options(self._section)
# Pull the optional equality filter for each supported column.
242 if self._columns[0] in kvfilter:
243 prefix = kvfilter[self._columns[0]]
244 prefix_ = prefix + ' '
247 if len(self._columns) == 3 and self._columns[1] in kvfilter:
248 name = kvfilter[self._columns[1]]
251 if self._columns[-1] in kvfilter:
252 value = kvfilter[self._columns[-1]]
# Three-column layout stores "name option" joined by a space as the key.
256 if len(self._columns) == 3:
258 if prefix and not o.startswith(prefix_):
261 col1, col2 = o.split(' ', 1)
262 if name and col2 != name:
265 col3 = self._config.get(self._section, o)
266 if value and col3 != value:
269 r = [col1, col2, col3]
# Two-column layout: the key must match the name filter exactly.
272 if prefix and o != prefix:
274 r = [o, self._config.get(self._section, o)]
# Project each matching row onto the requested columns.
279 s.append(r[self._columns.index(c)])
284 self.debug('SELECT(%s, %s, %s) -> %s' % (self._section,
# Mutating operations are unsupported on a file-backed store.
290 def insert(self, values):
291 raise NotImplementedError
293 def update(self, values, kvfilter):
294 raise NotImplementedError
296 def delete(self, kvfilter):
297 raise NotImplementedError
# Class-level bookkeeping shared by all Store subclasses; the Store class
# header and several surrounding lines are elided from this excerpt.
301 # Static, Store-level variables
305 # Static, class-level variables
306 # Either set this to False, or implement _cleanup, in child classes
307 _should_cleanup = True
# Select the backend from either a cherrypy config key or a direct URL;
# some lines of this method are elided from this excerpt.
309 def __init__(self, config_name=None, database_url=None):
310 if config_name is None and database_url is None:
311 raise ValueError('config_name or database_url must be provided')
313 if config_name not in cherrypy.config:
314 raise NameError('Unknown database %s' % config_name)
315 name = cherrypy.config[config_name]
# configfile:// URLs get a read-only FileStore; anything else a SqlStore.
318 if name.startswith('configfile://'):
319 _, filename = name.split('://')
320 self._db = FileStore(filename)
321 self._query = FileQuery
323 self._db = SqlStore.get_connection(name)
324 self._query = SqlQuery
# Verify the schema version unless running as part of an upgrade, then
# schedule the periodic cleanup task if the subclass wants one.
326 if not self._is_upgrade:
327 self._check_database()
328 if self._should_cleanup:
329 self._schedule_cleanup()
# Start one background cleanup task per Store subclass; guard branches of
# this method (readonly return, early return) are partly elided here.
331 def _schedule_cleanup(self):
332 store_name = self.__class__.__name__
334 # No use in cleanups on a readonly database
335 self.debug('Not scheduling cleanup for %s due to readonly' %
338 if store_name in Store.__cleanups:
339 # This class was already scheduled, skip
341 self.debug('Scheduling cleanups for %s' % store_name)
342 # Check once every minute whether we need to clean
343 task = cherrypy.process.plugins.BackgroundTask(
344 60, self._maybe_run_cleanup)
346 Store.__cleanups[store_name] = task
# Run _cleanup() when the last recorded clean is older than the configured
# interval; the timestamp is written first so other servers skip the run.
348 def _maybe_run_cleanup(self):
349 # Let's see if we need to do cleanup
350 last_clean = self.load_options('dbinfo').get('%s_last_clean' %
351 self.__class__.__name__,
# cleanup_interval is in minutes; convert to seconds.
353 time_diff = cherrypy.config.get('cleanup_interval', 30) * 60
354 next_ts = int(time.time()) - time_diff
355 self.debug('Considering cleanup for %s: %s. Next at: %s'
356 % (self.__class__.__name__, last_clean, next_ts))
357 if ('timestamp' not in last_clean or
358 int(last_clean['timestamp']) <= next_ts):
359 # First store the current time so that other servers don't start
360 self.save_options('dbinfo', '%s_last_clean'
361 % self.__class__.__name__,
362 {'timestamp': int(time.time()),
363 'removed_entries': -1})
365 # Cleanup has been long enough ago, let's run
366 self.debug('Cleaning up for %s' % self.__class__.__name__)
367 removed_entries = self._cleanup()
368 self.debug('Cleaned up %i entries for %s' %
369 (removed_entries, self.__class__.__name__))
# Record when we cleaned and how many entries were removed.
370 self.save_options('dbinfo', '%s_last_clean'
371 % self.__class__.__name__,
372 {'timestamp': int(time.time()),
373 'removed_entries': removed_entries})
# NOTE(review): the def line of _cleanup (and its return) are elided from
# this excerpt; subclasses with _should_cleanup = True must override it.
376 # The default cleanup is to do nothing
377 # This function should return the number of rows it cleaned up.
378 # This information may be used to automatically tune the clean period.
379 self.error('Cleanup for %s not implemented' %
380 self.__class__.__name__)
# Schema version the running code expects; overridable per plugin store.
383 def _code_schema_version(self):
384 # This function makes it possible for separate plugins to have
385 # different schema versions. We default to the global schema
387 return CURRENT_SCHEMA_VERSION
# Read this class's stored schema version from the dbinfo table; the
# fallback return paths of this method are partly elided here.
389 def _get_schema_version(self):
390 # We are storing multiple versions: one per class
391 # That way, we can support plugins with differing schema versions from
392 # the main codebase, and even in the same database.
# Instantiating the query ensures the dbinfo table exists.
393 q = self._query(self._db, 'dbinfo', OPTIONS_TABLE, trans=False)
395 q._con.close() # pylint: disable=protected-access
396 cls_name = self.__class__.__name__
397 current_version = self.load_options('dbinfo').get('%s_schema'
399 if 'version' in current_version:
400 return int(current_version['version'])
402 # Also try the old table name.
403 # "scheme" was a typo, but we need to retain that now for compat
404 fallback_version = self.load_options('dbinfo').get('scheme',
406 if 'version' in fallback_version:
407 # Explanation for this is in def upgrade_database(self)
# Fail hard (DatabaseError) when the stored schema version is missing or
# does not match the code's version; the readonly early-return is elided.
412 def _check_database(self):
414 # If the database is readonly, we cannot do anything to the
415 # schema. Let's just return, and assume people checked the
419 current_version = self._get_schema_version()
420 if current_version is None:
421 self.error('Database initialization required! ' +
422 'Please run ipsilon-upgrade-database')
423 raise DatabaseError('Database initialization required for %s' %
424 self.__class__.__name__)
425 if current_version != self._code_schema_version():
426 self.error('Database upgrade required! ' +
427 'Please run ipsilon-upgrade-database')
428 raise DatabaseError('Database upgrade required for %s' %
429 self.__class__.__name__)
431 def _store_new_schema_version(self, new_version):
432 cls_name = self.__class__.__name__
433 self.save_options('dbinfo', '%s_schema' % cls_name,
434 {'version': new_version})
# Create this store's tables from scratch; implemented by subclasses.
436 def _initialize_schema(self):
437 raise NotImplementedError()
# Upgrade one schema step from old_version; implemented by subclasses.
439 def _upgrade_schema(self, old_version):
440 # Datastores need to figure out what to do with bigger old_versions
442 # They might implement downgrading if that's feasible, or just throw
443 # NotImplementedError
444 # Should return the new schema version
445 raise NotImplementedError()
# Drive the schema to the current version, recursing one upgrade step at a
# time; parts of this method (the None check on new_version) are elided.
447 def upgrade_database(self):
448 # Do whatever is needed to get schema to current version
449 old_schema_version = self._get_schema_version()
450 if old_schema_version is None:
451 # Just initialize a new schema
452 self._initialize_schema()
453 self._store_new_schema_version(self._code_schema_version())
454 elif old_schema_version == -1:
455 # This is a special-case from 1.0: we only created tables at the
456 # first time they were actually used, but the upgrade code assumes
457 # that the tables exist. So let's fix this.
458 self._initialize_schema()
459 # The old version was schema version 1
460 self._store_new_schema_version(1)
461 self.upgrade_database()
462 elif old_schema_version != self._code_schema_version():
463 # Upgrade from old_schema_version to code_schema_version
464 self.debug('Upgrading from schema version %i' % old_schema_version)
465 new_version = self._upgrade_schema(old_schema_version)
# NOTE(review): '%' binds tighter than '+', so the format below applies
# only to the second string literal, which has no placeholder -- at
# runtime this raises TypeError ("not all arguments converted") instead
# of building the message. The '%' should apply to the whole
# parenthesized string. Not fixed here because adjacent lines of this
# method are elided from this excerpt.
467 error = ('Schema upgrade error: %s did not provide a ' +
468 'new schema version number!' %
469 self.__class__.__name__)
471 raise Exception(error)
472 self._store_new_schema_version(new_version)
473 # Check if we are now up-to-date
474 self.upgrade_database()
# Expose the backend's readonly flag; presumably decorated as a property
# on a line elided from this excerpt.
477 def is_readonly(self):
478 return self._db.is_readonly
# Recursively fold one result row into a nested-dict tree, accumulating
# repeated keys into lists; most of this helper is elided from this excerpt.
480 def _row_to_dict_tree(self, data, row):
486 self._row_to_dict_tree(d2, row[1:])
490 if data[name] is list:
491 data[name].append(value)
494 data[name] = [v, value]
# Build the nested-dict representation from all result rows; the
# accumulator setup and return are elided from this excerpt.
498 def _rows_to_dict_tree(self, rows):
501 self._row_to_dict_tree(data, r)
# Run a filtered SELECT and return the rows as a nested dict; errors are
# logged and swallowed (Python 2 except syntax). Some lines elided here.
504 def _load_data(self, table, columns, kvfilter=None):
507 q = self._query(self._db, table, columns, trans=False)
508 rows = q.select(kvfilter)
509 except Exception, e: # pylint: disable=broad-except
510 self.error("Failed to load data for table %s: [%s]" % (table, e))
511 return self._rows_to_dict_tree(rows)
# Load the whole config table as a nested dict; the table-name assignment
# is on a line elided from this excerpt.
513 def load_config(self):
515 columns = ['name', 'value']
516 return self._load_data(table, columns)
# Load options, optionally filtered on one name; the kvfilter setup and
# return lines are elided from this excerpt.
518 def load_options(self, table, name=None):
521 kvfilter['name'] = name
522 options = self._load_data(table, OPTIONS_TABLE, kvfilter)
523 if name and name in options:
# Upsert options for one name: existing rows get UPDATE, new ones INSERT,
# inside a transaction. Loop headers and commit/rollback lines elided.
527 def save_options(self, table, name, options):
531 q = self._query(self._db, table, OPTIONS_TABLE)
532 rows = q.select({'name': name}, ['option', 'value'])
# Snapshot of currently stored option values, keyed by option name.
534 curvals[row[0]] = row[1]
538 q.update({'value': options[opt]},
539 {'name': name, 'option': opt})
541 q.insert((name, opt, options[opt]))
544 except Exception, e: # pylint: disable=broad-except
547 self.error("Failed to save options: [%s]" % e)
# Delete all options for a name, or only the listed ones when options is
# given; loop and commit/rollback lines are elided from this excerpt.
550 def delete_options(self, table, name, options=None):
551 kvfilter = {'name': name}
554 q = self._query(self._db, table, OPTIONS_TABLE)
559 kvfilter['option'] = opt
562 except Exception, e: # pylint: disable=broad-except
565 self.error("Failed to delete from %s: [%s]" % (table, e))
# Store a new datum under a freshly generated uuid (one row per key) and
# presumably return the uuid; commit/return lines elided from this excerpt.
568 def new_unique_data(self, table, data):
569 newid = str(uuid.uuid4())
572 q = self._query(self._db, table, UNIQUE_DATA_TABLE)
574 q.insert((newid, name, data[name]))
576 except Exception, e: # pylint: disable=broad-except
579 self.error("Failed to store %s data: [%s]" % (table, e))
# Fetch uuid-keyed data, each provided argument narrowing the filter; the
# kvfilter setup lines are elided from this excerpt.
583 def get_unique_data(self, table, uuidval=None, name=None, value=None):
586 kvfilter['uuid'] = uuidval
588 kvfilter['name'] = name
590 kvfilter['value'] = value
591 return self._load_data(table, UNIQUE_DATA_TABLE, kvfilter)
# For each uuid in data, diff against stored rows: a None value deletes
# the key, an existing key updates, a new key inserts. Loop headers and
# commit/rollback lines are elided from this excerpt.
593 def save_unique_data(self, table, data):
596 q = self._query(self._db, table, UNIQUE_DATA_TABLE)
599 rows = q.select({'uuid': uid}, ['name', 'value'])
606 if datum[name] is None:
607 q.delete({'uuid': uid, 'name': name})
609 q.update({'value': datum[name]},
610 {'uuid': uid, 'name': name})
612 if datum[name] is not None:
613 q.insert((uid, name, datum[name]))
616 except Exception, e: # pylint: disable=broad-except
619 self.error("Failed to store data in %s: [%s]" % (table, e))
# Delete all rows for one uuid; the q.delete call line is elided here.
622 def del_unique_data(self, table, uuidval):
623 kvfilter = {'uuid': uuidval}
625 q = self._query(self._db, table, UNIQUE_DATA_TABLE, trans=False)
627 except Exception, e: # pylint: disable=broad-except
628 self.error("Failed to delete data from %s: [%s]" % (table, e))
# Erase all data for one table (presumably drop/recreate); the operative
# lines and commit/rollback are elided from this excerpt.
630 def _reset_data(self, table):
633 q = self._query(self._db, table, UNIQUE_DATA_TABLE)
637 except Exception, e: # pylint: disable=broad-except
640 self.error("Failed to erase all data from %s: [%s]" % (table, e))
# Store for admin-level configuration plus per-plugin "<plugin>_data"
# tables; no periodic cleanup needed.
643 class AdminStore(Store):
644 _should_cleanup = False
# __init__ def line elided here; wires up the admin.config.db database.
647 super(AdminStore, self).__init__('admin.config.db')
649 def get_data(self, plugin, idval=None, name=None, value=None):
650 return self.get_unique_data(plugin+"_data", idval, name, value)
652 def save_data(self, plugin, data):
653 return self.save_unique_data(plugin+"_data", data)
655 def new_datum(self, plugin, datum):
656 table = plugin+"_data"
657 return self.new_unique_data(table, datum)
659 def del_datum(self, plugin, idval):
660 table = plugin+"_data"
661 return self.del_unique_data(table, idval)
663 def wipe_data(self, plugin):
664 table = plugin+"_data"
665 self._reset_data(table)
# Create the option tables; the full table list is elided here.
667 def _initialize_schema(self):
668 for table in ['config',
672 q = self._query(self._db, table, OPTIONS_TABLE, trans=False)
674 q._con.close() # pylint: disable=protected-access
676 def _upgrade_schema(self, old_version):
678 # In schema version 2, we added indexes and primary keys
679 for table in ['config',
683 # pylint: disable=protected-access
684 table = self._query(self._db, table, OPTIONS_TABLE,
686 self._db.add_constraint(table.primary_key)
687 for index in table.indexes:
688 self._db.add_index(index)
# Unknown old versions cannot be upgraded.
691 raise NotImplementedError()
# Lazily create a per-plugin data table; writable backends only.
693 def create_plugin_data_table(self, plugin_name):
694 if not self.is_readonly:
695 table = plugin_name+'_data'
696 q = self._query(self._db, table, UNIQUE_DATA_TABLE,
699 q._con.close() # pylint: disable=protected-access
# Store for per-user preferences and per-plugin per-user data.
702 class UserStore(Store):
703 _should_cleanup = False
705 def __init__(self, path=None):
706 super(UserStore, self).__init__('user.prefs.db')
708 def save_user_preferences(self, user, options):
709 self.save_options('users', user, options)
711 def load_user_preferences(self, user):
712 return self.load_options('users', user)
714 def save_plugin_data(self, plugin, user, options):
715 self.save_options(plugin+"_data", user, options)
717 def load_plugin_data(self, plugin, user):
718 return self.load_options(plugin+"_data", user)
# Instantiating the query creates the users table if missing.
720 def _initialize_schema(self):
721 q = self._query(self._db, 'users', OPTIONS_TABLE, trans=False)
723 q._con.close() # pylint: disable=protected-access
725 def _upgrade_schema(self, old_version):
727 # In schema version 2, we added indexes and primary keys
728 # pylint: disable=protected-access
729 table = self._query(self._db, 'users', OPTIONS_TABLE,
731 self._db.add_constraint(table.primary_key)
732 for index in table.indexes:
733 self._db.add_index(index)
# Unknown old versions cannot be upgraded.
736 raise NotImplementedError()
# Store for in-flight login transactions, kept in transactions.db.
739 class TranStore(Store):
741 def __init__(self, path=None):
742 super(TranStore, self).__init__('transactions.db')
# Instantiating the query creates the transactions table if missing.
744 def _initialize_schema(self):
745 q = self._query(self._db, 'transactions', UNIQUE_DATA_TABLE,
748 q._con.close() # pylint: disable=protected-access
750 def _upgrade_schema(self, old_version):
752 # In schema version 2, we added indexes and primary keys
753 # pylint: disable=protected-access
754 table = self._query(self._db, 'transactions', UNIQUE_DATA_TABLE,
756 self._db.add_constraint(table.primary_key)
757 for index in table.indexes:
758 self._db.add_index(index)
# Unknown old versions cannot be upgraded.
761 raise NotImplementedError()
# Store tracking active SAML2 sessions in a uuid-keyed table.
764 class SAML2SessionStore(Store):
766 def __init__(self, database_url):
767 super(SAML2SessionStore, self).__init__(database_url=database_url)
768 self.table = 'saml2_sessions'
769 # pylint: disable=protected-access
# Make sure the sessions table exists up front.
770 table = SqlQuery(self._db, self.table, UNIQUE_DATA_TABLE)._table
771 table.create(checkfirst=True)
# Look up the uuid of the session row whose column `name` equals `value`;
# parts of this method's docstring and checks are elided here.
773 def _get_unique_id_from_column(self, name, value):
775 The query is going to return only the column in the query.
776 Use this method to get the uuidval which can be used to fetch
779 Returns None or the uuid of the first value found.
781 data = self.get_unique_data(self.table, name=name, value=value)
786 raise ValueError("Multiple entries returned")
# NOTE(review): dict.keys()[0] is Python 2 only (keys() is a view in py3).
787 return data.keys()[0]
# Delete sessions whose expiration_time has passed; the def line of this
# cleanup method is elided from this excerpt.
790 # pylint: disable=protected-access
791 table = SqlQuery(self._db, self.table, UNIQUE_DATA_TABLE)._table
792 sel = select([table.columns.uuid]). \
793 where(and_(table.c.name == 'expiration_time',
794 table.c.value <= datetime.datetime.now()))
795 # pylint: disable=no-value-for-parameter
796 d = table.delete().where(table.c.uuid.in_(sel))
797 return d.execute().rowcount
799 def get_data(self, idval=None, name=None, value=None):
800 return self.get_unique_data(self.table, idval, name, value)
# Logout mechanisms are flattened to a comma-joined string for storage.
802 def new_session(self, datum):
803 if 'supported_logout_mechs' in datum:
804 datum['supported_logout_mechs'] = ','.join(
805 datum['supported_logout_mechs']
807 return self.new_unique_data(self.table, datum)
# Look the session up by either id; raises ValueError when not found.
809 def get_session(self, session_id=None, request_id=None):
811 uuidval = self._get_unique_id_from_column('session_id', session_id)
813 uuidval = self._get_unique_id_from_column('request_id', request_id)
815 raise ValueError("Unable to find session")
818 data = self.get_unique_data(self.table, uuidval=uuidval)
819 return uuidval, data[uuidval]
821 def get_user_sessions(self, user):
823 Return a list of all sessions for a given user.
825 rows = self.get_unique_data(self.table, name='user', value=user)
827 # We have a list of sessions for this user, now get the details
830 data = self.get_unique_data(self.table, uuidval=r)
# Split the stored comma-joined logout mechanisms back into a list.
831 data[r]['supported_logout_mechs'] = data[r].get(
832 'supported_logout_mechs', '').split(',')
833 logged_in.append(data)
837 def update_session(self, datum):
838 self.save_unique_data(self.table, datum)
840 def remove_session(self, uuidval):
841 self.del_unique_data(self.table, uuidval)
# Wipe all sessions; the def line of this method is elided here.
844 self._reset_data(self.table)
# Instantiating the query creates the sessions table if missing.
846 def _initialize_schema(self):
847 q = self._query(self._db, self.table, UNIQUE_DATA_TABLE,
850 q._con.close() # pylint: disable=protected-access
852 def _upgrade_schema(self, old_version):
854 # In schema version 2, we added indexes and primary keys
855 # pylint: disable=protected-access
856 table = self._query(self._db, self.table, UNIQUE_DATA_TABLE,
858 self._db.add_constraint(table.primary_key)
859 for index in table.indexes:
860 self._db.add_index(index)
# Unknown old versions cannot be upgraded.
863 raise NotImplementedError()