1 # Copyright (C) 2013 Ipsilon project Contributors, for license see COPYING
5 from ipsilon.util.log import Log
6 from sqlalchemy import create_engine
7 from sqlalchemy import MetaData, Table, Column, Text
8 from sqlalchemy.pool import QueuePool, SingletonThreadPool
9 from sqlalchemy.schema import (PrimaryKeyConstraint, Index, AddConstraint,
11 from sqlalchemy.sql import select, and_
# Schema version this code expects; each Store subclass compares it to the
# version recorded in its 'dbinfo' table (see _check_database).
CURRENT_SCHEMA_VERSION = 2

# Layout of option-style tables: one row per (name, option) pair.
OPTIONS_TABLE = {'columns': ['name', 'option', 'value'],
                 'primary_key': ('name', 'option'),
                 'indexes': [('name',)]
                 }

# Layout of uuid-keyed data tables: one row per (uuid, name) pair.
UNIQUE_DATA_TABLE = {'columns': ['uuid', 'name', 'value'],
                     'primary_key': ('uuid', 'name'),
                     'indexes': [('uuid',)]
                     }
30 class DatabaseError(Exception):
# Some helper functions used for upgrades
def add_constraint(self, table):
    """Add a constraint to an existing table; overridden per backend."""
    raise NotImplementedError()
def add_index(self, index):
    """Add an index to an existing table; overridden per backend."""
    raise NotImplementedError()
43 class SqlStore(BaseStore):
def get_connection(cls, name):
    """Return the shared SqlStore for *name*, creating and caching it on
    first request; creation is logged when 'db.conn.log' is set."""
    try:
        return cls.__instances[name]
    except KeyError:
        if cherrypy.config.get('db.conn.log', False):
            logging.debug('SqlStore new: %s', name)
        store = SqlStore(name)
        cls.__instances[name] = store
        return store
def __init__(self, name):
    """Set up the SQLAlchemy engine/pool for the database *name*."""
    self.db_conn_log = cherrypy.config.get('db.conn.log', False)
    self.debug('SqlStore init: %s' % name)
    # NOTE(review): engine_name is read before any visible assignment —
    # presumably derived from *name*; confirm.
    if '://' not in engine_name:
        # Bare paths are treated as SQLite database files.
        engine_name = 'sqlite:///' + engine_name
    # This pool size is per configured database. The minimum needed,
    # determined by binary search, is 23. We're using 25 so we have a bit
    # more playroom, and then the overflow should make sure things don't
    # break when we suddenly need more.
    pool_args = {'poolclass': QueuePool,
    if engine_name.startswith('sqlite://'):
        # It's not possible to share connections for SQLite between
        # threads, so let's use the SingletonThreadPool for them
        pool_args = {'poolclass': SingletonThreadPool}
    self._dbengine = create_engine(engine_name,
                                   echo=cherrypy.config.get('db.echo',
    self.is_readonly = False
def add_constraint(self, constraint):
    """Emit DDL applying *constraint* to its table (skipped on SQLite)."""
    if self._dbengine.dialect.name != 'sqlite':
        # It is impossible to add constraints to a pre-existing table for
        # source: http://www.sqlite.org/omitted.html
        create_constraint = AddConstraint(constraint, bind=self._dbengine)
        create_constraint.execute()
def add_index(self, index):
    """Build the CREATE INDEX construct for *index* on this engine."""
    # NOTE(review): the CreateIndex construct does not appear to be
    # executed in the visible code — verify it is run.
    add_index = CreateIndex(index, bind=self._dbengine)
def debug(self, fact):
    """Forward *fact* to the Log base class logger."""
    super(SqlStore, self).debug(fact)
# Connection checkout: hand out a pooled connection and register a
# cherrypy hook so it is cleaned up at the end of the request.
self.debug('SqlStore connect: %s' % self.name)
conn = self._dbengine.connect()

def cleanup_connection():
    self.debug('SqlStore cleanup: %s' % self.name)

cherrypy.request.hooks.attach('on_end_request', cleanup_connection)
def __init__(self, db_obj, table, table_def, trans=True):
    """Bind to *table* (built from *table_def*) on the given store.

    When *trans* is true a transaction is opened immediately.
    """
    # NOTE(review): self._db is read here but its assignment from
    # *db_obj* is not visible — confirm.
    self._con = self._db.connection()
    self._trans = self._con.begin() if trans else None
    self._table = self._get_table(table, table_def)
def _get_table(self, name, table_def):
    """Build the SQLAlchemy Table object for *name*.

    *table_def* is either a plain column list or a dict shaped like
    OPTIONS_TABLE with 'columns', 'primary_key' and 'indexes' keys.
    """
    if isinstance(table_def, list):
        # Normalize the short (list) form into the dict form.
        table_def = {'columns': table_def,
    for col_name in table_def['columns']:
        # Every column is stored as Text.
        table_creation.append(Column(col_name, Text()))
    if table_def['primary_key']:
        table_creation.append(PrimaryKeyConstraint(
            *table_def['primary_key']))
    for index in table_def['indexes']:
        # Deterministic index name: idx_<table>_<joined column names>.
        idx_name = 'idx_%s_%s' % (name, '_'.join(index))
        table_creation.append(Index(idx_name, *index))
    table = Table(name, MetaData(self._db.engine()), *table_creation)
def _where(self, kvfilter):
    """Build an equality where-clause from *kvfilter* (None = no filter)."""
    if kvfilter is not None:
        # NOTE(review): k is the variable of an iteration over kvfilter
        # that is not fully visible here — confirm.
        w = self._table.columns[k] == kvfilter[k]
def _columns(self, columns=None):
    """Return the Column objects to select; all columns when None."""
    if columns is not None:
        cols.append(self._table.columns[c])
    # When no subset is requested, select every table column.
    cols = self._table.columns
# Transaction/table lifecycle helper bodies (rollback, create, drop).
self._trans.rollback()
self._table.create(checkfirst=True)
self._table.drop(checkfirst=True)
def select(self, kvfilter=None, columns=None):
    """Run a SELECT over the table, filtered by *kvfilter* and projected
    onto *columns* (all columns when None); returns the result proxy."""
    stmt = select(self._columns(columns), self._where(kvfilter))
    return self._con.execute(stmt)
def insert(self, values):
    """INSERT one row; *values* is ordered like the table columns."""
    stmt = self._table.insert(values)
    self._con.execute(stmt)
def update(self, values, kvfilter):
    """UPDATE rows matching *kvfilter*, setting *values*."""
    stmt = self._table.update(self._where(kvfilter), values)
    self._con.execute(stmt)
def delete(self, kvfilter):
    """DELETE rows matching *kvfilter*."""
    stmt = self._table.delete(self._where(kvfilter))
    self._con.execute(stmt)
# Read-only store backed by an ini-style configuration file.
class FileStore(BaseStore):

    def __init__(self, name):
        """Remember the config file path; parsing happens lazily."""
        self._filename = name
        self.is_readonly = True
        # mtime of the file at last parse, used to detect changes.
        self._timestamp = None
def get_config(self):
    """Return the parsed RawConfigParser, re-reading the file whenever
    its mtime is newer than the cached parse."""
    stat = os.stat(self._filename)
    self.error("Unable to check config file %s: [%s]" % (
    timestamp = stat.st_mtime
    if self._config is None or timestamp > self._timestamp:
        # optionxform=str keeps option names case-sensitive.
        self._config = ConfigParser.RawConfigParser()
        self._config.optionxform = str
        self._config.read(self._filename)
def add_constraint(self, table):
    """Schema constraints do not apply to a file-backed store."""
    raise NotImplementedError()
def add_index(self, index):
    """Indexes do not apply to a file-backed store."""
    raise NotImplementedError()
# Read-only query interface over one section of a FileStore config file.
class FileQuery(Log):

    def __init__(self, fstore, table, table_def, trans=True):
        """Bind to section *table* of *fstore*; *trans* is ignored.

        Only 2- or 3-column layouts whose last column is 'value' are
        supported.
        """
        # We don't need indexes in a FileQuery, so drop that info
        if isinstance(table_def, dict):
            columns = table_def['columns']
        self._fstore = fstore
        self._config = fstore.get_config()
        self._section = table
        if len(columns) > 3 or columns[-1] != 'value':
            raise ValueError('Unsupported configuration format')
        self._columns = columns
# Table create/drop bodies: file-backed tables cannot be created or
# dropped through this interface.
raise NotImplementedError
raise NotImplementedError
def select(self, kvfilter=None, columns=None):
    """Emulate a SELECT over the config section.

    For 3-column tables the option key encodes the first two columns as
    '<col1> <col2>'; the stored option value is the last column.
    """
    if self._section not in self._config.sections():
    opts = self._config.options(self._section)
    # Extract the filter values (prefix / name / value) from kvfilter.
    if self._columns[0] in kvfilter:
        prefix = kvfilter[self._columns[0]]
        # Keys are '<prefix> <name>', hence the trailing space.
        prefix_ = prefix + ' '
    if len(self._columns) == 3 and self._columns[1] in kvfilter:
        name = kvfilter[self._columns[1]]
    if self._columns[-1] in kvfilter:
        value = kvfilter[self._columns[-1]]
    # Three-column layout: split the option key into col1/col2 and apply
    # the filters; two-column layout matches the key directly.
    if len(self._columns) == 3:
        if prefix and not o.startswith(prefix_):
        col1, col2 = o.split(' ', 1)
        if name and col2 != name:
        col3 = self._config.get(self._section, o)
        if value and col3 != value:
        r = [col1, col2, col3]
    if prefix and o != prefix:
    r = [o, self._config.get(self._section, o)]
    # Project the row onto the requested columns.
    s.append(r[self._columns.index(c)])
    self.debug('SELECT(%s, %s, %s) -> %s' % (self._section,
def insert(self, values):
    """File-backed stores are read-only; writes are unsupported."""
    raise NotImplementedError
def update(self, values, kvfilter):
    """File-backed stores are read-only; writes are unsupported."""
    raise NotImplementedError
def delete(self, kvfilter):
    """File-backed stores are read-only; deletes are unsupported."""
    raise NotImplementedError
# Static, Store-level variables
# Static, class-level variables
# Either set this to False, or implement _cleanup, in child classes
# When True, __init__ schedules the periodic cleanup task.
_should_cleanup = True
def __init__(self, config_name=None, database_url=None):
    """Open the store named by cherrypy config key *config_name*, or
    directly by *database_url*; at least one must be provided."""
    if config_name is None and database_url is None:
        raise ValueError('config_name or database_url must be provided')
    if config_name not in cherrypy.config:
        raise NameError('Unknown database %s' % config_name)
    name = cherrypy.config[config_name]
    if name.startswith('configfile://'):
        # ini-file backend (read-only).
        _, filename = name.split('://')
        self._db = FileStore(filename)
        self._query = FileQuery
    # NOTE(review): this looks like the non-configfile (SQL) branch —
    # confirm the control flow.
    self._db = SqlStore.get_connection(name)
    self._query = SqlQuery
    # NOTE(review): self._is_upgrade is read without a visible
    # assignment — confirm where it is set.
    if not self._is_upgrade:
        self._check_database()
    if self._should_cleanup:
        self._schedule_cleanup()
def _schedule_cleanup(self):
    """Start, once per Store subclass, a background task that calls
    _maybe_run_cleanup every minute."""
    store_name = self.__class__.__name__
    # No use in cleanups on a readonly database
    self.debug('Not scheduling cleanup for %s due to readonly' %
    if store_name in Store.__cleanups:
        # This class was already scheduled, skip
    self.debug('Scheduling cleanups for %s' % store_name)
    # Check once every minute whether we need to clean
    task = cherrypy.process.plugins.BackgroundTask(
        60, self._maybe_run_cleanup)
    Store.__cleanups[store_name] = task
def _maybe_run_cleanup(self):
    """Run _cleanup when the configured 'cleanup_interval' (minutes,
    default 30) has elapsed since the last recorded run, recording the
    timestamp and number of removed entries in dbinfo."""
    # Let's see if we need to do cleanup
    last_clean = self.load_options('dbinfo').get('%s_last_clean' %
                                                 self.__class__.__name__,
    # Interval is configured in minutes; convert to seconds.
    time_diff = cherrypy.config.get('cleanup_interval', 30) * 60
    next_ts = int(time.time()) - time_diff
    self.debug('Considering cleanup for %s: %s. Next at: %s'
               % (self.__class__.__name__, last_clean, next_ts))
    if ('timestamp' not in last_clean or
            int(last_clean['timestamp']) <= next_ts):
        # First store the current time so that other servers don't start
        self.save_options('dbinfo', '%s_last_clean'
                          % self.__class__.__name__,
                          {'timestamp': int(time.time()),
                           'removed_entries': -1})
        # Cleanup has been long enough ago, let's run
        self.debug('Cleaning up for %s' % self.__class__.__name__)
        removed_entries = self._cleanup()
        self.debug('Cleaned up %i entries for %s' %
                   (removed_entries, self.__class__.__name__))
        self.save_options('dbinfo', '%s_last_clean'
                          % self.__class__.__name__,
                          {'timestamp': int(time.time()),
                           'removed_entries': removed_entries})
# Default _cleanup body: log an error and clean nothing.
# The default cleanup is to do nothing
# This function should return the number of rows it cleaned up.
# This information may be used to automatically tune the clean period.
self.error('Cleanup for %s not implemented' %
           self.__class__.__name__)
def _code_schema_version(self):
    """Return the schema version this code expects for the class."""
    # This function makes it possible for separate plugins to have
    # different schema versions. We default to the global schema
    return CURRENT_SCHEMA_VERSION
def _get_schema_version(self):
    """Return the stored schema version for this class, falling back to
    the legacy 'scheme' dbinfo entry; None when never initialized."""
    # We are storing multiple versions: one per class
    # That way, we can support plugins with differing schema versions from
    # the main codebase, and even in the same database.
    # Ensure the dbinfo table exists before querying it.
    q = self._query(self._db, 'dbinfo', OPTIONS_TABLE, trans=False)
    q._con.close()  # pylint: disable=protected-access
    cls_name = self.__class__.__name__
    current_version = self.load_options('dbinfo').get('%s_schema'
    if 'version' in current_version:
        return int(current_version['version'])
    # Also try the old table name.
    # "scheme" was a typo, but we need to retain that now for compat
    fallback_version = self.load_options('dbinfo').get('scheme',
    if 'version' in fallback_version:
        # Explanation for this is in def upgrade_database(self)
def _check_database(self):
    """Verify the stored schema version matches the code's; log and
    raise DatabaseError when initialization or upgrade is required."""
    # If the database is readonly, we cannot do anything to the
    # schema. Let's just return, and assume people checked the
    current_version = self._get_schema_version()
    if current_version is None:
        self.error('Database initialization required! ' +
                   'Please run ipsilon-upgrade-database')
        raise DatabaseError('Database initialization required for %s' %
                            self.__class__.__name__)
    if current_version != self._code_schema_version():
        self.error('Database upgrade required! ' +
                   'Please run ipsilon-upgrade-database')
        raise DatabaseError('Database upgrade required for %s' %
                            self.__class__.__name__)
def _store_new_schema_version(self, new_version):
    """Record *new_version* as this class's schema version in dbinfo."""
    option_name = '%s_schema' % self.__class__.__name__
    self.save_options('dbinfo', option_name, {'version': new_version})
def _initialize_schema(self):
    """Create this store's tables; implemented by subclasses."""
    raise NotImplementedError()
def _upgrade_schema(self, old_version):
    """Upgrade one step from *old_version*; implemented by subclasses."""
    # Datastores need to figure out what to do with bigger old_versions
    # They might implement downgrading if that's feasible, or just throw
    # NotImplementedError
    # Should return the new schema version
    raise NotImplementedError()
def upgrade_database(self):
    """Bring the schema to the current version, initializing when absent
    and otherwise applying one _upgrade_schema step per recursion."""
    # Do whatever is needed to get schema to current version
    old_schema_version = self._get_schema_version()
    if old_schema_version is None:
        # Just initialize a new schema
        self._initialize_schema()
        self._store_new_schema_version(self._code_schema_version())
    elif old_schema_version == -1:
        # This is a special-case from 1.0: we only created tables at the
        # first time they were actually used, but the upgrade code assumes
        # that the tables exist. So let's fix this.
        self._initialize_schema()
        # The old version was schema version 1
        self._store_new_schema_version(1)
        self.upgrade_database()
    elif old_schema_version != self._code_schema_version():
        # Upgrade from old_schema_version to code_schema_version
        self.debug('Upgrading from schema version %i' % old_schema_version)
        new_version = self._upgrade_schema(old_schema_version)
        # NOTE(review): operator precedence bug — '%' binds tighter than
        # '+', so formatting is applied only to the second literal, which
        # has no placeholder, raising TypeError when this is evaluated.
        # The whole concatenation should be parenthesized before '%'.
        # NOTE(review): a guard (e.g. 'if not new_version:') appears to be
        # intended around the error/raise pair — confirm; as written the
        # raise makes the following statements unreachable.
        error = ('Schema upgrade error: %s did not provide a ' +
                 'new schema version number!' %
                 self.__class__.__name__)
        raise Exception(error)
        self._store_new_schema_version(new_version)
        # Check if we are now up-to-date
        self.upgrade_database()
def is_readonly(self):
    # Accessor: True when the backing database cannot be written.
    return self._db.is_readonly
def _row_to_dict_tree(self, data, row):
    """Fold one result *row* into the nested dict *data*, recursing on
    the leading columns and collecting duplicate leaves into lists."""
    self._row_to_dict_tree(d2, row[1:])
    if data[name] is list:
        data[name].append(value)
    # Promote a scalar to a list when a second value arrives.
    data[name] = [v, value]
def _rows_to_dict_tree(self, rows):
    """Convert a row sequence into a nested dict via _row_to_dict_tree."""
    self._row_to_dict_tree(data, r)
def _load_data(self, table, columns, kvfilter=None):
    """SELECT rows from *table* (no transaction) and return them as a
    nested dict; failures are logged rather than raised."""
    q = self._query(self._db, table, columns, trans=False)
    rows = q.select(kvfilter)
    except Exception, e:  # pylint: disable=broad-except
        self.error("Failed to load data for table %s for store %s: [%s]"
                   % (table, self.__class__.__name__, e))
    return self._rows_to_dict_tree(rows)
def load_config(self):
    """Load the two-column config table as a name -> value mapping."""
    # NOTE(review): 'table' is read without a visible assignment
    # (expected to be 'config') — confirm.
    columns = ['name', 'value']
    return self._load_data(table, columns)
def load_options(self, table, name=None):
    """Load option rows from *table*, optionally filtered to one *name*;
    when *name* matches, its option -> value mapping is returned."""
    kvfilter['name'] = name
    options = self._load_data(table, OPTIONS_TABLE, kvfilter)
    if name and name in options:
def save_options(self, table, name, options):
    """Upsert *options* (option -> value) for *name* into *table* within
    a transaction; failures are logged."""
    q = self._query(self._db, table, OPTIONS_TABLE)
    # Read the current rows to decide update vs insert per option.
    rows = q.select({'name': name}, ['option', 'value'])
    curvals[row[0]] = row[1]
    q.update({'value': options[opt]},
             {'name': name, 'option': opt})
    q.insert((name, opt, options[opt]))
    except Exception, e:  # pylint: disable=broad-except
        self.error("Failed to save options: [%s]" % e)
def delete_options(self, table, name, options=None):
    """Delete rows for *name* from *table*; when *options* is given only
    those option rows are removed. Failures are logged."""
    kvfilter = {'name': name}
    q = self._query(self._db, table, OPTIONS_TABLE)
    kvfilter['option'] = opt
    except Exception, e:  # pylint: disable=broad-except
        self.error("Failed to delete from %s: [%s]" % (table, e))
def new_unique_data(self, table, data):
    """Insert *data* (name -> value) under a freshly generated uuid4;
    failures are logged."""
    newid = str(uuid.uuid4())
    q = self._query(self._db, table, UNIQUE_DATA_TABLE)
    q.insert((newid, name, data[name]))
    except Exception, e:  # pylint: disable=broad-except
        self.error("Failed to store %s data: [%s]" % (table, e))
def get_unique_data(self, table, uuidval=None, name=None, value=None):
    """Load rows from *table*, filtered by any of uuid, name, value."""
    kvfilter['uuid'] = uuidval
    kvfilter['name'] = name
    kvfilter['value'] = value
    return self._load_data(table, UNIQUE_DATA_TABLE, kvfilter)
def save_unique_data(self, table, data):
    """Upsert *data* ({uuid: {name: value}}) into *table*; a value of
    None deletes that name's row. Failures are logged."""
    q = self._query(self._db, table, UNIQUE_DATA_TABLE)
    # Read the existing rows to decide delete/update/insert per name.
    rows = q.select({'uuid': uid}, ['name', 'value'])
    if datum[name] is None:
        # None means: remove this name for the uuid.
        q.delete({'uuid': uid, 'name': name})
    q.update({'value': datum[name]},
             {'uuid': uid, 'name': name})
    if datum[name] is not None:
        q.insert((uid, name, datum[name]))
    except Exception, e:  # pylint: disable=broad-except
        self.error("Failed to store data in %s: [%s]" % (table, e))
def del_unique_data(self, table, uuidval):
    """Delete every row stored under *uuidval*; failures are logged."""
    kvfilter = {'uuid': uuidval}
    q = self._query(self._db, table, UNIQUE_DATA_TABLE, trans=False)
    except Exception, e:  # pylint: disable=broad-except
        self.error("Failed to delete data from %s: [%s]" % (table, e))
def _reset_data(self, table):
    """Erase all rows of *table*; failures are logged."""
    q = self._query(self._db, table, UNIQUE_DATA_TABLE)
    except Exception, e:  # pylint: disable=broad-except
        self.error("Failed to erase all data from %s: [%s]" % (table, e))
# Store for the admin configuration database.
class AdminStore(Store):
    # Admin data has no expiring rows; skip the periodic cleanup task.
    _should_cleanup = False

    # __init__ body: bind to the 'admin.config.db' configured database.
    super(AdminStore, self).__init__('admin.config.db')
def get_data(self, plugin, idval=None, name=None, value=None):
    """Fetch rows from the plugin's unique-data table."""
    table = plugin + "_data"
    return self.get_unique_data(table, idval, name, value)
def save_data(self, plugin, data):
    """Upsert *data* into the plugin's unique-data table."""
    table = plugin + "_data"
    return self.save_unique_data(table, data)
def new_datum(self, plugin, datum):
    """Insert *datum* under a new uuid in the plugin's data table."""
    return self.new_unique_data(plugin + "_data", datum)
def del_datum(self, plugin, idval):
    """Remove every row stored under *idval* in the plugin's data table."""
    return self.del_unique_data(plugin + "_data", idval)
def wipe_data(self, plugin):
    """Erase the plugin's entire data table."""
    self._reset_data(plugin + "_data")
def _initialize_schema(self):
    """Create the admin option tables."""
    for table in ['config',
        # Instantiating the query creates the table as a side effect.
        q = self._query(self._db, table, OPTIONS_TABLE, trans=False)
        q._con.close()  # pylint: disable=protected-access
def _upgrade_schema(self, old_version):
    """Upgrade the admin tables one step from *old_version*."""
    # In schema version 2, we added indexes and primary keys
    for table in ['config',
        # pylint: disable=protected-access
        table = self._query(self._db, table, OPTIONS_TABLE,
        self._db.add_constraint(table.primary_key)
        for index in table.indexes:
            self._db.add_index(index)
    # NOTE(review): presumably the fallback for unsupported old_version
    # values — confirm the branch structure.
    raise NotImplementedError()
def create_plugin_data_table(self, plugin_name):
    """Ensure the <plugin>_data table exists (no-op when readonly)."""
    if not self.is_readonly:
        table = plugin_name+'_data'
        q = self._query(self._db, table, UNIQUE_DATA_TABLE,
        q._con.close()  # pylint: disable=protected-access
# Store for per-user preferences and per-plugin user data.
class UserStore(Store):
    # User preferences never expire; no periodic cleanup needed.
    _should_cleanup = False

    def __init__(self, path=None):
        # *path* is accepted for API compatibility but not used here.
        super(UserStore, self).__init__('user.prefs.db')
def save_user_preferences(self, user, options):
    """Persist *options* for *user* in the 'users' options table."""
    table = 'users'
    self.save_options(table, user, options)
def load_user_preferences(self, user):
    """Return the stored preferences for *user*."""
    table = 'users'
    return self.load_options(table, user)
def save_plugin_data(self, plugin, user, options):
    """Persist plugin-specific *options* for *user*."""
    table = plugin + "_data"
    self.save_options(table, user, options)
def load_plugin_data(self, plugin, user):
    """Return plugin-specific data stored for *user*."""
    table = plugin + "_data"
    return self.load_options(table, user)
def _initialize_schema(self):
    """Create the 'users' options table."""
    # Instantiating the query creates the table as a side effect.
    q = self._query(self._db, 'users', OPTIONS_TABLE, trans=False)
    q._con.close()  # pylint: disable=protected-access
def _upgrade_schema(self, old_version):
    """Upgrade the users table one step from *old_version*."""
    # In schema version 2, we added indexes and primary keys
    # pylint: disable=protected-access
    table = self._query(self._db, 'users', OPTIONS_TABLE,
    self._db.add_constraint(table.primary_key)
    for index in table.indexes:
        self._db.add_index(index)
    # NOTE(review): presumably the fallback for unsupported old_version
    # values — confirm the branch structure.
    raise NotImplementedError()
def create_plugin_data_table(self, plugin_name):
    """Ensure the <plugin>_data options table exists (no-op when
    readonly)."""
    if not self.is_readonly:
        table = plugin_name+'_data'
        q = self._query(self._db, table, OPTIONS_TABLE,
        q._con.close()  # pylint: disable=protected-access
# Store for in-flight transactions, backed by 'transactions.db'.
class TranStore(Store):

    def __init__(self, path=None):
        # *path* is accepted for API compatibility but not used here.
        super(TranStore, self).__init__('transactions.db')
        self.table = 'transactions'
def _initialize_schema(self):
    """Create the transactions table."""
    q = self._query(self._db, self.table, UNIQUE_DATA_TABLE,
    q._con.close()  # pylint: disable=protected-access
def _upgrade_schema(self, old_version):
    """Upgrade the transactions table one step from *old_version*."""
    # In schema version 2, we added indexes and primary keys
    # pylint: disable=protected-access
    table = self._query(self._db, self.table, UNIQUE_DATA_TABLE,
    self._db.add_constraint(table.primary_key)
    for index in table.indexes:
        self._db.add_index(index)
    # NOTE(review): presumably the fallback for unsupported old_version
    # values — confirm the branch structure.
    raise NotImplementedError()
# _cleanup body: delete transactions whose 'origintime' is more than an
# hour old; returns the number of rows removed.
# pylint: disable=protected-access
table = SqlQuery(self._db, self.table, UNIQUE_DATA_TABLE)._table
# NOTE(review): despite the name, in_one_hour is one hour in the PAST
# (now minus one hour) — consider renaming.
in_one_hour = datetime.datetime.now() - datetime.timedelta(hours=1)
# NOTE(review): 'value' is a Text column compared to a datetime; this
# relies on the driver's conversion/ordering — verify.
sel = select([table.columns.uuid]). \
    where(and_(table.c.name == 'origintime',
               table.c.value <= in_one_hour))
# pylint: disable=no-value-for-parameter
d = table.delete().where(table.c.uuid.in_(sel))
return d.execute().rowcount
# Store for active SAML2 sessions.
class SAML2SessionStore(Store):

    def __init__(self, database_url):
        """Bind to *database_url* and ensure the sessions table exists."""
        super(SAML2SessionStore, self).__init__(database_url=database_url)
        self.table = 'saml2_sessions'
        # pylint: disable=protected-access
        table = SqlQuery(self._db, self.table, UNIQUE_DATA_TABLE)._table
        # Create the table up front rather than on first use.
        table.create(checkfirst=True)
def _get_unique_id_from_column(self, name, value):
    """Return the uuid of the row whose *name* column equals *value*.

    The query is going to return only the column in the query.
    Use this method to get the uuidval which can be used to fetch
    the whole entry. Returns None or the uuid of the first value found;
    raises ValueError when multiple rows match.
    """
    data = self.get_unique_data(self.table, name=name, value=value)
    raise ValueError("Multiple entries returned")
    # NOTE: indexing dict.keys() works on Python 2 only.
    return data.keys()[0]
# _cleanup body: delete sessions whose 'expiration_time' has passed;
# returns the number of rows removed.
# pylint: disable=protected-access
table = SqlQuery(self._db, self.table, UNIQUE_DATA_TABLE)._table
# NOTE(review): 'value' is a Text column compared to a datetime; this
# relies on the driver's conversion/ordering — verify.
sel = select([table.columns.uuid]). \
    where(and_(table.c.name == 'expiration_time',
               table.c.value <= datetime.datetime.now()))
# pylint: disable=no-value-for-parameter
d = table.delete().where(table.c.uuid.in_(sel))
return d.execute().rowcount
def get_data(self, idval=None, name=None, value=None):
    """Fetch session rows filtered by any of uuid, name, value."""
    query_args = (self.table, idval, name, value)
    return self.get_unique_data(*query_args)
def new_session(self, datum):
    """Store a new session; a list-valued 'supported_logout_mechs' is
    serialized to a comma-joined string first."""
    if 'supported_logout_mechs' in datum:
        datum['supported_logout_mechs'] = ','.join(
            datum['supported_logout_mechs']
    return self.new_unique_data(self.table, datum)
def get_session(self, session_id=None, request_id=None):
    """Look up one session by *session_id* or *request_id*; returns
    (uuid, data) or raises ValueError when no session is found."""
    uuidval = self._get_unique_id_from_column('session_id', session_id)
    uuidval = self._get_unique_id_from_column('request_id', request_id)
    raise ValueError("Unable to find session")
    data = self.get_unique_data(self.table, uuidval=uuidval)
    return uuidval, data[uuidval]
def get_user_sessions(self, user):
    """
    Return a list of all sessions for a given user.
    """
    rows = self.get_unique_data(self.table, name='user', value=user)
    # We have a list of sessions for this user, now get the details
    data = self.get_unique_data(self.table, uuidval=r)
    # Deserialize the comma-joined logout mechanisms back into a list.
    data[r]['supported_logout_mechs'] = data[r].get(
        'supported_logout_mechs', '').split(',')
    logged_in.append(data)
def update_session(self, datum):
    """Persist changes to an existing session."""
    table = self.table
    self.save_unique_data(table, datum)
def remove_session(self, uuidval):
    """Delete the session stored under *uuidval*."""
    table = self.table
    self.del_unique_data(table, uuidval)
868 self._reset_data(self.table)
def _initialize_schema(self):
    """Create the saml2_sessions table."""
    q = self._query(self._db, self.table, UNIQUE_DATA_TABLE,
    q._con.close()  # pylint: disable=protected-access
def _upgrade_schema(self, old_version):
    """Upgrade the sessions table one step from *old_version*."""
    # In schema version 2, we added indexes and primary keys
    # pylint: disable=protected-access
    table = self._query(self._db, self.table, UNIQUE_DATA_TABLE,
    self._db.add_constraint(table.primary_key)
    for index in table.indexes:
        self._db.add_index(index)
    # NOTE(review): presumably the fallback for unsupported old_version
    # values — confirm the branch structure.
    raise NotImplementedError()