# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
#
# CubicWeb is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option)
# any later version.
#
# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
"""custom storages for the system source"""

from os import unlink, path as osp
from contextlib import contextmanager

from yams.schema import role_name

from cubicweb import Binary, ValidationError
from cubicweb.server import hook
from cubicweb.server.edition import EditedEntity


def set_attribute_storage(repo, etype, attr, storage):
    repo.system_source.set_storage(etype, attr, storage)

def unset_attribute_storage(repo, etype, attr):
    repo.system_source.unset_storage(etype, attr)


class Storage(object):
    """abstract storage

    * If `is_source_callback` is true (the default), the callback will be run
      while query results are processed, on each fetched value of the
      attribute, and should have the following prototype::

        callback(self, source, session, value)

      where `value` is the value actually stored in the backend. None values
      will be skipped (i.e. the callback won't be called for them).

    * If `is_source_callback` is false, the callback will be run during SQL
      generation, when some attribute with a custom storage is accessed, and
      should have the following prototype::

        callback(self, generator, relation, linkedvar)

      where `generator` is the SQL generator, `relation` the current RQL syntax
      tree relation and `linkedvar` the principal syntax tree variable holding
      the attribute.
    """
    is_source_callback = True

    def callback(self, *args):
        """see the class docstring for the prototype, which varies according to
        `is_source_callback`
        """
        raise NotImplementedError()

    def entity_added(self, entity, attr):
        """an entity using this storage for attr has been added"""
        raise NotImplementedError()

    def entity_updated(self, entity, attr):
        """an entity using this storage for attr has been updated"""
        raise NotImplementedError()

    def entity_deleted(self, entity, attr):
        """an entity using this storage for attr has been deleted"""
        raise NotImplementedError()

    def migrate_entity(self, entity, attribute):
        """migrate an entity attribute to the storage"""
        raise NotImplementedError()


# TODO
# * make it configurable without code
# * better file path attribution
# * handle backup/restore

def uniquify_path(dirpath, basename):
    """return a unique file name for `basename` in `dirpath`, or None
    if all attempts failed.

    XXX subject to race condition.
    """
    path = osp.join(dirpath, basename.replace(osp.sep, '-'))
    if not osp.isfile(path):
        return path
    base, ext = osp.splitext(path)
    for i in xrange(1, 256):
        path = '%s%s%s' % (base, i, ext)
        if not osp.isfile(path):
            return path
    return None


@contextmanager
def fsimport(session):
    """context manager setting the `fs_importing` transaction data flag and
    restoring its previous state on exit
    """
    present = 'fs_importing' in session.transaction_data
    old_value = session.transaction_data.get('fs_importing')
    session.transaction_data['fs_importing'] = True
    try:
        yield
    finally:
        # restore the previous state even if the importing code raised
        if present:
            session.transaction_data['fs_importing'] = old_value
        else:
            del session.transaction_data['fs_importing']


class BytesFileSystemStorage(Storage):
    """store Bytes attribute values on the file system"""
    def __init__(self, defaultdir, fsencoding='utf-8'):
        self.default_directory = defaultdir
        self.fsencoding = fsencoding

    def callback(self, source, session, value):
        """sql generator callback when some attribute with a custom storage is
        accessed
        """
        fpath = source.binary_to_str(value)
        try:
            return Binary(file(fpath, 'rb').read())
        except (IOError, OSError), ex:
            # file() raises IOError on a missing file, so catch it as well
            source.critical("can't open %s: %s", value, ex)
            return None

    def entity_added(self, entity, attr):
        """an entity using this storage for attr has been added"""
        if entity._cw.transaction_data.get('fs_importing'):
            # the value is the path of a file that already exists on the file
            # system: keep that path as the stored value and read its content
            binary = Binary(file(entity.cw_edited[attr].getvalue(), 'rb').read())
        else:
            binary = entity.cw_edited.pop(attr)
            fpath = self.new_fs_path(entity, attr)
            # the bytes storage actually stores the file's path in the database
            entity.cw_edited.edited_attribute(attr, Binary(fpath))
            file(fpath, 'wb').write(binary.getvalue())
            AddFileOp.get_instance(entity._cw).add_data(fpath)
        return binary

    def entity_updated(self, entity, attr):
        """an entity using this storage for attr has been updated"""
        # get the name of the previous file containing the value
        oldpath = self.current_fs_path(entity, attr)
        if entity._cw.transaction_data.get('fs_importing'):
            # If we are importing from the file system, the file already
            # exists. We do not need to create it, but we need to fetch the
            # content of the file as the actual content of the attribute
            fpath = entity.cw_edited[attr].getvalue()
            binary = Binary(file(fpath, 'rb').read())
        else:
            # We must store the content of the attribute into a file to stay
            # consistent with the behaviour of entity_added. Moreover, the
            # BytesFileSystemStorage expects to be able to retrieve the current
            # value of the attribute at any time by reading the file on disk.
            # To be able to roll back, use a new file and keep the old one,
            # which will be removed on commit if everything went ok.
            #
            # fetch the current attribute value in memory
            binary = entity.cw_edited.pop(attr)
            # get a file name for it
            fpath = self.new_fs_path(entity, attr)
            assert not osp.exists(fpath)
            # write the attribute value on disk
            file(fpath, 'wb').write(binary.getvalue())
            # Mark the new file as added during the transaction.
            # The file will be removed on rollback
            AddFileOp.get_instance(entity._cw).add_data(fpath)
        if oldpath != fpath:
            # register the new location of the file
            entity.cw_edited.edited_attribute(attr, Binary(fpath))
            # Mark the old file as useless so that it will be removed at
            # commit.
            DeleteFileOp.get_instance(entity._cw).add_data(oldpath)
        return binary

    def entity_deleted(self, entity, attr):
        """an entity using this storage for attr has been deleted"""
        fpath = self.current_fs_path(entity, attr)
        DeleteFileOp.get_instance(entity._cw).add_data(fpath)

    def new_fs_path(self, entity, attr):
        # We try to get some hint about how to name the file using the
        # attribute's `name` metadata, so we use the real file name and
        # extension when available. Keeping the extension is useful, for
        # instance, for PIL processing, which uses the file name extension to
        # detect the content type, and it also gives more understandable file
        # names on the file system.
        basename = [str(entity.eid), attr]
        name = entity.cw_attr_metadata(attr, 'name')
        if name is not None:
            basename.append(name.encode(self.fsencoding))
        fspath = uniquify_path(self.default_directory, '_'.join(basename))
        if fspath is None:
            msg = entity._cw._('failed to uniquify path (%s, %s)') % (
                self.default_directory, '_'.join(basename))
            raise ValidationError(entity.eid, {role_name(attr, 'subject'): msg})
        return fspath

    def current_fs_path(self, entity, attr):
        # fetch the path currently stored in the system source for this attribute
        sysource = entity._cw.pool.source('system')
        cu = sysource.doexec(entity._cw,
                             'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
                                 attr, entity.__regid__, entity.eid))
        rawvalue = cu.fetchone()[0]
        if rawvalue is None: # no previous value
            return self.new_fs_path(entity, attr)
        return sysource._process_value(rawvalue, cu.description[0],
                                       binarywrap=str)

    def migrate_entity(self, entity, attribute):
        """migrate an entity attribute to the storage"""
        entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
        self.entity_added(entity, attribute)
        session = entity._cw
        source = session.repo.system_source
        attrs = source.preprocess_entity(entity)
        sql = source.sqlgen.update('cw_' + entity.__regid__, attrs, ['cw_eid'])
        source.doexec(session, sql, attrs)
        entity.cw_edited = None


class AddFileOp(hook.DataOperationMixIn, hook.Operation):
    """remove files added during the transaction if it is rolled back"""
    def rollback_event(self):
        for filepath in self.get_data():
            try:
                unlink(filepath)
            except Exception, ex:
                self.error("can't remove %s: %s" % (filepath, ex))

class DeleteFileOp(hook.DataOperationMixIn, hook.Operation):
    """remove files marked as obsolete once the transaction is committed"""
    def postcommit_event(self):
        for filepath in self.get_data():
            try:
                unlink(filepath)
            except Exception, ex:
                self.error("can't remove %s: %s" % (filepath, ex))
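

# Illustrative sketches, not part of the original module. The 'File' entity
# type, its 'data' attribute and the directory paths are assumptions made for
# these examples only.

def _example_register_bfss(repo, bfssdir='/var/tmp/bfss'):
    """plug a BytesFileSystemStorage on File.data (example only)

    In a real cube this would typically be done from a server-startup hook.
    """
    storage = BytesFileSystemStorage(bfssdir)
    set_attribute_storage(repo, 'File', 'data', storage)
    # the storage may later be unplugged with:
    # unset_attribute_storage(repo, 'File', 'data')

def _example_migrate_to_fs(session, bfssdir='/var/tmp/bfss'):
    """move File.data values stored in the database onto the file system
    (example only); the kind of loop a migration script might run after
    plugging the storage
    """
    storage = BytesFileSystemStorage(bfssdir)
    set_attribute_storage(session.repo, 'File', 'data', storage)
    for entity in session.execute('Any X WHERE X is File').entities():
        storage.migrate_entity(entity, 'data')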