Logo Search packages:      
Sourcecode: babiloo version File versions  Download package

engine.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-

#    Copyright (C) 2008-2010 Ivan Garcia <contact@ivangarcia.org>
#    This program is free software; you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation; either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License along
#    with this program; if not, write to the Free Software Foundation, Inc.,
#    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import logging

import traceback
import os
from core.dictionary import Dictionary
from core.dictionary.dictionary import DictionaryType
from core.modules.compressed import CompressedFile
import core.modules.utils as utils

log = logging.getLogger("babiloo.core.dictionary.engine")

class DictionaryEngine(object):
    """Manage a set of loaded dictionaries: loading (including compressed
    archives), word indexing, prefix filtering and definition lookup."""

    def __init__(self):
        # Mapping of dictionary unique id -> Dictionary instance.
        self.dictionaries = {}
        # Cache of previous filter results (currently unused by filter()).
        self.lastFilter = {}

        # Parallel word lists for the dictionary last passed to
        # loadIndexes(): original spellings and their normalized forms.
        # Initialized here so filter()/getWords() never hit AttributeError
        # before an index is built.  (words was previously a dict even
        # though it is always used as a list.)
        self.words = []
        self.normalizedWords = []

        # Search history returned by getHistory().  (Previously never
        # initialized, so getHistory() raised AttributeError.)
        self.history = []

        self.caseSensitive = False
        self.ignoreAccents = False

    def loadDictionaries(self, dictionaries):
        """Load every dictionary file path in *dictionaries*.

        Returns a list of per-file result dicts (see loadDictionary for
        the layout); failed loads are included so callers can report them.
        """
        results = []
        for path in dictionaries:
            result = self.loadDictionary(path)
            if result:
                results.append(result)
        return results

    def loadDictionary(self, dictionary):
        """Load a single dictionary file and register it by unique id.

        Compressed archives are extracted first; the largest extracted
        file is assumed to be the dictionary data (the body dwarfs any
        bundled metadata files).

        Returns {'loaded': True, 'dict': <Dictionary>} on success, or
        {'loaded': False, 'error': <code>, 'file_path': <path>} where
        <code> is 'BAD_TYPE', 'ALREADY_INSTALLED' or 'EXCEPTION'.
        """
        try:
            cf = CompressedFile(dictionary)
            if cf.compressed_file:
                extracted_files = cf.extract()
                sizes = [(os.path.getsize(f), f) for f in extracted_files]
                sizes.sort()
                # Largest file wins.
                dictionary = sizes[-1][1]

            new_dict = Dictionary(dictionary)
            if not isinstance(new_dict, DictionaryType):
                return {'loaded': False, 'error': 'BAD_TYPE',
                        'file_path': dictionary}

            dictId = new_dict.getUniqueId()
            if dictId in self.dictionaries:
                return {'loaded': False, 'error': 'ALREADY_INSTALLED',
                        'file_path': dictionary}

            self.dictionaries[dictId] = new_dict
            return {'dict': new_dict, 'loaded': True}
        except Exception:
            # Keep the engine alive on any per-file failure but report it;
            # a bare except here would also swallow KeyboardInterrupt.
            traceback.print_exc()
            return {'loaded': False, 'error': 'EXCEPTION',
                    'file_path': dictionary}

    def loadIndexes(self, mydict=None):
        """Build the in-memory word index for the dictionary id *mydict*.

        Stores two parallel lists: the original words and their
        normalized forms (used for accent-insensitive prefix filtering).
        The word list is not sorted here: the dictionary indexes are
        assumed to be sorted already.
        """
        log.debug('Engine::CreateIndexes() START')
        words = []
        normalizedWords = []
        for word in self.dictionaries[mydict].getWords():
            words.append(word)
            normalizedWords.append(utils.normalize(word))

        self.words = words
        self.normalizedWords = normalizedWords
        log.debug('Engine::CreateIndexes() END')

    def getHistory(self):
        """Return the list of previously searched words."""
        return self.history

    def getWords(self, dictId=None):
        """Return the currently indexed word list.

        *dictId* is accepted for interface symmetry but ignored: the
        index always reflects the dictionary passed to loadIndexes().
        """
        return self.words

    def filter(self, wordFilter, dictId=None, caseSensitive=False, ignoreAccents=False):
        """Return indexed words whose normalized form starts with the
        normalized *wordFilter*.

        If the filter itself contains accented characters, candidates
        must additionally start with the literal (unnormalized) filter.
        A blank filter returns the whole word list.
        """
        if not wordFilter:
            # Blank search: no filtering at all.
            return self.getWords()

        normalized_wordFilter = utils.normalize(wordFilter)
        filtered = [self.words[num]
                    for num, word in enumerate(self.normalizedWords)
                    if word.startswith(normalized_wordFilter)]

        if normalized_wordFilter != wordFilter:
            # The filter contains accented characters: keep only the
            # candidates that also match it literally.
            filtered = [word for word in filtered
                        if word.startswith(wordFilter)]

        return filtered

    def search(self, word, dictId=None):
        """Look *word* up in the dictionary with id *dictId* and return
        its definition (the dictionary yields a (word, definition) pair)."""
        log.debug('Searching for %s ...' % word)
        word, definition = self.dictionaries[dictId][word]
        return definition

    def getDictionary(self, dictId):
        """Return the dictionary registered under *dictId*.

        Raises IndexError if no such dictionary is loaded (kept as
        IndexError for backward compatibility with existing callers).
        """
        if dictId in self.dictionaries:
            return self.dictionaries[dictId]
        else:
            raise IndexError

    def getDictionaries(self):
        """Return all loaded Dictionary instances."""
        return self.dictionaries.values()

    def deleteDictionary(self, dictId):
        """Unregister the dictionary with id *dictId*.

        Returns True on success; raises IndexError if it is not loaded.
        """
        if dictId in self.dictionaries:
            del self.dictionaries[dictId]
            return True
        else:
            raise IndexError


Generated by  Doxygen 1.6.0   Back to index