nltk - Comparing Document Similarity

Using NLTK for natural language processing in Python to compare the similarity between documents; both Chinese and English documents are currently supported.

1. Comparing English document similarity

Notes:

* The reference data can come from an external source; the processing pipeline is (see the short sketch after this list):

– convert the documents to bags of words

– run them through a tfidf model built on the dataset

* Chinese is not handled in this part

– the tokenizer does not support it

* No additional corpora are loaded
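For reference, here is a minimal sketch (toy documents, not the course data) of what the bag-of-words and tfidf steps below produce in gensim:

from gensim import corpora, models

toy_texts = [['machine', 'interface', 'interface'], ['machine', 'learning']]
toy_dict = corpora.Dictionary(toy_texts)
toy_bow = [toy_dict.doc2bow(text) for text in toy_texts]
print(toy_bow[0])              #a list of (word_id, frequency) pairs
toy_tfidf = models.TfidfModel(toy_bow)
print(toy_tfidf[toy_bow[0]])   #a list of (word_id, tfidf weight) pairs; terms occurring in every document get weight 0 and are dropped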

#!/usr/bin/env python
#-*-coding=utf-8-*-

"""原始数据"""
#缩水版的courses,实际数据的格式应该为 课程名\t课程简介\t课程详情,并已去除html等干扰因素
courses = ['Writing II: Rhetorical Composing', 'Genetics and Society: A Course for Educators', 'General Game Playing', 'Genes and the Human Condition (From Behavior to Biotechnology)', 'A Brief History of Humankind', 'New Models of Business in Society', 'Analyse Numrique pour Ingnieurs', 'Evolution: A Course for Educators', 'Coding the Matrix: Linear Algebra through Computer Science Applications', 'The Dynamic Earth: A Course for Educators']
#with real data: courses_name = [course.split('\t')[0] for course in courses]
courses_name = ['Writing II: Rhetorical Composing', 'Genetics and Society: A Course for Educators', 'General Game Playing', 'Genes and the Human Condition (From Behavior to Biotechnology)', 'A Brief History of Humankind', 'New Models of Business in Society', 'Analyse Numrique pour Ingnieurs', 'Evolution: A Course for Educators', 'Coding the Matrix: Linear Algebra through Computer Science Applications', 'The Dynamic Earth: A Course for Educators']



"""预处理(easy_install nltk)"""

#pull in nltk
import nltk
#nltk.download()    #downloads the corpora etc. used below; it takes a while, so it is best prepared in advance
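#Note (assumption): instead of the full nltk.download(), fetching just the resources
#this script relies on should be enough:
#nltk.download('punkt')        #needed by word_tokenize
#nltk.download('stopwords')    #needed by stopwords.words('english')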

#tokenization
#naive baseline: just lowercase and split on whitespace (not used further below)
texts_lower = [[word for word in document.lower().split()] for document in courses]
#proper tokenization with nltk
from nltk.tokenize import word_tokenize
texts_tokenized = [[word.lower() for word in word_tokenize(document)] for document in courses]

#remove stopwords
from nltk.corpus import stopwords
english_stopwords = stopwords.words('english')
texts_filtered_stopwords = [[word for word in document if word not in english_stopwords] for document in texts_tokenized]
#remove punctuation
english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
texts_filtered = [[word for word in document if word not in english_punctuations] for document in texts_filtered_stopwords]
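#Alternative (assumption): the standard library's string.punctuation could be used to build
#a more complete punctuation list than the hand-written one above:
#import string
#english_punctuations = list(string.punctuation)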

#stemming
from nltk.stem.lancaster import LancasterStemmer
st = LancasterStemmer()
texts_stemmed = [[st.stem(word) for word in document] for document in texts_filtered]


#drop very low-frequency words (here: stems that occur only once in the whole collection)
all_stems = sum(texts_stemmed, [])
stems_once = set(stem for stem in set(all_stems) if all_stems.count(stem) == 1)
texts = [[stem for stem in text if stem not in stems_once] for text in texts_stemmed]

"""
注意:本例子中只用了course_name字段,大多数word频率过低,造成去除低频词后,有些document可能为空
          因此后面的处理结果只做示范
"""


"""
引入gensim,正式开始处理(easy_install gensim)

输入:
     1.去掉了停用词
     2.去掉了标点符号
     3.处理为词干
     4.去掉了低频词

"""
from gensim import corpora, models, similarities

#so the processing log is visible
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]     #doc2bow(): converts a collection of words into a bag of words, represented as (word_id, word_frequency) 2-tuples
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]

#an off-the-cuff choice: train an LSI model with 10 topics
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=10)
index = similarities.MatrixSimilarity(lsi[corpus])     # index is a gensim.similarities.docsim.MatrixSimilarity instance
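#Optional (assumption): inspect what the 10 LSI topics look like; print_topics() is part of the gensim LsiModel API
#lsi.print_topics(10)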


"""
对具体对象相似度匹配
"""
#选择一个基准数据
ml_course = texts[2]
ml_bow = dictionary.doc2bow(ml_course)     
#在上面选择的模型数据 lsi 中,计算其他数据与其的相似度
ml_lsi = lsi[ml_bow]     #ml_lsi 形式如 (topic_id, topic_value)
sims = index[ml_lsi]     #sims 是最终结果了, index[xxx] 调用内置方法 __getitem__() 来计算ml_lsi
#排序,为输出方便
sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])

#查看结果
print sort_sims[0:10]   #看下前10个最相似的,第一个是基准数据自身
print courses_name[2]   #看下实际最相似的数据叫什么
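To make the output easier to read, the indices in sort_sims can be mapped back to course names (a small sketch, not in the original output):

for idx, score in sort_sims[0:10]:
    print("%s\t%.4f" % (courses_name[idx], score))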

2. Comparing Chinese document similarity

* Use a Chinese word segmenter (I chose jieba; see the short sketch after this list)

* Handle Chinese characters as unicode strings

* Declare the source encoding of the python file consistently as gbk

* Use a corpus that supports Chinese
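As a quick illustration of the segmenter (a minimal sketch; the sentence and the topK value of 5 are arbitrary choices, and this standalone snippet is assumed to live in a utf-8 file):

#-*-coding=utf-8-*-
import jieba.analyse

#top 5 keywords of a sentence, ranked by tfidf weight
print(jieba.analyse.extract_tags(u'比较文档之间的相似度', 5))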

#!/usr/bin/env python
#-*-coding=gbk-*-

"""
     原始数据,用于建立模型
"""
#缩水版的courses,实际数据的格式应该为 课程名\t课程简介\t课程详情,并已去除html等干扰因素
courses = [           
            u'Writing II: Rhetorical Composing',
            u'Genetics and Society: A Course for Educators',
            u'General Game Playing',
            u'Genes and the Human Condition (From Behavior to Biotechnology)',
            u'A Brief History of Humankind',
            u'New Models of Business in Society',
            u'Analyse Numérique pour Ingénieurs',
            u'Evolution: A Course for Educators',
            u'Coding the Matrix: Linear Algebra through Computer Science Applications',
            u'The Dynamic Earth: A Course for Educators',
            u'Tiny Wings\tYou have always dreamed of flying - but your wings are tiny. Luckily the world is full of beautiful hills. Use the hills as jumps - slide down, flap your wings and fly! At least for a moment - until this annoying gravity brings you back down to earth. But the next hill is waiting for you already. Watch out for the night and fly as fast as you can. ',
            u'Angry Birds Free',
            u'没有\它很相似',
            u'没有\t它很相似',
            u'没有\t他很相似',
            u'没有\t他不很相似',
            u'没有',
            u'可以没有',
            u'也没有',
            u'有没有也不管',
            u'Angry Birds Stella',
            u'Flappy Wings - FREE\tFly into freedom!A parody of the #1 smash hit game!',
            u'没有一个',
            u'没有一个2',
           ]

#only kept so the final output is easy to read
#with real data: courses_name = [course.split('\t')[0] for course in courses]
courses_name = courses


"""
    预处理(easy_install nltk)
"""
def pre_process_cn(courses, low_freq_filter=True):
    """
     Simplified Chinese + English preprocessing:
        1. remove stopwords
        2. remove punctuation
        3. reduce to word stems
        4. remove low-frequency words

    """
    import nltk
    import jieba.analyse
    from nltk.tokenize import word_tokenize
   
    texts_tokenized = []
    for document in courses:
        texts_tokenized_tmp = []
        for word in word_tokenize(document):
            texts_tokenized_tmp += jieba.analyse.extract_tags(word,10)
        texts_tokenized.append(texts_tokenized_tmp)   
   
    texts_filtered_stopwords = texts_tokenized     #stopword removal is not applied in this version; just reuse the tokenized texts

    #remove punctuation
    english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%']
    texts_filtered = [[word for word in document if word not in english_punctuations] for document in texts_filtered_stopwords]

    #stemming
    from nltk.stem.lancaster import LancasterStemmer
    st = LancasterStemmer()
    texts_stemmed = [[st.stem(word) for word in document] for document in texts_filtered]
   
    #drop very low-frequency words
    if low_freq_filter:
        all_stems = sum(texts_stemmed, [])
        stems_once = set(stem for stem in set(all_stems) if all_stems.count(stem) == 1)
        texts = [[stem for stem in text if stem not in stems_once] for text in texts_stemmed]
    else:
        texts = texts_stemmed
    return texts

lib_texts = pre_process_cn(courses)



"""
    引入gensim,正式开始处理(easy_install gensim)
"""

def train_by_lsi(lib_texts):
    """
        通过LSI模型的训练
    """
    from gensim import corpora, models, similarities

    #so the processing log is visible
    #import logging
    #logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    dictionary = corpora.Dictionary(lib_texts)
    corpus = [dictionary.doc2bow(text) for text in lib_texts]     #doc2bow(): converts a collection of words into a bag of words, represented as (word_id, word_frequency) 2-tuples
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]

    #an off-the-cuff choice: train an LSI model with 10 topics
    lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=10)
    index = similarities.MatrixSimilarity(lsi[corpus])     # index is a gensim.similarities.docsim.MatrixSimilarity instance
   
    return (index, dictionary, lsi)

   
#the library is now built -- this part may involve a lot of data, so it can be processed ahead of time and stored (see the sketch below)
(index,dictionary,lsi) = train_by_lsi(lib_texts)
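#Note (a sketch, assumption): the trained objects can be persisted with gensim's save/load
#methods, so the training step does not have to be repeated; the /tmp paths are placeholders.
#dictionary.save('/tmp/course.dict')
#lsi.save('/tmp/course.lsi')
#index.save('/tmp/course.index')
#...and in a later run, reload them instead of retraining:
#from gensim import corpora, models, similarities
#dictionary = corpora.Dictionary.load('/tmp/course.dict')
#lsi = models.LsiModel.load('/tmp/course.lsi')
#index = similarities.MatrixSimilarity.load('/tmp/course.index')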
   
   
#the target data to be queried enters the stage
target_courses = [u'没有']
target_text = pre_process_cn(target_courses, low_freq_filter=False)


"""
对具体对象相似度匹配
"""

#pick the reference data
ml_course = target_text[0]

#bag-of-words representation
ml_bow = dictionary.doc2bow(ml_course)  

#within the lsi model built above, compute the similarity of the library documents to it
ml_lsi = lsi[ml_bow]     #ml_lsi is a list of (topic_id, topic_value) pairs
sims = index[ml_lsi]     #sims is the final result; index[xxx] calls the built-in __getitem__() to score ml_lsi

#sort, for convenient output
sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])

#inspect the results
print(sort_sims[0:10])   #the 10 most similar entries; the top hit is the library course whose text matches the query
print(courses_name[sort_sims[1][0]])   #the name of the most similar course after the exact match
print(courses_name[sort_sims[2][0]])   #the next most similar course
print(courses_name[sort_sims[3][0]])   #the one after that
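To avoid repeating these steps for every query, they could be wrapped in a small helper (a hypothetical sketch, not part of the original post; query_similarity is a name introduced here):

def query_similarity(target, index, dictionary, lsi, top_n=10):
    """Return the top_n (course_index, similarity_score) pairs for a query string."""
    text = pre_process_cn([target], low_freq_filter=False)[0]
    bow = dictionary.doc2bow(text)
    sims = index[lsi[bow]]
    return sorted(enumerate(sims), key=lambda item: -item[1])[0:top_n]

#e.g.:
#for idx, score in query_similarity(u'没有', index, dictionary, lsi):
#    print("%s\t%.4f" % (courses_name[idx], score))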

Original post: 深蓝苹果, https://my.oschina.net/kakablue/home
