文本相似度方案
文章目录
- SequenceMatcher
- 余弦相似度
- 基于逆向文档频率向量化
SequenceMatcher
from difflib import SequenceMatcher

s1 = "1.2 章节标题【abc】"
s2 = "1.2 章节标题【abc】、【she】、【this】"
SequenceMatcher(None, s1, s2).ratio()
# 0.6666666666666666
余弦相似度
- 文本向量化
import jieba  # 分词库
import numpy as np

# 基于词频的向量化
s1_list = list(jieba.cut(s1))
# ['1.2', ' ', '章节', '标题', '【', 'abc', '】']
s2_list = list(jieba.cut(s2))
# ['1.2', ' ', '章节', '标题', '【', 'abc', '】', '、', '【', 'she', '】', '、', '【', 'this', '】']

# 统计语料库
corpus = set(s1_list) | set(s2_list)
# {' ', '1.2', 'abc', 'she', 'this', '、', '【', '】', '标题', '章节'}

# 文档词频统计
arr1 = np.array([s1_list.count(i) for i in corpus])
# array([0, 1, 1, 1, 0, 0, 1, 1, 1, 1])
arr2 = np.array([s2_list.count(i) for i in corpus])
# array([2, 1, 1, 1, 1, 1, 1, 1, 3, 3])
- 计算cosine_theta值
from sklearn.metrics.pairwise import cosine_similarity

# 计算余弦相似度
score = cosine_similarity(arr1.reshape(1, -1), arr2.reshape(1, -1))
# array([[0.77204865]])
可以看出余弦相似度比普通的SequenceMatcher具有更好的效果。
基于逆向文档频率向量化
In [41]: ss1 = " ".join(s1_list)

In [42]: ss1
Out[42]: '1.2 章节 标题 【 abc 】'

In [43]: ss2 = " ".join(s2_list)

In [44]: ss2
Out[44]: '1.2 章节 标题 【 abc 】 、 【 she 】 、 【 this 】'

In [45]: from sklearn.feature_extraction.text import TfidfVectorizer

In [46]: vector = TfidfVectorizer()

In [47]: r = vector.fit_transform([ss1, ss2])

In [48]: r
Out[48]:
<2x5 sparse matrix of type '<class 'numpy.float64'>'
    with 8 stored elements in Compressed Sparse Row format>

In [49]: r.toarray()
Out[49]:
array([[0.57735027, 0.        , 0.        , 0.57735027, 0.57735027],
       [0.37930349, 0.53309782, 0.53309782, 0.37930349, 0.37930349]])

In [50]: rr = r.toarray()

In [51]: r
Out[51]:
<2x5 sparse matrix of type '<class 'numpy.float64'>'
    with 8 stored elements in Compressed Sparse Row format>

In [52]: rr
Out[52]:
array([[0.57735027, 0.        , 0.        , 0.57735027, 0.57735027],
       [0.37930349, 0.53309782, 0.53309782, 0.37930349, 0.37930349]])