import requests
import re
def News_Spider():
    """Crawl the Sina news homepage and return headlines as a numbered dict.

    Returns:
        dict: keys 'd1', 'd2', ... (1-based) mapping to headline strings
        scraped from `<a target="_blank">` anchors on the homepage.
    """
    url = 'https://news.sina.com.cn/'  # Sina news homepage
    headers = {  # pretend to be a regular desktop browser
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    # BUG FIX: the original passed `headers` positionally, which binds it to
    # requests.get's `params` argument -- the User-Agent was never sent as a
    # header. verify=False skips TLS certificate verification (original intent).
    response = requests.get(url, headers=headers, verify=False)
    response.encoding = 'utf-8'  # force correct decoding of the page
    html = response.text
    # BUG FIX: the original pattern '(.*?)/a>' was missing the '<' of the
    # closing tag, so every capture carried a trailing '<'.
    reg = r'target="_blank">(.*?)</a>'
    content = re.findall(reg, html)
    ls = []
    for c in content:
        # BUG FIX: the original tested `'' in c`, which is always True, so
        # every headline was skipped and the result was always empty.
        # Skip captures that still contain markup (the match spanned tags).
        if '<' in c:
            continue
        # Keep reasonably long headlines; drop app-download links ('客戶端').
        if len(c) > 6 and '客戶端' not in c:
            ls.append(c)
    # Number the documents d1, d2, ... starting from 1.
    docu_set = {'d{}'.format(i + 1): text for i, text in enumerate(ls)}
    return docu_set
def change_set(docu_set=None):
    """Return the set of all whitespace-separated tokens across documents.

    Args:
        docu_set: optional mapping of doc-id -> text. When None (the
            default, preserving the original behavior) the documents are
            fetched via News_Spider(). Passing the mapping avoids a
            redundant re-download of the page.

    Returns:
        set: every distinct whitespace-delimited word in the documents.
    """
    if docu_set is None:
        docu_set = News_Spider()  # original behavior: fetch fresh documents
    all_words = []
    for text in docu_set.values():
        all_words.extend(text.split())  # naive whitespace "tokenization"
    return set(all_words)
def reverse_index(docu_set=None):
    """Build an inverted index mapping each word to the documents containing it.

    Args:
        docu_set: optional mapping of doc-id -> text. When None (the
            default, preserving the original behavior) the documents are
            fetched via News_Spider().

    Returns:
        dict: word -> list of doc ids whose whitespace tokens include it.
    """
    if docu_set is None:
        docu_set = News_Spider()  # original behavior: fetch the documents
    # BUG FIX: the original called change_set() (which re-downloaded the page)
    # and News_Spider() separately, so the vocabulary and the postings could
    # be built from two DIFFERENT page snapshots. Build both from one set.
    vocabulary = set()
    for text in docu_set.values():
        vocabulary.update(text.split())
    invert_index = {}
    for word in vocabulary:
        # Postings list: every document whose token list contains the word.
        invert_index[word] = [doc_id for doc_id, text in docu_set.items()
                              if word in text.split()]
    print(invert_index)  # kept from the original: dump the finished index
    return invert_index
def Select():
docu_set = News_Spider()
invert_index = reverse_index()
news = []
# for i in invert_index:
# print(invert_index[i])
while True:
Find = str(input('請輸入查找內(nèi)容:'))
if Find == '不查了':
break
for Contetnt in invert_index:#循環(huán)每一個鍵
if Find in Contetnt:#如果輸入在鍵的字符串中
Result = invert_index[Contetnt]#循環(huán)出字典中每一個對應(yīng)的值
#print(Result)
for r in Result:#循環(huán)每一個值
if r in docu_set.keys():#如果值在字典中
news.append(docu_set[r])#列表增加字典docu_set的值
print(docu_set[r])#打印輸出字典的值
else:
continue
else:
if Find not in Contetnt:
news.append('很抱歉,沒有找到更多內(nèi)容?。?)
#news = set(news)
for n in news:
if '很抱歉' in n:
print(n)
break
else:
print(n)
def main_function():
    """Program entry point: run the interactive news search.

    The original also called News_Spider(), change_set() and reverse_index()
    here and discarded every result -- each call re-downloaded the page.
    Select() performs its own crawl and index build, so calling it alone
    preserves the useful behavior without the redundant network traffic.
    """
    Select()
if __name__ == '__main__':  # run only when executed as a script
    main_function()
# (article footer, not code) 到此這篇關(guān)于Python實現(xiàn)簡單的索引排序與搜索功能的文章就介紹到這了,更多相關(guān)python實現(xiàn)索引排序和搜索內(nèi)容請搜索腳本之家以前的文章或繼續(xù)瀏覽下面的相關(guān)文章希望大家以后多多支持腳本之家!