from bs4 import BeautifulSoup
import requests
import os
import re
import time
import urllib3
from win32com.client import Dispatch

class DownloadVideo:
    def __init__(self):
        self.r = requests.session()
        self.url = self.get_url()
        self.download_urla = []
        self.download_urlb = []
        self.url_set = ["%s/shipin/list-短視頻.html" % self.url]

    # Get the site's latest address (follow the redirect from k58.com)
    def get_url(self):
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        a = self.r.get('https://www.k58.com', verify=False)
        b = a.url
        return b

    # Build the URLs of the first n listing pages
    def url_set1(self, n):
        if n == 2:
            url = "%s/shipin/list-短視頻-2.html" % self.url
            self.url_set.append(url)
        elif n >= 3:
            m = n + 1
            for i in range(2, m):
                url = "%s/shipin/list-短視頻-%d.html" % (self.url, i)
                self.url_set.append(url)
        else:
            pass

    # Load each listing page and collect the detail-page URL of every video
    def download_url1(self):
        for j in self.url_set:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            r = self.r.get(j, verify=False)
            sp1 = r.content
            soup = BeautifulSoup(sp1, "html.parser")
            sp2 = soup.find_all(class_="shown")
            for i in sp2:
                url1 = re.findall('a href="(.*?)" rel="external nofollow" ', str(i))
                u = self.url + url1[0]
                self.download_urla.append(u)
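
    # Note (not in the original code): instead of regex-matching the tag's string
    # form, the href attribute could be read directly with BeautifulSoup, e.g.:
    #     a_tag = i.find("a")
    #     if a_tag and a_tag.get("href"):
    #         self.download_urla.append(self.url + a_tag["href"])
    # This also avoids an IndexError when a listing entry has no matching link.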

    # Visit each detail page and extract the video's direct download link
    def download_url2(self):
        for i in self.download_urla:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            r = self.r.get(i, verify=False)
            sp1 = r.content
            soup = BeautifulSoup(sp1, "html.parser")
            sp2 = soup.find_all(class_="form-control input-sm copy_btn app_disable")
            for j in sp2:
                url2 = j["data-clipboard-text"]
                self.download_urlb.append(url2)
                # Write the link to a txt file
                # self.write_txt(url2)

    # Download with Thunder (迅雷) via its COM interface
    def thunder_download(self):
        try:
            thunder = Dispatch("ThunderAgent.Agent64.1")
            for i in self.download_urlb:
                thunder.AddTask(i)
            thunder.CommitTasks()
            time.sleep(2)
        except Exception:
            print("Please install Thunder (迅雷) and enable one-click download under Download Management in its settings center")

    def mkdir(self, path):
        folder = os.path.exists(path)
        if not folder:
            os.makedirs(path)
        else:
            pass

    def write_txt(self, c):
        self.mkdir(r"D:\AAAAA")
        file_name = time.strftime('%Y%m%d_%H%M%S.txt')
        with open(r"D:\AAAAA\%s" % file_name, 'a') as f:
            f.write(c + "\n")

if __name__ == '__main__':
    d = DownloadVideo()
    # The argument is how many listing pages to crawl
    d.url_set1(5)
    d.download_url1()
    d.download_url2()
    d.thunder_download()
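
Thunder is Windows-only and requires its COM agent to be registered. If it is unavailable, the collected links can also be saved directly with requests. The sketch below is not part of the original script; it assumes the entries in download_urlb are plain HTTP(S) file URLs, and the requests_download name and the D:\AAAAA save directory are only illustrative.

import os
import requests
import urllib3

def requests_download(urls, save_dir=r"D:\AAAAA"):
    # Stream each link straight to disk instead of handing it to Thunder.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    os.makedirs(save_dir, exist_ok=True)
    for url in urls:
        # Derive a file name from the URL; fall back to a generic name if it is empty.
        name = url.split("/")[-1].split("?")[0] or "video.mp4"
        path = os.path.join(save_dir, name)
        with requests.get(url, stream=True, verify=False, timeout=30) as resp:
            resp.raise_for_status()
            with open(path, "wb") as f:
                for chunk in resp.iter_content(chunk_size=1024 * 1024):
                    f.write(chunk)

# Usage, after d.download_url2():
# requests_download(d.download_urlb)

Streaming with a large chunk size keeps memory use flat even for big video files, at the cost of losing Thunder's multi-connection acceleration.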
This concludes the article on sample code for a Python crawler that downloads videos from a website. For more on crawling site videos with Python, please search 腳本之家's previous articles or browse the related articles below, and please continue to support 腳本之家!