fix: 订阅搜索报错 (subscription search error)
linyuan0213 committed Jul 13, 2024
1 parent 379c8f2 commit 551050b
Showing 11 changed files with 13 additions and 3 deletions.
1 change: 1 addition & 0 deletions app/indexer/client/_spider.py
@@ -267,6 +267,7 @@ def download_midware(self, request):
         else:
             chrome = DrissionPageHelper()
             tries = 3
+            html_text = ''
             if chrome.get_status():
                 while tries > 0:
                     try:
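
Note on the recurring one-line change in this commit: `html_text` is now initialized to an empty string before each retry loop. Without the default, a loop in which every `chrome.get_page_html()` call raises leaves `html_text` unbound, and the code after the loop fails with `UnboundLocalError` instead of seeing an empty page. A minimal sketch of the two behaviors, with `fetch` as a hypothetical stand-in for `chrome.get_page_html`:

def fetch(url):
    # Hypothetical stand-in for chrome.get_page_html(); here it always fails.
    raise RuntimeError("page render failed")

def load_page_without_default(url):
    tries = 3
    while tries > 0:
        try:
            html_text = fetch(url)
            if html_text:
                break
        except Exception:
            pass
        finally:
            tries -= 1
    return html_text  # UnboundLocalError: never assigned when every attempt raised

def load_page_with_default(url):
    tries = 3
    html_text = ''    # the added line: a safe default
    while tries > 0:
        try:
            html_text = fetch(url)
            if html_text:
                break
        except Exception:
            pass
        finally:
            tries -= 1
    return html_text  # '' when every attempt failed, so callers can test truthiness
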
1 change: 1 addition & 0 deletions app/plugins/modules/_autosignin/btschool.py
@@ -102,6 +102,7 @@ def signin(self, site_info: dict):
 
     def __chrome_visit(self, chrome, url, ua, site_cookie, proxy, site):
         tries = 3
+        html_text = ''
         while tries > 0:
             try:
                 html_text = chrome.get_page_html(url=url, ua=ua, cookies=site_cookie, proxies=proxy)
1 change: 1 addition & 0 deletions app/plugins/modules/_autosignin/tjupt.py
@@ -192,6 +192,7 @@ def signin(self, site_info: dict):
             image_search_url = f"https://lens.google.com/uploadbyurl?url={img_url}"
             chrome = DrissionPageHelper()
             tries = 3
+            html_text = ''
             while tries > 0:
                 try:
                     html_text = chrome.get_page_html(url=image_search_url, ua=ua, proxies=Config().get_proxies() if proxy else None)
2 changes: 2 additions & 0 deletions app/plugins/modules/autosignin.py
@@ -466,6 +466,7 @@ def __signin_base(self, site_info):
             if "1ptba" in home_url:
                 home_url = f"{home_url}/index.php"
             tries = 3
+            html_text = ''
             while tries > 0:
                 try:
                     html_text = chrome.get_page_html(url=home_url, ua=ua, cookies=site_cookie, proxies=site_info.get("proxy"))
@@ -500,6 +501,7 @@ def __signin_base(self, site_info):
             # 开始仿真
             try:
                 tries = 3
+                html_text = ''
                 while tries > 0:
                     try:
                         html_text = chrome.get_page_html(url=home_url,
1 change: 1 addition & 0 deletions app/plugins/modules/opensubtitles.py
@@ -215,6 +215,7 @@ def __parse_opensubtitles_results(cls, url):
             return []
         # 源码
         tries = 3
+        html_text = ''
         while tries > 0:
             try:
                 html_text = chrome.get_page_html(url=url)
2 changes: 1 addition & 1 deletion app/searcher.py
@@ -174,7 +174,7 @@ def search_one_media(self, media_info,
         unique_media_list = []
         media_seen = set()
         for d in media_list:
-            org_string = StringUtils.md5_hash(d.org_string + d.site + d.description)
+            org_string = StringUtils.md5_hash(f'{d.org_string}{d.site}{d.description or ""}')
             if org_string not in media_seen:
                 unique_media_list.append(d)
                 media_seen.add(org_string)
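
This hunk is the subscription-search error itself: when a search result carries `description=None`, the old key `d.org_string + d.site + d.description` raises `TypeError` and the whole search aborts, while the f-string with `d.description or ""` keeps the deduplication key intact. A small sketch under stated assumptions, where `md5_hash` and `Result` are simplified stand-ins for `StringUtils.md5_hash` and the real result objects:

import hashlib
from dataclasses import dataclass
from typing import Optional

def md5_hash(text: str) -> str:
    # Simplified stand-in for StringUtils.md5_hash
    return hashlib.md5(text.encode()).hexdigest()

@dataclass
class Result:
    # Minimal stand-in for a search result; real objects carry many more fields.
    org_string: str
    site: str
    description: Optional[str]

r = Result(org_string="Some.Show.S01E01.1080p", site="example-site", description=None)

# Old key: string concatenation fails as soon as description is None.
try:
    md5_hash(r.org_string + r.site + r.description)
except TypeError as e:
    print("old key fails:", e)

# New key: the f-string with a fallback tolerates the missing description.
print("new key:", md5_hash(f'{r.org_string}{r.site}{r.description or ""}'))
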
3 changes: 2 additions & 1 deletion app/sites/site_userinfo.py
@@ -71,13 +71,14 @@ def build(self, url, site_id, site_name,
         chrome = DrissionPageHelper()
         if emulate:
             tries = 3
+            html_text = ''
             while tries > 0:
                 try:
                     html_text = chrome.get_page_html(url=url, ua=ua, cookies=site_cookie, proxies=Config().get_proxies() if proxy else None)
                     if html_text:
                         break
                 except Exception as e:
-                    self.debug(f'获取网页HTML失败: {str(e)} 重试中...')
+                    log.debug(f'获取网页HTML失败: {str(e)} 重试中...')
                 finally:
                     tries -= 1
                     sleep(2)
1 change: 1 addition & 0 deletions app/sites/siteconf.py
@@ -265,6 +265,7 @@ def __get_site_page_html(url, cookie, ua, headers=None, render=False, proxy=Fals
     if render and chrome.get_status():
         # 开渲染
         tries = 3
+        html_text = ''
         while tries > 0:
             try:
                 html_text = chrome.get_page_html(url=url, cookies=cookie, ua=ua, proxies=proxy)
1 change: 1 addition & 0 deletions app/sites/sites.py
@@ -307,6 +307,7 @@ def test_connection(self, site_id):
             chrome = DrissionPageHelper()
             start_time = datetime.now()
             tries = 3
+            html_text = ''
             while tries > 0:
                 try:
                     html_text = chrome.get_page_html(url=site_url, ua=ua, cookies=site_cookie, proxies=Config().get_proxies() if site_info.get("proxy") else None)
1 change: 1 addition & 0 deletions app/sites/siteuserinfo/_base.py
@@ -260,6 +260,7 @@ def _get_page_content(self, url, params=None, headers=None):
         chrome = DrissionPageHelper()
         if self._emulate and chrome.get_status():
             tries = 3
+            html_text = ''
             while tries > 0:
                 try:
                     html_text = chrome.get_page_html(url=url, ua=self._ua, cookies=self._site_cookie, proxies=proxies)
2 changes: 1 addition & 1 deletion web/backend/search_torrents.py
@@ -163,7 +163,7 @@ def search_medias_for_web(content, ident_flag=True, filters=None, tmdbid=None, m
         unique_media_list = []
         media_seen = set()
         for d in media_list:
-            org_string = StringUtils.md5_hash(d.org_string + d.site + d.description)
+            org_string = StringUtils.md5_hash(f'{d.org_string}{d.site}{d.description or ""}')
             if org_string not in media_seen:
                 unique_media_list.append(d)
                 media_seen.add(org_string)
