diff --git a/DrissionPage/_functions/tools.py b/DrissionPage/_functions/tools.py
index c36af3d..d5e140f 100644
--- a/DrissionPage/_functions/tools.py
+++ b/DrissionPage/_functions/tools.py
@@ -266,7 +266,7 @@ def raise_error(r):
'No node with given id found', 'Node with given id does not belong to the document',
'No node found for given backend id'):
raise ElementLostError
- elif error == 'tab closed':
+ elif error in ('tab closed', 'No target with given id found'):
raise PageClosedError
elif error == 'timeout':
raise TimeoutError
diff --git a/DrissionPage/_pages/chromium_base.py b/DrissionPage/_pages/chromium_base.py
index 50bc643..63f7c19 100644
--- a/DrissionPage/_pages/chromium_base.py
+++ b/DrissionPage/_pages/chromium_base.py
@@ -172,6 +172,8 @@ class ChromiumBase(BasePage):
except:
if self._debug:
print('获取文档失败。')
+ from traceback import print_exc
+ print_exc()
print('请把报错信息和重现方法告知作者,感谢。\nhttps://gitee.com/g1879/DrissionPage/issues/new')
raise
# return False
diff --git a/DrissionPage/_pages/chromium_frame.py b/DrissionPage/_pages/chromium_frame.py
index 2c77f01..228d96f 100644
--- a/DrissionPage/_pages/chromium_frame.py
+++ b/DrissionPage/_pages/chromium_frame.py
@@ -4,7 +4,7 @@
@Contact : g1879@qq.com
"""
from copy import copy
-from re import search, findall
+from re import search, findall, DOTALL
from time import sleep, perf_counter
from .._elements.chromium_element import ChromiumElement
@@ -304,7 +304,7 @@ class ChromiumFrame(ChromiumBase):
"""返回元素outerHTML文本"""
tag = self.tag
out_html = self._target_page.run_cdp('DOM.getOuterHTML', backendNodeId=self.frame_ele._backend_id)['outerHTML']
- sign = search(rf'<{tag}.*?>', out_html).group(0)
+ sign = search(rf'<{tag}.*?>', out_html, DOTALL).group(0)
return f'{sign}{self.inner_html}</{tag}>'
@property
diff --git a/DrissionPage/_pages/session_page.py b/DrissionPage/_pages/session_page.py
index 038bf38..9dfcbf5 100644
--- a/DrissionPage/_pages/session_page.py
+++ b/DrissionPage/_pages/session_page.py
@@ -4,7 +4,7 @@
@Contact : g1879@qq.com
"""
from pathlib import Path
-from re import search
+from re import search, DOTALL
from time import sleep
from urllib.parse import urlparse, quote
@@ -379,7 +379,7 @@ def set_charset(response):
# 在headers中获取不到编码,且如果是网页
elif content_type.replace(' ', '').startswith('text/html'):
- re_result = search(b'<meta.*?charset=[ \'"]*([^"\' />]+).*?>', response.content)
+ re_result = search(b'<meta.*?charset=[ \'"]*([^"\' />]+).*?>', response.content, DOTALL)
if re_result:
charset = re_result.group(1).decode()