refactor: simplify text splitting logic in stream processor

- Remove complex conditional logic for short/medium/long text differentiation
- Unify text splitting to always use consistent CHUNK_SIZE-based splitting
- Rely on dynamic delay calculation for output speed adjustment
- Reduce code complexity in both main smoother and lightweight smoother
Author: fawney19
Date:   2025-12-19 09:48:11 +08:00
Parent: 070121717d
Commit: 6c0373fda6


@@ -631,26 +631,21 @@ class StreamProcessor:
     def _split_content(self, content: str) -> list[str]:
         """
-        Split text intelligently based on its length
-        Short text: split character by character (more realistic typing effect)
-        Long text: split by CHUNK_SIZE (avoids excessive delay)
+        Split text into chunks
+        Always split by CHUNK_SIZE; typing feel is controlled by the dynamic delay.
         """
         text_length = len(content)
         if text_length <= self.CHUNK_SIZE:
             return [content]
-        # Split long text into chunks
-        if text_length >= self.LONG_TEXT_THRESHOLD:
-            chunks = []
-            for i in range(0, text_length, self.CHUNK_SIZE):
-                chunks.append(content[i : i + self.CHUNK_SIZE])
-            return chunks
-        # Split short/medium text character by character
-        return list(content)
+        # Uniform chunk-based splitting
+        chunks = []
+        for i in range(0, text_length, self.CHUNK_SIZE):
+            chunks.append(content[i : i + self.CHUNK_SIZE])
+        return chunks

     async def _cleanup(
         self,
         response_ctx: Any,
@@ -734,9 +729,7 @@ class _LightweightSmoother:
         text_length = len(content)
         if text_length <= self.CHUNK_SIZE:
             return [content]
-        if text_length >= self.LONG_TEXT_THRESHOLD:
-            return [content[i : i + self.CHUNK_SIZE] for i in range(0, text_length, self.CHUNK_SIZE)]
-        return list(content)
+        return [content[i : i + self.CHUNK_SIZE] for i in range(0, text_length, self.CHUNK_SIZE)]

     async def smooth(
         self, stream_generator: AsyncGenerator[bytes, None]
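
The dynamic delay that the commit message and the new docstring rely on is not part of this diff. Below is a minimal standalone sketch of how uniform CHUNK_SIZE splitting might pair with a length-proportional per-chunk delay; the names CHUNK_SIZE, BASE_DELAY, split_content, and emit_smoothly are illustrative assumptions, not taken from this repository.

import asyncio

# Assumed constants; the real values live in StreamProcessor and are not shown in this diff.
CHUNK_SIZE = 20        # characters per emitted chunk
BASE_DELAY = 0.005     # seconds of delay per character

def split_content(content: str) -> list[str]:
    """Uniform CHUNK_SIZE splitting, mirroring the refactored _split_content."""
    if len(content) <= CHUNK_SIZE:
        return [content]
    return [content[i : i + CHUNK_SIZE] for i in range(0, len(content), CHUNK_SIZE)]

async def emit_smoothly(content: str) -> None:
    """Emit chunks with a delay proportional to chunk length.

    Longer chunks wait longer, so uniform splitting can still read like typing
    without the removed short/medium/long branching.
    """
    for chunk in split_content(content):
        print(chunk, end="", flush=True)
        await asyncio.sleep(len(chunk) * BASE_DELAY)

# Example usage: asyncio.run(emit_smoothly("some streamed model output ..."))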