chinese_text_splitter.py

from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
from modelscope.pipelines import pipeline

# The segmentation model is loaded onto the CPU: since three models may be in
# use at once, this can be unfriendly to low-end GPUs. Replace device with
# your own GPU id if needed.
p = pipeline(
    task="document-segmentation",
    model="damo/nlp_bert_document-segmentation_chinese-base",
    device="cpu")


class ChineseTextSplitter(CharacterTextSplitter):
    def __init__(self, pdf: bool = False, **kwargs):
        super().__init__(**kwargs)
        self.pdf = pdf

    def split_text(self, text: str, use_document_segmentation: bool = False) -> List[str]:
        # use_document_segmentation selects semantic segmentation of the
        # document. The model used is nlp_bert_document-segmentation_chinese-base,
        # open-sourced by DAMO Academy (paper: https://arxiv.org/abs/2107.09278).
        # Using it requires modelscope[nlp]:
        # pip install "modelscope[nlp]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
        if self.pdf:
            # Clean up whitespace artifacts left over from PDF extraction.
            text = re.sub(r"\n{3,}", "\n", text)
            text = re.sub(r"\s", " ", text)
            text = text.replace("\n\n", "")
        if use_document_segmentation:
            result = p(documents=text)
            sent_list = [i for i in result["text"].split("\n\t") if i]
        else:
            # Split on sentence-ending punctuation (full- and half-width),
            # optionally followed by closing quotes; colons and semicolons
            # are deliberately excluded from the separator set.
            sent_sep_pattern = re.compile(
                '([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')
            sent_list = []
            for ele in sent_sep_pattern.split(text):
                # A matched separator is appended to the preceding sentence
                # rather than kept as a standalone element.
                if sent_sep_pattern.match(ele) and sent_list:
                    sent_list[-1] += ele
                elif ele:
                    sent_list.append(ele)
        return sent_list
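
A minimal usage sketch (the sample text below is illustrative and not part of the original file; importing the module requires modelscope to be installed, since the pipeline is created at import time). With use_document_segmentation left at its default of False, only the regex-based splitter runs:

if __name__ == "__main__":
    # Split a short Chinese passage into sentences using the regex path.
    splitter = ChineseTextSplitter(pdf=False)
    sample = "今天天气很好。我们去公园散步吧!你觉得怎么样?"
    for sentence in splitter.split_text(sample):
        print(sentence)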