-
Notifications
You must be signed in to change notification settings - Fork 1
/
wordprocessor.py
87 lines (56 loc) · 2.43 KB
/
wordprocessor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import json
import re
import numpy as np
import pandas as pd
from khaiii import KhaiiiApi
class WordProcessor:
    """Clean and tokenize playlist titles and tags with the Khaiii Korean
    morphological analyzer, then save the tokenized DataFrame as JSON.

    Expects a DataFrame with 'plylst_title' (str) and 'tags' (list of str)
    columns — presumably the Melon playlist dataset; confirm against caller.
    """

    # POS tags kept when building the *_token_freq columns (Khaiii tag set:
    # NNG/NNP nouns, SL foreign word, MAG adverb, SN number, XR root).
    USING_POS = ['NNG', 'SL', 'NNP', 'MAG', 'SN', 'XR']

    def __init__(self):
        pass

    def re_sub(self, series):
        """Return a cleaned copy of a string Series.

        Drops standalone consonant jamo (e.g. 'ㅋㅋ'), strips punctuation,
        collapses runs of ASCII spaces, and removes ideographic spaces.
        """
        series = series.str.replace(pat=r'[ㄱ-ㅎ]', repl=r'', regex=True)    # drop lone jamo (ㅋㅋ etc.)
        series = series.str.replace(pat=r'[^\w\s]', repl=r'', regex=True)   # drop punctuation / special chars
        series = series.str.replace(pat=r'[ ]{2,}', repl=r' ', regex=True)  # collapse repeated spaces
        series = series.str.replace(pat=r'[\u3000]+', repl=r'', regex=True) # drop ideographic space U+3000
        return series

    def get_token(self, title, tokenizer):
        """Return a list of (morpheme, POS-tag) tuples for *title*.

        Empty or single-space titles make the tokenizer raise, so they
        short-circuit to an empty list.
        """
        if len(title) == 0 or title == ' ':
            return []
        result = tokenizer.analyze(title)
        # Flatten: one (lex, tag) tuple per morpheme across all word splits.
        return [(morph.lex, morph.tag) for split in result for morph in split.morphs]

    def get_all_tags(self, df):
        """Return the 'tags' column as a plain Python list (one list per row)."""
        return df['tags'].values.tolist()

    def get_all_ttls(self, df):
        """Return the 'plylst_title' column as a plain Python list."""
        return df['plylst_title'].values.tolist()

    def make_title_tokens(self, df):
        """Clean and tokenize titles in place.

        Adds 'ttl_token' (all (lex, tag) tuples) and 'ttl_token_freq'
        (tuples whose tag is in USING_POS) columns; returns *df*.
        """
        tokenizer = KhaiiiApi()
        df['plylst_title'] = self.re_sub(df['plylst_title'])
        all_ttl = self.get_all_ttls(df)
        df['ttl_token'] = [self.get_token(title, tokenizer) for title in all_ttl]
        df['ttl_token_freq'] = df['ttl_token'].map(
            lambda tokens: [tok for tok in tokens if tok[1] in self.USING_POS])
        return df

    def make_tag_tokens(self, df):
        """Tokenize each row's tag list in place.

        Adds 'tag_token' (first (lex, tag) tuple of each tag) and
        'tag_token_freq' (those whose tag is in USING_POS); returns *df*.
        """
        tokenizer = KhaiiiApi()
        all_tag = self.get_all_tags(df)
        token_tag = [[self.get_token(tag, tokenizer) for tag in lst]
                     for lst in all_tag]
        # BUG FIX: the original used attribute assignment (df.tag_token = ...),
        # which does NOT create a DataFrame column — it set a plain instance
        # attribute, so the later .apply()/df['tag_token'] accesses failed.
        df['tag_token'] = token_tag

        def to_one_list(token_lists):
            """Keep the first (lex, tag) tuple of each tag's token list."""
            try:
                return [tokens[0] for tokens in token_lists]
            except (TypeError, IndexError):  # non-iterable row or empty token list
                return []

        df['tag_token'] = df['tag_token'].apply(to_one_list)
        df['tag_token_freq'] = df['tag_token'].map(
            lambda tokens: [tok for tok in tokens if tok[1] in self.USING_POS])
        return df

    def tokenize(self, df):
        """Run title then tag tokenization; return the augmented DataFrame."""
        return self.make_tag_tokens(self.make_title_tokens(df))

    def save(self, df):
        """Tokenize *df* and write it to 'full_token.json' (records orient)."""
        tokenized = self.tokenize(df)
        tokenized.to_json('full_token.json', orient='records')