import nltk

# Fetch the Punkt tokenizer models used by word_tokenize (no-op if cached).
nltk.download('punkt')
from nltk import word_tokenize, bigrams

# Demo: tokenize a single sentence and list its adjacent word pairs (bigrams).
sentence = 'I am studying semantic network analysis.'
tokens = word_tokenize(sentence)
bgram = bigrams(tokens)
# bigrams() returns a lazy generator; materialize it so it can be printed
# and reused. list(bgram) is the idiomatic form of `[x for x in bgram]`.
bgram_list = list(bgram)
print(bgram_list)

from nltk import ConditionalFreqDist
from itertools import chain

# Build a conditional frequency distribution over word bigrams drawn from
# several sentences: condition = first word of a pair, event = second word.
# (NOTE: 'studing' below is a typo in the sample data, kept as-is.)
sentences = ['I am studying semantic network analysis.', 'I am studing Machine Learning', 'i love coding']
tokens = [word_tokenize(x) for x in sentences]
bgrams = [bigrams(x) for x in tokens]
# BUG FIX: the original accumulation loop's body was not indented under the
# `for`, which is a SyntaxError. Flatten the per-sentence bigram generators
# into one list of (w1, w2) pairs instead of a manual += loop.
token = list(chain.from_iterable(bgrams))
cfd = ConditionalFreqDist(token)
# conditions() = the distinct first words observed; print so the result is
# visible when run as a script (the bare expression was discarded).
print(cfd.conditions())

# The nodes and edges of `token` (the bigram pairs) are represented as follows:



# Centrality measures computed on the graph G built from the bigram edges.
# NOTE(review): `nx` (presumably networkx) and `G` are defined elsewhere in
# this document; the calls below assume G carries a 'weight' edge attribute —
# TODO confirm against the graph-construction cell.

# Fraction of all other nodes each node is directly connected to.
nx.degree_centrality(G)
# Eigenvector centrality: a node scores high when its neighbors score high;
# edge importance taken from the 'weight' attribute.
nx.eigenvector_centrality(G, weight='weight')
# Closeness: inverse of the average shortest-path distance to all other
# nodes, using 'weight' as the edge distance.
nx.closeness_centrality(G, distance='weight')

# Current-flow (random-walk) betweenness: betweenness variant modeling the
# graph as an electrical network.
nx.current_flow_betweenness_centrality(G)
# PageRank scores for the nodes of G.
nx.pagerank(G)