# PyTorch RNN classification
# Data loading (simple)
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import torch
def findFiles(path):
    """Return all filesystem paths matching the glob pattern `path`."""
    return glob.glob(path)
#print(findFiles('data/names/*.txt'))
import unicodedata
import string
# The recognized character set: ASCII letters plus a few punctuation marks.
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
print(all_letters, n_letters)


def unicodeToAscii(s):
    """Fold a Unicode string to plain ASCII by dropping combining marks.

    NFD-decomposes `s`, discards combining marks ('Mn') and anything not in
    all_letters. Thanks to https://stackoverflow.com/a/518232/2809427
    """
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed
            if unicodedata.category(ch) != 'Mn' and ch in all_letters]
    return ''.join(kept)


print(unicodeToAscii('Ślusàrski'))
# Data preprocessing: turn each name (a string) into a character-level tensor
# Build the category_lines dictionary, a list of names per language
category_lines = {}#
all_categories = []
# Read a file and split into lines
def readLines(filename):
    """Read a UTF-8 text file and return its lines, ASCII-folded.

    Returns one entry per line, each passed through unicodeToAscii.
    """
    # `with` guarantees the file handle is closed (the original opened the
    # file and never closed it). The split('\n') literal had also been broken
    # across two physical lines by the blog extraction, which is a syntax error.
    with open(filename, encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]
# Populate all_categories / category_lines from each data file: the file's
# base name (e.g. "Korean" from Korean.txt) is the category label.
for path in findFiles('data/names/*.txt'):
    label = os.path.splitext(os.path.basename(path))[0]
    all_categories.append(label)
    category_lines[label] = readLines(path)

n_categories = len(all_categories)
print(all_categories)
# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
    """Return the index of `letter` within all_letters (str.find: -1 if absent)."""
    return all_letters.find(letter)
# Just for demonstration: turn a letter into a <1 x n_letters> one-hot tensor
def letterToTensor(letter):
    """One-hot encode a single letter as a (1, n_letters) float tensor."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0][letterToIndex(letter)] = 1  # set the letter's slot to 1
    return one_hot
# Turn a line into a <line_length x 1 x n_letters> tensor,
# i.e. an array of one-hot letter vectors
def lineToTensor(line):
    """Encode `line` as a (len(line), 1, n_letters) stack of one-hot vectors."""
    encoded = torch.zeros(len(line), 1, n_letters)
    for pos, ch in enumerate(line):
        # Each time step is one letterToTensor-style (1, n_letters) row.
        encoded[pos][0][letterToIndex(ch)] = 1
    return encoded


print(letterToTensor('J'))
print(lineToTensor('Jones').size())
def categoryFromOutput(output):
    """Map a (1, n_categories) log-probability row to (category_name, index)."""
    top_n, top_i = output.topk(1)  # best score and its index along dim 1
    category_i = top_i[0].item()  # unwrap the index tensor to a Python int
    return all_categories[category_i], category_i

# NOTE(review): the original executed print(categoryFromOutput(output)) here,
# but `output` is not defined at this point in the file (the network is only
# built further down), so it raised NameError. The demo call is disabled:
# print(categoryFromOutput(output))
import random


def randomChoice(l):
    """Return a uniformly random element of the non-empty sequence `l`."""
    # The original printed every chosen element — leftover debug output that
    # spams the console on each of the 100k training iterations; removed.
    return l[random.randint(0, len(l) - 1)]
def randomTrainingExample():
    """Sample one (category, name) pair plus the tensors training needs.

    Returns (category, line, category_tensor, line_tensor) where
    category_tensor holds the category's index and line_tensor is the
    one-hot encoding of the name.
    """
    category = randomChoice(all_categories)
    # category_lines maps category -> list of names; pick one at random.
    line = randomChoice(category_lines[category])
    category_tensor = torch.tensor([all_categories.index(category)],
                                   dtype=torch.long)
    line_tensor = lineToTensor(line)
    return category, line, category_tensor, line_tensor


# Show a few random samples.
for _ in range(10):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    print('category =', category, '/ line =', line)
# Sample output:
# category = Korean / line(random_name) = Cho
# category = Portuguese / line(random_name) = Castro
# category = Polish / line(random_name) = Niemczyk
# category = Russian / line(random_name) = Yaminsky
# category = Spanish / line(random_name) = Marti
# category = Portuguese / line(random_name) = Esteves
# category = Vietnamese / line(random_name) = Vinh
# category = French / line(random_name) = Laurent
# category = Italian / line(random_name) = Napoletani
# category = Spanish / line(random_name) = Gallego
# Building the network
import torch.nn as nn
class RNN(nn.Module):
    """Minimal Elman-style recurrent cell.

    Each step concatenates [input, hidden] and feeds the result through two
    Linear layers: one producing the next hidden state, one producing the
    output, which is passed through LogSoftmax (pairs with NLLLoss).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Both layers consume the concatenated [input, hidden] vector.
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """One time step: return (log-probabilities, next hidden state)."""
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.softmax(self.i2o(combined))
        return output, hidden

    def initHidden(self):
        """Fresh all-zero hidden state of shape (1, hidden_size)."""
        return torch.zeros(1, self.hidden_size)
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
# Loss for training. The train() function below references `criterion`, which
# was never defined anywhere in the original file (NameError at first use);
# NLLLoss is the standard pairing with the model's LogSoftmax output.
criterion = nn.NLLLoss()
# Training the network
# SGD step size: too high and training explodes, too low and it barely learns.
learning_rate = 0.005
def categoryFromOutput(output):
    """Map a (1, n_categories) log-probability row to (category_name, index).

    NOTE(review): this is an identical re-definition of categoryFromOutput
    from earlier in the file; harmless, but one copy could be removed.
    """
    top_n, top_i = output.topk(1)  # best score and its index along dim 1
    category_i = top_i[0].item()  # unwrap the index tensor to a Python int
    return all_categories[category_i], category_i
def train(category_tensor, line_tensor):
    """Run one manual-SGD step on a single (name, category) example.

    category_tensor: LongTensor of shape (1,) holding the target class index.
    line_tensor: (name_length, 1, n_letters) one-hot input sequence.
    Returns (final output log-probabilities, loss as a Python float).
    """
    hidden = rnn.initHidden()
    rnn.zero_grad()

    # Feed the name one character at a time, carrying the hidden state forward.
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)

    # NOTE(review): `criterion` is a module-level global that must be defined
    # before training (e.g. criterion = nn.NLLLoss()) — confirm it exists.
    loss = criterion(output, category_tensor)
    loss.backward()

    # Manual SGD update: p <- p - lr * grad. The original used the deprecated
    # add_(Number, Tensor) overload, removed in modern PyTorch; the `alpha`
    # keyword form is the current API and is numerically identical.
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)

    return output, loss.item()
########################
# Main training loop: sample random examples and periodically report progress.
import time

n_iters = 100000
print_every = 5000

# Keep track of losses for plotting / monitoring.
current_loss = 0
all_losses = []

start = time.time()  # reference point for elapsed-time reporting
for step in range(1, n_iters + 1):  # renamed from `iter`, which shadows the builtin
    category, line, category_tensor, line_tensor = randomTrainingExample()
    output, loss = train(category_tensor, line_tensor)
    current_loss += loss

    # Periodically decode the network's guess and show whether it was right.
    if step % print_every == 0:
        guess, guess_i = categoryFromOutput(output)
        correct = '✓' if guess == category else '✗ (%s)' % category
        # The original formatted the timestamp via timeSince(start), but
        # neither timeSince nor start is defined anywhere in this file
        # (NameError on the first progress print); elapsed time is computed
        # inline instead.
        elapsed = time.time() - start
        print('%d %d%% (%dm %ds) %.4f %s / %s %s' % (
            step, step / n_iters * 100, elapsed // 60, elapsed % 60,
            loss, line, guess, correct))
# Source note: adapted from a blog article; the article's text is licensed
# under CC BY-SA 2.5 / 3.0 / 4.0 — keep the original article URL as a
# reference when sharing or copying.