-2
超平面在神经网络和感知器的讨论中是什么?感知器中的超平面
下面的感知器是用超平面实现的。
perceptron_minin.m
(Octave)
function [errors, sepplane] = perceptron_minin (pclass, nclass)
  % Train a linear separating hyperplane (perceptron) for two classes.
  %
  % Inputs:
  %   pclass - matrix whose rows are samples of the positive class
  %   nclass - matrix whose rows are samples of the negative class
  % Outputs:
  %   errors   - fraction of training samples still misclassified
  %   sepplane - row vector [bias, w1, ..., wd]; a sample x is classified
  %              positive when [1, x] * sepplane' >= 0

  % Random initialisation in [-0.5, 0.5); one extra weight for the bias term.
  sepplane = rand (1, columns (pclass) + 1) - 0.5;

  % Augment every sample with a bias input of 1 and negate the negative
  % class, so a correctly classified row satisfies tset * sepplane' >= 0.
  tset = [ones(rows(pclass), 1), pclass; -ones(rows(nclass), 1), -nclass];

  i = 1;
  do
    misind = tset * sepplane' < 0;                       % misclassified rows
    correction = sum (tset (misind, :), 1) / sqrt (i);   % decaying step size
    sepplane = sepplane + correction;
    ++i;
    % Stop when the correction is negligible relative to the weight norm,
    % or after 1000 iterations.  (The original line had unbalanced
    % parentheses, which made the function a syntax error.)
  until (norm (sepplane) * 0.0005 - norm (correction) > 0 || i > 1000)

  errors = mean (tset * sepplane' < 0);
  % NOTE(review): the original contained three stray lines here referencing
  % undefined variables (tvec, tlab) and a nonexistent perceptron();
  % they were a paste error and have been removed.
end
但是,在下面的程序中,其逻辑在设计时并没有显式地使用超平面。
perceptron_test.m
(MATLAB)
% Single-layer perceptron trained on a 2-input truth table (MATLAB script).
bias = -1;
coeff = 0.7;                      % learning rate
rand('state', sum(100 * clock));  % seed the legacy RNG from the clock
% NOTE(review): -1*2 .* rand(3,1) yields weights in (-2, 0]; a symmetric
% init over [-1, 1] would be  -1 + 2 .* rand(3,1)  -- confirm intent.
weights = -1*2 .* rand(3,1);
train_iter = 10000;
train_data_count = 4;
test_data_count = 100;

%% training section
train_data = [0 0;
              0 1;
              1 0;
              1 1];
% NOTE(review): [0;1;1;1] is the OR truth table, not XOR, although the
% output column below is labelled 'A_xor_B' -- verify which was intended.
class_labels = [0;
                1;
                1;
                1];
bias_vector(1:train_data_count, 1) = bias;      % constant bias input column
train_data_biased = [bias_vector, train_data];  % prepend bias to each sample
for i = 1:train_iter
    output = zeros(train_data_count, 1);
    for j = 1:train_data_count
        % product() and activ_func() are external helpers (not shown here);
        % presumably a dot product and an activation -- confirm against
        % the accompanying files.
        y = product(train_data_biased(j,:), weights);
        output(j) = activ_func(y);
        % Perceptron delta rule: move the weights toward the target label.
        delta = class_labels(j) - output(j);
        inc = train_data_biased(j,:) * (coeff * delta);
        weights = weights + inc';
    end
end
table(train_data(:,1), train_data(:,2), output, ...
      'VariableNames', {'A' 'B' 'A_xor_B'})

%% test section
% Noisy binary inputs: each 0/1 entry perturbed by up to +/-0.05.
% (The original split this expression with a bare trailing '+', which is
% a syntax error in MATLAB; '...' is the line-continuation token.)
test_data = randi([0, 1], [test_data_count, 2]) + ...
            (2 * rand(test_data_count, 2) - 1) / 20;
for i = 1:test_data_count
    % Explicit weighted sum followed by a logistic (sigmoid) activation.
    y = bias * weights(1,1) + ...
        test_data(i,1) * weights(2,1) + ...
        test_data(i,2) * weights(3,1);
    output(i) = 1 / (1 + exp(-y));
end
% (The original also split this call across lines without '...'.)
table(test_data(:,1), test_data(:,2), output, ...
      'VariableNames', {'A' 'B' 'A_xor_B'})
现在,我有几个问题,
(1)是第一个源代码是否正确?
(2)如果YES,解释为什么他们都工作。
超平面(Hyperplane)在*哪里*? –
@ScottHunter,'sepplane'实际上就是超平面。这个逻辑在第一个源代码的哪里被使用? – anonymous