diff --git a/decoder/src/Beam.cpp b/decoder/src/Beam.cpp
index 1b0aafae59b9d1f2146b0f14843382e28402361d..e39593c8aa3189cc4cbb0ba3a0be82043f0fff37 100644
--- a/decoder/src/Beam.cpp
+++ b/decoder/src/Beam.cpp
@@ -48,7 +48,7 @@ void Beam::update(ReadingMachine & machine, bool debug)
     elements[index].config.setAppliableTransitions(appliableTransitions);
 
     auto context = classifier.getNN()->extractContext(elements[index].config).back();
-    auto neuralInput = torch::from_blob(context.data(), {(long)context.size()}, torch::TensorOptions(torch::kLong).device(NeuralNetworkImpl::device));
+    auto neuralInput = torch::from_blob(context.data(), {(long)context.size()}, torch::TensorOptions(torch::kLong)).clone().to(NeuralNetworkImpl::device);
     auto prediction = classifier.isRegression() ? classifier.getNN()(neuralInput).squeeze(0) : torch::softmax(classifier.getNN()(neuralInput).squeeze(0), 0);
 
     float entropy = classifier.isRegression() ? 0.0 : NeuralNetworkImpl::entropy(prediction);
diff --git a/trainer/src/Trainer.cpp b/trainer/src/Trainer.cpp
index a85fff0b08800ba7cc565ca5d91a1e08ccac5aef..7f1aaec54deb294576221006fc9b5469f824f87c 100644
--- a/trainer/src/Trainer.cpp
+++ b/trainer/src/Trainer.cpp
@@ -93,7 +93,7 @@ void Trainer::extractExamples(std::vector<SubConfig> & configs, bool debug, std:
     if (dynamicOracle and util::choiceWithProbability(1.0) and config.getState() != "tokenizer" and config.getState() != "segmenter")
     {
       auto & classifier = *machine.getClassifier(config.getState());
-      auto neuralInput = torch::from_blob(context.data(), {(long)context.size()}, torch::TensorOptions(torch::kLong).device(NeuralNetworkImpl::device));
+      auto neuralInput = torch::from_blob(context.data(), {(long)context.size()}, torch::TensorOptions(torch::kLong)).clone().to(NeuralNetworkImpl::device);
       auto prediction = classifier.isRegression() ? classifier.getNN()(neuralInput).squeeze(0) : torch::softmax(classifier.getNN()(neuralInput).squeeze(0), 0);
 
       entropy = NeuralNetworkImpl::entropy(prediction);
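
Note (not part of the patch): the change in both hunks replaces a torch::from_blob call that put the target device directly into the TensorOptions with a clone()-then-to() sequence. Below is a minimal, self-contained C++ sketch of that pattern, assuming a working libtorch installation; the `context` vector and the locally chosen `device` stand in for the project's extractContext() output and NeuralNetworkImpl::device, which are not reproduced here.

#include <torch/torch.h>

#include <iostream>
#include <vector>

int main()
{
  // Host-side feature indices, standing in for the `context` vector built by
  // extractContext() in the patch above.
  std::vector<int64_t> context{1, 2, 3, 4};

  // torch::from_blob wraps the existing host buffer without copying, so the
  // resulting tensor lives on the CPU; the options passed here only describe
  // that buffer (dtype, shape), they cannot relocate it to another device.
  auto cpuView = torch::from_blob(context.data(), {(long)context.size()},
                                  torch::TensorOptions(torch::kLong));

  // clone() gives the tensor its own storage (safe even if `context` is later
  // destroyed), and to() then moves that copy onto the target device.
  auto device = torch::cuda::is_available() ? torch::Device(torch::kCUDA)
                                            : torch::Device(torch::kCPU);
  auto neuralInput = cpuView.clone().to(device);

  std::cout << neuralInput.device() << std::endl;
  return 0;
}

The clone() also matters independently of the device move: since from_blob does not own the underlying memory, copying first keeps the tensor valid even after the original buffer goes out of scope.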