Franck Dary / macaon / Commits

Commit ffb2e08b, authored Apr 18, 2021 by Franck Dary
Parent: 6d627fa1

Shuffle dataset on CPU to avoid CUDA sync error

Changes (1 file): torch_modules/src/ConfigDataset.cpp
@@ -93,8 +93,8 @@ void ConfigDataset::Holder::reset()
   loadedTensorIndex = 0;
   nextIndexToGive = 0;
   nbGiven = 0;
-  torch::load(loadedTensor, files[loadedTensorIndex], NeuralNetworkImpl::getDevice());
-  loadedTensor = torch::index_select(loadedTensor, 0, torch::randperm(loadedTensor.size(0), torch::TensorOptions(at::kLong).device(NeuralNetworkImpl::getDevice())));
+  torch::load(loadedTensor, files[loadedTensorIndex]);
+  loadedTensor = torch::index_select(loadedTensor, 0, torch::randperm(loadedTensor.size(0), torch::TensorOptions(at::kLong))).to(NeuralNetworkImpl::getDevice());
 }

 c10::optional<std::tuple<torch::Tensor,torch::Tensor,std::string>> ConfigDataset::Holder::get_batch(std::size_t batchSize)
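Both hunks make the same change: the old lines loaded the tensor onto the training device and built the shuffling permutation there (torch::randperm with .device(NeuralNetworkImpl::getDevice())), while the new lines load and permute on the CPU and only then move the already-shuffled tensor to the device with .to(), which is what the commit message credits with avoiding the CUDA sync error. Below is a minimal standalone sketch of the new pattern, assuming nothing beyond LibTorch: the device variable stands in for NeuralNetworkImpl::getDevice(), and a random tensor stands in for the data that torch::load reads from files[loadedTensorIndex].

#include <torch/torch.h>

int main()
{
  // Stand-in for NeuralNetworkImpl::getDevice(): use the GPU when one is available.
  torch::Device device(torch::cuda::is_available() ? torch::kCUDA : torch::kCPU);

  // Stand-in for the tensor that torch::load(loadedTensor, files[loadedTensorIndex]) fills.
  torch::Tensor loadedTensor = torch::rand({100, 8});

  // Shuffle rows: the permutation is created on the CPU (default TensorOptions),
  // and only the shuffled result is transferred to the device afterwards.
  loadedTensor = torch::index_select(
      loadedTensor, 0,
      torch::randperm(loadedTensor.size(0), torch::TensorOptions(at::kLong)))
    .to(device);

  return 0;
}

Keeping torch::randperm on the CPU means no random-permutation kernel ever runs on the GPU; the single .to(device) at the end transfers the shuffled rows in one copy.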
@@ -107,8 +107,8 @@ c10::optional<std::tuple<torch::Tensor,torch::Tensor,std::string>> ConfigDataset
     if (loadedTensorIndex >= (int)files.size())
       return c10::optional<std::tuple<torch::Tensor,torch::Tensor,std::string>>();
     nextIndexToGive = 0;
-    torch::load(loadedTensor, files[loadedTensorIndex], NeuralNetworkImpl::getDevice());
-    loadedTensor = torch::index_select(loadedTensor, 0, torch::randperm(loadedTensor.size(0), torch::TensorOptions(at::kLong).device(NeuralNetworkImpl::getDevice())));
+    torch::load(loadedTensor, files[loadedTensorIndex]);
+    loadedTensor = torch::index_select(loadedTensor, 0, torch::randperm(loadedTensor.size(0), torch::TensorOptions(at::kLong))).to(NeuralNetworkImpl::getDevice());
   }

   int nbElementsToGive = std::min<int>(batchSize, loadedTensor.size(0) - nextIndexToGive);
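The second hunk also shows the batching arithmetic that consumes the shuffled tensor: nbElementsToGive clamps each batch to whatever remains after nextIndexToGive, so the last batch can be smaller than batchSize. The rest of get_batch is not part of this diff; the loop below is only a hedged illustration of that clamping with a generic narrow-based slice, and everything other than the batchSize / nextIndexToGive / nbElementsToGive names is an assumption rather than the project's code.

#include <torch/torch.h>
#include <algorithm>
#include <iostream>

int main()
{
  torch::Tensor loadedTensor = torch::rand({10, 4}); // pretend this is the shuffled data
  int batchSize = 3;
  int nextIndexToGive = 0;

  while (nextIndexToGive < loadedTensor.size(0))
  {
    // Clamp the final batch to the rows that are left, as in the hunk above.
    int nbElementsToGive = std::min<int>(batchSize, loadedTensor.size(0) - nextIndexToGive);
    torch::Tensor batch = loadedTensor.narrow(0, nextIndexToGive, nbElementsToGive);
    nextIndexToGive += nbElementsToGive;
    std::cout << batch.size(0) << '\n'; // prints 3, 3, 3, 1
  }

  return 0;
}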