diff --git a/src/Examples.Utils/Examples.Utils.csproj b/src/Examples.Utils/Examples.Utils.csproj
index a542b181d..620d5b487 100644
--- a/src/Examples.Utils/Examples.Utils.csproj
+++ b/src/Examples.Utils/Examples.Utils.csproj
@@ -17,7 +17,7 @@
-
+
diff --git a/src/Examples/AdversarialExampleGeneration.cs b/src/Examples/AdversarialExampleGeneration.cs
index 7bfc174b2..8a21662d7 100644
--- a/src/Examples/AdversarialExampleGeneration.cs
+++ b/src/Examples/AdversarialExampleGeneration.cs
@@ -105,7 +105,9 @@ internal static void Main(string[] args)
private static Tensor Attack(Tensor image, double ε, Tensor data_grad)
{
using (var sign = data_grad.sign()) {
- var perturbed = (image + ε * sign).clamp(0.0, 1.0);
+ using var zero_scalar = 0.0.ToScalar();
+ using var one_scalar = 1.0.ToScalar();
+ var perturbed = (image + ε * sign).clamp(zero_scalar, one_scalar);
return perturbed;
}
}
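For context, a minimal sketch of driving the rewritten `Attack` helper; the image and gradient tensors are hypothetical placeholders, and the point is that every `Scalar` is created explicitly and disposed deterministically:

```csharp
using TorchSharp;
using static TorchSharp.torch;

// Hypothetical inputs; a real caller passes a batch image and its loss gradient.
var image = rand(1, 1, 28, 28);
var data_grad = randn(1, 1, 28, 28);

// Scalars wrap native handles; `using` frees them at scope exit instead of
// leaving them to the finalizer, which is what this patch is about.
using var zero = 0.0.ToScalar();
using var one = 1.0.ToScalar();
using var eps = 0.25.ToScalar();

using var sign = data_grad.sign();
var perturbed = (image + sign.mul(eps)).clamp(zero, one);
```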
diff --git a/src/Examples/SequenceToSequence.cs b/src/Examples/SequenceToSequence.cs
index 436c05a67..be7e9672b 100644
--- a/src/Examples/SequenceToSequence.cs
+++ b/src/Examples/SequenceToSequence.cs
@@ -234,10 +234,14 @@ public TransformerModel(long ntokens, long ninputs, long nheads, long nhidden, l
public Tensor GenerateSquareSubsequentMask(long size)
{
- var mask = (torch.ones(new long[] { size, size }) == 1).triu().transpose(0, 1);
+ using var zero_scalar = 0.ToScalar();
+ using var one_scalar = 1.ToScalar();
+ using var float_negative_infinity_scalar = float.NegativeInfinity.ToScalar();
+ using var float_zero_scalar = 0.0f.ToScalar(); // FIXME: Equivalent to zero_scalar?
+ var mask = (torch.ones(new long[] { size, size }) == one_scalar).triu().transpose(0, 1);
return mask.to_type(ScalarType.Float32)
- .masked_fill(mask == 0, float.NegativeInfinity)
- .masked_fill(mask == 1, 0.0f).to(device);
+ .masked_fill(mask == zero_scalar, float_negative_infinity_scalar)
+ .masked_fill(mask == one_scalar, float_zero_scalar).to(device);
}
private void InitWeights()
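A small sanity-check sketch of what the rewritten mask code produces, for a hypothetical size of 3; note that `0.ToScalar()` and `0.0f.ToScalar()` differ only in the Scalar's element type, which `masked_fill` casts to the tensor's dtype anyway:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var zero = 0.ToScalar();
using var one = 1.ToScalar();
using var neg_inf = float.NegativeInfinity.ToScalar();
using var fzero = 0.0f.ToScalar();

var mask = (ones(3, 3) == one).triu().transpose(0, 1);
var attn = mask.to_type(ScalarType.Float32)
    .masked_fill(mask == zero, neg_inf)
    .masked_fill(mask == one, fzero);
// Row i of attn is 0 for columns <= i and -inf for columns > i,
// i.e. each position may only attend to itself and earlier positions.
```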
diff --git a/src/Native/LibTorchSharp/THSTensor.h b/src/Native/LibTorchSharp/THSTensor.h
index 0925cd4e0..86e603690 100644
--- a/src/Native/LibTorchSharp/THSTensor.h
+++ b/src/Native/LibTorchSharp/THSTensor.h
@@ -1220,6 +1220,10 @@ EXPORT_API(Tensor) THSTensor_sqrt(const Tensor tensor);
EXPORT_API(void) THSTensor_sqrt_(const Tensor tensor);
+EXPORT_API(Tensor) THSTensor_square(const Tensor tensor);
+
+EXPORT_API(void) THSTensor_square_(const Tensor tensor);
+
EXPORT_API(Tensor) THSTensor_std(const Tensor tensor, const bool unbiased);
EXPORT_API(Tensor) THSTensor_std_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim);
diff --git a/src/Native/LibTorchSharp/THSTensorMath.cpp b/src/Native/LibTorchSharp/THSTensorMath.cpp
index b1f0e3a23..9d4ac75f1 100644
--- a/src/Native/LibTorchSharp/THSTensorMath.cpp
+++ b/src/Native/LibTorchSharp/THSTensorMath.cpp
@@ -910,6 +910,16 @@ void THSTensor_sqrt_(const Tensor tensor)
CATCH(tensor->sqrt_();)
}
+Tensor THSTensor_square(const Tensor tensor)
+{
+ CATCH_TENSOR(tensor->square());
+}
+
+void THSTensor_square_(const Tensor tensor)
+{
+ CATCH(tensor->square_();)
+}
+
Tensor THSTensor_sign(const Tensor tensor)
{
CATCH_TENSOR(tensor->sign());
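These two entry points back the managed `Tensor.square()`/`square_()` calls used throughout this patch. A sketch of the intended equivalence, with hypothetical data:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var d = NewDisposeScope();
var x = rand(4);
var a = x.square();          // native square; no exponent Scalar involved
using var two = 2.0.ToScalar();
var b = x.pow(two);          // same values, but requires allocating a Scalar
// a and b agree elementwise (up to floating-point rounding).
```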
diff --git a/src/TorchAudio/Functional.cs b/src/TorchAudio/Functional.cs
index fdbfb37be..0ebb0c479 100644
--- a/src/TorchAudio/Functional.cs
+++ b/src/TorchAudio/Functional.cs
@@ -73,14 +73,15 @@ public static torch.Tensor spectrogram(torch.Tensor waveform, long pad, torch.Te
spec_f = spec_f.reshape(spec_shape);
if (normalized) {
- spec_f /= window.pow(2.0).sum().sqrt();
+ spec_f /= window.square().sum().sqrt();
}
if (power.HasValue) {
if (power.Value == 1.0) {
spec_f = spec_f.abs();
} else {
- spec_f = spec_f.abs().pow(power.Value);
+ using var power_scalar = power.Value.ToScalar();
+ spec_f = spec_f.abs().pow(power_scalar); // FIXME: Call torch.Tensor.square if power.Value == 2.0?
}
}
@@ -112,7 +113,7 @@ public static torch.Tensor inverse_spectrogram(torch.Tensor spectrogram, long? l
using (var d = torch.NewDisposeScope()) {
if (normalized) {
- spectrogram = spectrogram * window.pow(2.0).sum().sqrt();
+ spectrogram = spectrogram * window.square().sum().sqrt();
}
// pack batch
@@ -180,23 +181,24 @@ public static Tensor griffinlim(Tensor specgram, Tensor window, long n_fft, long
throw new ArgumentOutOfRangeException($"momentum must be in range [0, 1). Found: {momentum}");
}
momentum = momentum / (1 + momentum);
+ var need_momentum = momentum > 0.0;
+ using var momentum_scalar = (need_momentum) ? momentum.ToScalar() : null;
// pack batch
var shape = specgram.size();
specgram = specgram.reshape(new long[] { -1, shape[shape.Length - 2], shape[shape.Length - 1] });
- specgram = specgram.pow(1 / power);
+ using var exponent_scalar = (1 / power).ToScalar();
+ specgram = specgram.pow(exponent_scalar); // FIXME: Use inplace ops? Skip if power == 1?
// initialize the phase
- Tensor angles;
- if (rand_init) {
- angles = torch.rand(specgram.size(), dtype: _get_complex_dtype(specgram.dtype), device: specgram.device);
- } else {
- angles = torch.full(specgram.size(), 1, dtype: _get_complex_dtype(specgram.dtype), device: specgram.device);
- }
+ var angles = (rand_init)
+ ? torch.rand(specgram.size(), dtype: _get_complex_dtype(specgram.dtype), device: specgram.device)
+ : torch.ones(specgram.size(), dtype: _get_complex_dtype(specgram.dtype), device: specgram.device);
// And initialize the previous iterate to 0
var tprev = torch.tensor(0.0, dtype: specgram.dtype, device: specgram.device);
+ using var eps_scalar = (1e-16).ToScalar();
for (int i = 0; i < n_iter; i++) {
// Invert with our current estimate of the phases
var inverse = torch.istft(
@@ -218,10 +220,10 @@ public static Tensor griffinlim(Tensor specgram, Tensor window, long n_fft, long
// Update our phase estimates
angles = rebuilt;
- if (momentum > 0.0) {
- angles = angles - tprev.mul_(momentum);
+ if (need_momentum) {
+ angles = angles - tprev.mul_(momentum_scalar!); // FIXME: Use inplace ops?
}
- angles = angles.div(angles.abs().add(1e-16));
+ angles = angles.div(angles.abs().add(eps_scalar));
// Store the previous iterate
tprev = rebuilt;
@@ -528,6 +530,8 @@ internal static (torch.Tensor, int) _get_sinc_resample_kernel(int orig_freq, int
if (lowpass_filter_width <= 0) {
throw new ArgumentOutOfRangeException();
}
+ using var min_scalar = (-lowpass_filter_width).ToScalar();
+ using var max_scalar = lowpass_filter_width.ToScalar();
var kernels_list = new List<Tensor>();
double base_freq = Math.Min(orig_freq, new_freq);
@@ -535,11 +539,14 @@ internal static (torch.Tensor, int) _get_sinc_resample_kernel(int orig_freq, int
var width = (int)Math.Ceiling(((double)lowpass_filter_width) * orig_freq / base_freq);
var idx_dtype = dtype ?? torch.float64;
- var idx = torch.arange(-width, width + orig_freq, device: device, dtype: idx_dtype);
+ using var start_scalar = (-width).ToScalar();
+ using var stop_scalar = (width + orig_freq).ToScalar();
+ var idx = torch.arange(start_scalar, stop_scalar, device: device, dtype: idx_dtype);
+ using var zero_scalar = 0.ToScalar();
for (int i = 0; i < new_freq; i++) {
var t = (-i / new_freq + idx / orig_freq) * base_freq;
- t = t.clamp_(-lowpass_filter_width, lowpass_filter_width);
+ t = t.clamp_(min_scalar, max_scalar);
torch.Tensor window;
if (resampling_method == ResamplingMethod.sinc_interpolation) {
@@ -554,13 +561,14 @@ internal static (torch.Tensor, int) _get_sinc_resample_kernel(int orig_freq, int
}
t *= Math.PI;
// Tensor.to(Tensor) of TorchSharp doesn't change dtype.
- var kernel = torch.where(t == 0, torch.tensor(1.0).to(t).type_as(t), torch.sin(t) / t);
+ var kernel = torch.where(t == zero_scalar, torch.tensor(1.0).to(t).type_as(t), torch.sin(t) / t);
kernel.mul_(window);
kernels_list.Add(kernel);
}
var scale = ((double)base_freq) / orig_freq;
- var kernels = torch.stack(kernels_list.ToArray()).view(new_freq, 1, -1).mul_(scale);
+ using var scale_scalar = scale.ToScalar();
+ var kernels = torch.stack(kernels_list.ToArray()).view(new_freq, 1, -1).mul_(scale_scalar);
if (dtype == null) {
kernels = kernels.to(torch.float32);
}
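The hoisting pattern in `_get_sinc_resample_kernel`, building `min_scalar`/`max_scalar` once and reusing them across all `new_freq` iterations, generalizes to any hot loop; a hypothetical sketch:

```csharp
using TorchSharp;
using static TorchSharp.torch;

// Create the clamp bounds once, outside the loop...
using var lo = (-3.0).ToScalar();
using var hi = 3.0.ToScalar();

for (int i = 0; i < 100; i++) {
    // ...and scope the per-iteration tensors so each pass frees its temporaries.
    using var d = NewDisposeScope();
    var t = randn(1024).clamp_(lo, hi);
    // ... per-iteration work on t ...
}
```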
diff --git a/src/TorchAudio/Modules/HuBERTPretrainModel.cs b/src/TorchAudio/Modules/HuBERTPretrainModel.cs
index 7c6e121c8..a0b84edae 100644
--- a/src/TorchAudio/Modules/HuBERTPretrainModel.cs
+++ b/src/TorchAudio/Modules/HuBERTPretrainModel.cs
@@ -91,7 +91,7 @@ public override (Tensor?, Tensor?, Tensor) forward(
if (this.feature_grad_mult != null && this.feature_grad_mult < 1.0) {
x = Wav2Vec2Model.GradMultiply.apply(x, this.feature_grad_mult.Value);
}
- var features_pen = x.@float().pow(2).mean();
+ var features_pen = x.@float().square().mean();
if (lengths is not null) {
padding_mask = Wav2Vec2Model._get_padding_mask(x, lengths);
} else {
diff --git a/src/TorchAudio/Modules/Tacotron2.cs b/src/TorchAudio/Modules/Tacotron2.cs
index edf3d06b4..1421a53a8 100644
--- a/src/TorchAudio/Modules/Tacotron2.cs
+++ b/src/TorchAudio/Modules/Tacotron2.cs
@@ -147,9 +147,11 @@ public override (Tensor, Tensor, Tensor, Tensor) forward(
mask = mask.expand(this.n_mels, mask.size(0), mask.size(1));
mask = mask.permute(1, 0, 2);
- mel_specgram = mel_specgram.masked_fill(mask, 0.0);
- mel_specgram_postnet = mel_specgram_postnet.masked_fill(mask, 0.0);
- gate_outputs = gate_outputs.masked_fill(mask[TensorIndex.Colon, 0, TensorIndex.Colon], 1e3);
+ using var zero_scalar = 0.0.ToScalar();
+ mel_specgram = mel_specgram.masked_fill(mask, zero_scalar);
+ mel_specgram_postnet = mel_specgram_postnet.masked_fill(mask, zero_scalar);
+ using var eps_scalar = 1e3.ToScalar();
+ gate_outputs = gate_outputs.masked_fill(mask[TensorIndex.Colon, 0, TensorIndex.Colon], eps_scalar);
}
return (mel_specgram, mel_specgram_postnet, gate_outputs, alignments);
@@ -334,7 +336,8 @@ public override (Tensor, Tensor) forward(
{
var alignment = this._get_alignment_energies(attention_hidden_state, processed_memory, attention_weights_cat);
- alignment = alignment.masked_fill(mask, this.score_mask_value);
+ using var score_mask_value_scalar = this.score_mask_value.ToScalar();
+ alignment = alignment.masked_fill(mask, score_mask_value_scalar);
var attention_weights = F.softmax(alignment, dim: 1);
var attention_context = torch.bmm(attention_weights.unsqueeze(1), memory);
diff --git a/src/TorchAudio/Transforms/InverseMelScale.cs b/src/TorchAudio/Transforms/InverseMelScale.cs
index fa3196d23..a3032e43c 100644
--- a/src/TorchAudio/Transforms/InverseMelScale.cs
+++ b/src/TorchAudio/Transforms/InverseMelScale.cs
@@ -96,18 +96,19 @@ public override Tensor forward(Tensor melspec)
learningRate: 0.1, momentum: 0.9);
var loss = float.PositiveInfinity;
+ using var zero_scalar = 0.ToScalar();
for (long i = 0; i < this.max_iter; i++) {
using var d2 = torch.NewDisposeScope();
optim.zero_grad();
var diff = melspec - specgram.matmul(this.fb);
- var new_loss = diff.pow(2).sum(dim: -1).mean();
+ var new_loss = diff.square().sum(dim: -1).mean();
// take sum over mel-frequency then average over other dimensions
// so that loss threshold is applied per unit timeframe
new_loss.backward();
optim.step();
using (torch.no_grad())
- specgram.set_(specgram.clamp(min: 0));
+ specgram.set_(specgram.clamp(min: zero_scalar));
var new_loss_value = new_loss.item();
if (new_loss_value < this.tolerance_loss || Math.Abs(loss - new_loss_value) < this.tolerance_change) {
@@ -117,7 +118,7 @@ public override Tensor forward(Tensor melspec)
}
specgram.requires_grad_(false);
- var specgram_tensor = specgram.clamp(min: 0).transpose(-1, -2);
+ var specgram_tensor = specgram.clamp(min: zero_scalar).transpose(-1, -2);
// unpack batch
shape[shape.Length - 2] = freq;
diff --git a/src/TorchSharp/Distributions/Beta.cs b/src/TorchSharp/Distributions/Beta.cs
index 67b4d0d6c..155ab21ba 100644
--- a/src/TorchSharp/Distributions/Beta.cs
+++ b/src/TorchSharp/Distributions/Beta.cs
@@ -28,7 +28,7 @@ public override Tensor variance {
get {
using var _ = NewDisposeScope();
var total = concentration0 + concentration1;
- return (concentration1 * concentration0 / (total.pow(2) * (total + 1))).MoveToOuterDisposeScope();
+ return (concentration1 * concentration0 / (total.square() * (total + 1))).MoveToOuterDisposeScope();
}
}
diff --git a/src/TorchSharp/Distributions/Cauchy.cs b/src/TorchSharp/Distributions/Cauchy.cs
index 516152c80..3017f53ce 100644
--- a/src/TorchSharp/Distributions/Cauchy.cs
+++ b/src/TorchSharp/Distributions/Cauchy.cs
@@ -78,7 +78,7 @@ public override Tensor rsample(params long[] sample_shape)
/// <param name="value"></param>
/// <returns></returns>
public override Tensor log_prob(Tensor value) =>
- WrappedTensorDisposeScope(() => -Math.Log(Math.PI) - scale.log() - (((value - loc) / scale).pow(2)).log1p());
+ WrappedTensorDisposeScope(() => -Math.Log(Math.PI) - scale.log() - (((value - loc) / scale).square()).log1p());
/// <summary>
/// Returns entropy of distribution, batched over batch_shape.
diff --git a/src/TorchSharp/Distributions/Constraints.cs b/src/TorchSharp/Distributions/Constraints.cs
index 0c4aa26f5..59fe22892 100644
--- a/src/TorchSharp/Distributions/Constraints.cs
+++ b/src/TorchSharp/Distributions/Constraints.cs
@@ -137,8 +137,10 @@ public _OneHot() : base(true, 1) { }
public override Tensor check(Tensor value)
{
- var is_boolean = (value == 0) | (value == 1);
- var is_normalized = value.sum(-1).eq(1);
+ using var zero_scalar = 0.ToScalar();
+ using var one_scalar = 1.ToScalar();
+ var is_boolean = (value == zero_scalar) | (value == one_scalar);
+ var is_normalized = value.sum(-1).eq(one_scalar);
return is_boolean.all(-1) & is_normalized;
}
}
@@ -433,9 +435,9 @@ public _CorrCholesky() : base(false, 2) { }
public override Tensor check(Tensor value)
{
- var tol = torch.finfo(value.dtype).eps * value.size(-1) * 10; // 10 is an adjustable fudge factor
+ using var tol_scalar = (torch.finfo(value.dtype).eps * value.size(-1) * 10).ToScalar(); // 10 is an adjustable fudge factor
var row_norm = torch.linalg.norm(value.detach(), dims: new[] { -1L });
- var unit_row_norm = (row_norm - 1.0).abs().le(tol).all(dim: -1);
+ var unit_row_norm = (row_norm - 1.0).abs().le(tol_scalar).all(dim: -1);
return lc.check(value) & unit_row_norm;
}
@@ -489,7 +491,8 @@ public override Tensor check(Tensor value)
var sym_check = base.check(value);
if (!sym_check.all().item())
return sym_check;
- return torch.linalg.eigvalsh(value).ge(0).all(-1);
+ using var zero_scalar = 0.ToScalar();
+ return torch.linalg.eigvalsh(value).ge(zero_scalar).all(-1);
}
}
@@ -503,7 +506,8 @@ public override Tensor check(Tensor value)
var sym_check = base.check(value);
if (!sym_check.all().item())
return sym_check;
- return torch.linalg.cholesky_ex(value).info.eq(0);
+ using var zero_scalar = 0.ToScalar();
+ return torch.linalg.cholesky_ex(value).info.eq(zero_scalar);
}
}
diff --git a/src/TorchSharp/Distributions/Dirichlet.cs b/src/TorchSharp/Distributions/Dirichlet.cs
index 68fbbcfa8..a456824fa 100644
--- a/src/TorchSharp/Distributions/Dirichlet.cs
+++ b/src/TorchSharp/Distributions/Dirichlet.cs
@@ -25,9 +25,11 @@ public override Tensor mode
{
get {
using var _ = NewDisposeScope();
- var concentrationm1 = (concentration - 1).clamp(min: 0.0);
+ using var zero_scalar = 0.0.ToScalar();
+ var concentrationm1 = (concentration - 1).clamp(min: zero_scalar);
var mode = concentrationm1 / concentrationm1.sum(-1, true);
- var mask = (concentration < 1).all(dim: -1);
+ using var one_scalar = 1.ToScalar();
+ var mask = (concentration < one_scalar).all(dim: -1);
mode[mask] = torch.nn.functional.one_hot(mode[mask].argmax(dim: -1), concentrationm1.shape[concentrationm1.ndim-1]).to(mode);
return mode.MoveToOuterDisposeScope();
}
@@ -40,7 +42,7 @@ public override Tensor variance {
get {
using var _ = NewDisposeScope();
var con0 = concentration.sum(-1, true);
- return (concentration * (con0 - concentration) / (con0.pow(2) * (con0 + 1))).MoveToOuterDisposeScope();
+ return (concentration * (con0 - concentration) / (con0.square() * (con0 + 1))).MoveToOuterDisposeScope();
}
}
diff --git a/src/TorchSharp/Distributions/Distribution.cs b/src/TorchSharp/Distributions/Distribution.cs
index 1ee988899..c381a078b 100644
--- a/src/TorchSharp/Distributions/Distribution.cs
+++ b/src/TorchSharp/Distributions/Distribution.cs
@@ -183,10 +183,16 @@ protected Tensor ProbsToLogits(Tensor probs, bool isBinary = false)
protected Tensor ClampProbs(Tensor probs)
{
var eps = torch.finfo(probs.dtype).eps;
- return probs.clamp(eps, 1 - eps);
+ using var eps_scalar = eps.ToScalar();
+ using var eps_bar_scalar = (1 - eps).ToScalar();
+ return probs.clamp(eps_scalar, eps_bar_scalar);
}
- protected Tensor ClampByZero(Tensor x) => (x.clamp_min(0) + x - x.clamp_max(0)) / 2;
+ protected Tensor ClampByZero(Tensor x)
+ {
+ using var zero_scalar = 0.ToScalar();
+ return (x.clamp_min(zero_scalar) + x - x.clamp_max(zero_scalar)) / 2;
+ }
protected torch.Generator generator;
bool disposedValue;
diff --git a/src/TorchSharp/Distributions/ExpRelaxedCategorical.cs b/src/TorchSharp/Distributions/ExpRelaxedCategorical.cs
index 7c42c0548..964460149 100644
--- a/src/TorchSharp/Distributions/ExpRelaxedCategorical.cs
+++ b/src/TorchSharp/Distributions/ExpRelaxedCategorical.cs
@@ -134,7 +134,9 @@ public override Tensor log_prob(Tensor value)
var logitsValue = broadcast_tensors(_logits, value);
var logits = logitsValue[0];
value = logitsValue[1];
- var log_scale = (torch.full_like(_temperature, K).lgamma() - _temperature.log().mul(-(K - 1)));
+ using var K_scalar = K.ToScalar();
+ using var negative_Ksub1_scalar = (-(K - 1)).ToScalar();
+ var log_scale = torch.full_like(_temperature, K_scalar).lgamma() - _temperature.log().mul(negative_Ksub1_scalar); // FIXME: Use inplace ops?
var score = logits - value.mul(_temperature);
score = (score - score.logsumexp(dim: -1, keepdim: true)).sum(-1);
return (score + log_scale).MoveToOuterDisposeScope();
diff --git a/src/TorchSharp/Distributions/Exponential.cs b/src/TorchSharp/Distributions/Exponential.cs
index 93e28d589..21b512d7c 100644
--- a/src/TorchSharp/Distributions/Exponential.cs
+++ b/src/TorchSharp/Distributions/Exponential.cs
@@ -24,7 +24,7 @@ public class Exponential : torch.distributions.ExponentialFamily
/// <summary>
/// The variance of the distribution
/// </summary>
- public override Tensor variance => rate.pow(2);
+ public override Tensor variance => rate.square();
/// <summary>
/// The standard deviation of the distribution
diff --git a/src/TorchSharp/Distributions/FisherSnedecor.cs b/src/TorchSharp/Distributions/FisherSnedecor.cs
index d9bc40a4e..91f9ae706 100644
--- a/src/TorchSharp/Distributions/FisherSnedecor.cs
+++ b/src/TorchSharp/Distributions/FisherSnedecor.cs
@@ -32,7 +32,7 @@ public override Tensor variance {
using var _ = torch.NewDisposeScope();
var df2 = this.df2.clone();
df2[df2 <= 4] = torch.tensor(float.NaN);
- return (2 * df2.pow(2) * (this.df1 + df2 - 2) / (this.df1 * (df2 - 2).pow(2) * (df2 - 4))).MoveToOuterDisposeScope();
+ return (2 * df2.square() * (this.df1 + df2 - 2) / (this.df1 * (df2 - 2).square() * (df2 - 4))).MoveToOuterDisposeScope();
}
}
@@ -80,11 +80,11 @@ public override Tensor rsample(params long[] sample_shape)
var X1 = gamma1.rsample(sample_shape).view(shape);
var X2 = gamma2.rsample(sample_shape).view(shape);
- var tiny = torch.finfo(X2.dtype).tiny;
- X2.clamp_(min: tiny);
+ using var tiny_scalar = torch.finfo(X2.dtype).tiny.ToScalar();
+ X2.clamp_(min: tiny_scalar);
var Y = X1 / X2;
- return Y.clamp_(min: tiny).MoveToOuterDisposeScope();
+ return Y.clamp_(min: tiny_scalar).MoveToOuterDisposeScope();
}
///
diff --git a/src/TorchSharp/Distributions/Gamma.cs b/src/TorchSharp/Distributions/Gamma.cs
index 9ee82a5e3..35cf34917 100644
--- a/src/TorchSharp/Distributions/Gamma.cs
+++ b/src/TorchSharp/Distributions/Gamma.cs
@@ -19,12 +19,15 @@ public class Gamma : torch.distributions.ExponentialFamily
/// </summary>
public override Tensor mean => WrappedTensorDisposeScope(() => concentration / rate);
- public override Tensor mode => WrappedTensorDisposeScope(() => ((concentration - 1) / rate).clamp_(min: 0));
+ public override Tensor mode => WrappedTensorDisposeScope(() => {
+ using var zero_scalar = 0.ToScalar();
+ return ((concentration - 1) / rate).clamp_(min: zero_scalar);
+ });
/// <summary>
/// The variance of the distribution
/// </summary>
- public override Tensor variance => WrappedTensorDisposeScope(() => concentration / rate.pow(2));
+ public override Tensor variance => WrappedTensorDisposeScope(() => concentration / rate.square());
/// <summary>
/// Constructor
@@ -62,7 +65,8 @@ public override Tensor rsample(params long[] sample_shape)
using var _ = torch.NewDisposeScope();
var shape = ExtendedShape(sample_shape);
var value = torch._standard_gamma(concentration.expand(shape), generator: generator) / rate.expand(shape);
- return value.detach().clamp_(min: torch.finfo(value.dtype).tiny).MoveToOuterDisposeScope();
+ using var tiny_scalar = torch.finfo(value.dtype).tiny.ToScalar();
+ return value.detach().clamp_(min: tiny_scalar).MoveToOuterDisposeScope();
}
///
diff --git a/src/TorchSharp/Distributions/GumbEL.cs b/src/TorchSharp/Distributions/GumbEL.cs
index 1ca1845f4..6efdbefda 100644
--- a/src/TorchSharp/Distributions/GumbEL.cs
+++ b/src/TorchSharp/Distributions/GumbEL.cs
@@ -30,7 +30,7 @@ internal Gumbel(Tensor loc, Tensor scale, Distribution base_distribution, torch.
public override Tensor mode => loc;
- public override Tensor variance => stddev.pow(2);
+ public override Tensor variance => stddev.square();
public override Tensor stddev => pioversqrtsix * scale;
diff --git a/src/TorchSharp/Distributions/HalfNormal.cs b/src/TorchSharp/Distributions/HalfNormal.cs
index 563d8d91e..526e21112 100644
--- a/src/TorchSharp/Distributions/HalfNormal.cs
+++ b/src/TorchSharp/Distributions/HalfNormal.cs
@@ -26,7 +26,7 @@ internal HalfNormal(Tensor scale, torch.Generator generator = null) :
public override Tensor mode => torch.zeros_like(scale);
- public override Tensor variance => scale.pow(2) * (1 - 2 / Math.PI);
+ public override Tensor variance => scale.square() * (1 - 2 / Math.PI);
public override Tensor log_prob(Tensor value)
{
diff --git a/src/TorchSharp/Distributions/Laplace.cs b/src/TorchSharp/Distributions/Laplace.cs
index 4ebb48053..b9b53f528 100644
--- a/src/TorchSharp/Distributions/Laplace.cs
+++ b/src/TorchSharp/Distributions/Laplace.cs
@@ -31,7 +31,7 @@ public class Laplace : torch.distributions.Distribution
/// <summary>
/// The variance of the distribution
/// </summary>
- public override Tensor variance => 2 * scale.pow(2);
+ public override Tensor variance => 2 * scale.square();
///
diff --git a/src/TorchSharp/Distributions/LogNormal.cs b/src/TorchSharp/Distributions/LogNormal.cs
index 944792a1e..2cd6e08d2 100644
--- a/src/TorchSharp/Distributions/LogNormal.cs
+++ b/src/TorchSharp/Distributions/LogNormal.cs
@@ -22,11 +22,11 @@ internal LogNormal(Tensor loc, Tensor scale, torch.Generator generator = null) :
public Tensor scale { get; private set; }
- public override Tensor mean => torch.WrappedTensorDisposeScope(() => (loc + scale.pow(2) / 2).exp());
+ public override Tensor mean => torch.WrappedTensorDisposeScope(() => (loc + scale.square() / 2).exp());
public override Tensor mode => torch.WrappedTensorDisposeScope(() => (loc - scale.square()).exp());
- public override Tensor variance => torch.WrappedTensorDisposeScope(() => (scale.pow(2).exp() - 1) * (2 * loc + scale.pow(2)).exp());
+ public override Tensor variance => torch.WrappedTensorDisposeScope(() => (scale.square().exp() - 1) * (2 * loc + scale.square()).exp());
protected override void Dispose(bool disposing)
{
diff --git a/src/TorchSharp/Distributions/MultiVariateNormal.cs b/src/TorchSharp/Distributions/MultiVariateNormal.cs
index 721c0a5de..1ccc0c437 100644
--- a/src/TorchSharp/Distributions/MultiVariateNormal.cs
+++ b/src/TorchSharp/Distributions/MultiVariateNormal.cs
@@ -34,7 +34,7 @@ public class MultivariateNormal : torch.distributions.Distribution
/// The variance of the distribution
/// </summary>
public override Tensor variance =>
- WrappedTensorDisposeScope(() => _unbroadcasted_scale_tril.pow(2).sum(-1).expand(batch_shape + event_shape));
+ WrappedTensorDisposeScope(() => _unbroadcasted_scale_tril.square().sum(-1).expand(batch_shape + event_shape));
/// <summary>
/// Constructor
@@ -241,7 +241,7 @@ private Tensor BatchMahalanobis(Tensor bL, Tensor bx)
var flat_x = bx.reshape(-1, flat_L.size(0), n);
var flat_x_swap = flat_x.permute(1, 2, 0);
- var M_swap = torch.linalg.solve_triangular(flat_L, flat_x_swap, upper: false).pow(2).sum(-2);
+ var M_swap = torch.linalg.solve_triangular(flat_L, flat_x_swap, upper: false).square().sum(-2);
var M = M_swap.t();
var permuted_M = M.reshape(TakeAllBut(bx.shape, 1));
diff --git a/src/TorchSharp/Distributions/NegativeBinomial.cs b/src/TorchSharp/Distributions/NegativeBinomial.cs
index 36506d620..4b4b737ff 100644
--- a/src/TorchSharp/Distributions/NegativeBinomial.cs
+++ b/src/TorchSharp/Distributions/NegativeBinomial.cs
@@ -26,7 +26,10 @@ public class NegativeBinomial : torch.distributions.Distribution
/// Mode of the negative binomial distribution.
/// </summary>
public override Tensor mode =>
- WrappedTensorDisposeScope(() => ((total_count - 1) * logits.exp()).floor_().clamp(min: 0));
+ WrappedTensorDisposeScope(() => {
+ using var zero_scalar = 0.ToScalar();
+ return ((total_count - 1) * logits.exp()).floor_().clamp(min: zero_scalar);
+ });
/// <summary>
/// The variance of the distribution
@@ -101,7 +104,8 @@ public override Tensor log_prob(Tensor value)
using var _ = NewDisposeScope();
var log_unnormalized_prob = (total_count * (-_logits).log_sigmoid() + value * logits.log_sigmoid());
var log_normalization = (-torch.lgamma(total_count + value) + torch.lgamma(1.0 + value) + torch.lgamma(total_count));
- log_normalization = log_normalization.masked_fill(total_count + value == 0, 0);
+ using var zero_scalar = 0.ToScalar();
+ log_normalization = log_normalization.masked_fill(total_count + value == zero_scalar, zero_scalar);
return (log_unnormalized_prob - log_normalization).MoveToOuterDisposeScope();
}
diff --git a/src/TorchSharp/Distributions/Normal.cs b/src/TorchSharp/Distributions/Normal.cs
index fb2f34aa1..921272210 100644
--- a/src/TorchSharp/Distributions/Normal.cs
+++ b/src/TorchSharp/Distributions/Normal.cs
@@ -31,7 +31,7 @@ public class Normal : distributions.Distribution
/// <summary>
/// The variance of the distribution
/// </summary>
- public override Tensor variance => scale.pow(2);
+ public override Tensor variance => scale.square();
/// <summary>
/// Constructor
@@ -91,9 +91,9 @@ public override Tensor rsample(params long[] sample_shape)
public override Tensor log_prob(Tensor value)
{
using var _ = NewDisposeScope();
- var v = scale.pow(2);
+ var v = scale.square();
var log_scale = scale.log();
- return (-((value - loc).pow(2)) / (2 * v) - log_scale - Math.Log(Math.Sqrt(2 * Math.PI))).MoveToOuterDisposeScope();
+ return (-((value - loc).square()) / (2 * v) - log_scale - Math.Log(Math.Sqrt(2 * Math.PI))).MoveToOuterDisposeScope();
}
///
diff --git a/src/TorchSharp/Distributions/Pareto.cs b/src/TorchSharp/Distributions/Pareto.cs
index 2ff0e22f2..4ab27684a 100644
--- a/src/TorchSharp/Distributions/Pareto.cs
+++ b/src/TorchSharp/Distributions/Pareto.cs
@@ -25,7 +25,8 @@ internal Pareto(Tensor scale, Tensor alpha, Distribution base_distribution, torc
public override Tensor mean {
get {
using var _ = torch.NewDisposeScope();
- var a = alpha.clamp(min: 1);
+ using var one_scalar = 1.ToScalar();
+ var a = alpha.clamp(min: one_scalar);
return (a * scale / (a - 1)).MoveToOuterDisposeScope();
}
}
@@ -35,8 +36,9 @@ public override Tensor mean {
public override Tensor variance {
get {
using var _ = torch.NewDisposeScope();
- var a = alpha.clamp(min: 2);
- return (scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))).MoveToOuterDisposeScope();
+ using var two_scalar = 2.ToScalar();
+ var a = alpha.clamp(min: two_scalar);
+ return (scale.square() * a / ((a - 1).square() * (a - 2))).MoveToOuterDisposeScope();
}
}
diff --git a/src/TorchSharp/Distributions/Transforms.cs b/src/TorchSharp/Distributions/Transforms.cs
index 48f58f31f..363ab9775 100644
--- a/src/TorchSharp/Distributions/Transforms.cs
+++ b/src/TorchSharp/Distributions/Transforms.cs
@@ -517,21 +517,25 @@ public class SigmoidTransform : Transform
public override bool bijective => true;
- protected internal override Tensor _sign() => 1;
+ protected internal override Tensor _sign() => torch.tensor(1);
protected internal override Tensor log_abs_det_jacobian(Tensor x, Tensor y) => -nn.functional.softplus(-x) - nn.functional.softplus(x);
protected internal override Tensor _call(Tensor x)
{
var finfo = torch.finfo(x.dtype);
- return torch.WrappedTensorDisposeScope(() => torch.clamp(torch.sigmoid(x), min: finfo.tiny, max: 1 - finfo.eps));
+ using var tiny_scalar = finfo.tiny.ToScalar();
+ using var eps_bar_scalar = (1 - finfo.eps).ToScalar();
+ return torch.WrappedTensorDisposeScope(() => torch.sigmoid(x).clamp(min: tiny_scalar, max: eps_bar_scalar));
}
protected internal override Tensor _inverse(Tensor y)
{
using var _ = torch.NewDisposeScope();
var finfo = torch.finfo(y.dtype);
- y = y.clamp(min: finfo.tiny, max: 1 - finfo.eps);
+ using var tiny_scalar = finfo.tiny.ToScalar();
+ using var eps_bar_scalar = (1 - finfo.eps).ToScalar();
+ y = y.clamp(min: tiny_scalar, max: eps_bar_scalar);
return (y.log() - (-y).log1p()).MoveToOuterDisposeScope();
}
}
diff --git a/src/TorchSharp/Distributions/Uniform.cs b/src/TorchSharp/Distributions/Uniform.cs
index 671ec0825..5dfa209ce 100644
--- a/src/TorchSharp/Distributions/Uniform.cs
+++ b/src/TorchSharp/Distributions/Uniform.cs
@@ -25,7 +25,7 @@ public class Uniform : torch.distributions.Distribution
/// The variance of the distribution
/// </summary>
public override Tensor variance =>
- WrappedTensorDisposeScope(() => (high - low).pow(2) / 12);
+ WrappedTensorDisposeScope(() => (high - low).square() / 12);
/// <summary>
/// Constructor
@@ -83,7 +83,9 @@ public override Tensor log_prob(Tensor value)
/// </summary>
public override Tensor cdf(Tensor value)
{
- return torch.WrappedTensorDisposeScope(() => ((value - low) / (high - low)).clamp_(0, 1));
+ using var zero_scalar = 0.ToScalar();
+ using var one_scalar = 1.ToScalar();
+ return torch.WrappedTensorDisposeScope(() => ((value - low) / (high - low)).clamp_(zero_scalar, one_scalar));
}
///
diff --git a/src/TorchSharp/Distributions/Weibull.cs b/src/TorchSharp/Distributions/Weibull.cs
index 131b8ec64..13bcff3f2 100644
--- a/src/TorchSharp/Distributions/Weibull.cs
+++ b/src/TorchSharp/Distributions/Weibull.cs
@@ -44,7 +44,7 @@ protected override void Dispose(bool disposing)
public override Tensor variance =>
WrappedTensorDisposeScope(() =>
- scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * concentration_reciprocal)) - torch.exp(2 * torch.lgamma(1 + concentration_reciprocal)))
+ scale.square() * (torch.exp(torch.lgamma(1 + 2 * concentration_reciprocal)) - torch.exp(2 * torch.lgamma(1 + concentration_reciprocal)))
);
///
diff --git a/src/TorchSharp/NN/Activation/PReLU.cs b/src/TorchSharp/NN/Activation/PReLU.cs
index 2b48b4a6b..5f3133ed9 100644
--- a/src/TorchSharp/NN/Activation/PReLU.cs
+++ b/src/TorchSharp/NN/Activation/PReLU.cs
@@ -20,9 +20,9 @@ internal PReLU(long num_parameters, double init, Device? device = null, ScalarTy
{
this.init = init;
this.num_parameters = num_parameters;
-
- var w = torch.empty(num_parameters, device:device, dtype:dtype);
- w.fill_(init);
+
+ using var init_scalar = init.ToScalar();
+ var w = torch.full(num_parameters, init_scalar, device: device, dtype: dtype);
this.weight = new Parameter(w);
}
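The PReLU change collapses an allocate-then-fill pair into a single `torch.full` call; a sketch of the before/after with hypothetical values:

```csharp
using TorchSharp;
using static TorchSharp.torch;

// Before: two steps, where passing the double 0.25 relied on the implicit
// double -> Scalar conversion and the resulting Scalar was never disposed.
var w_old = empty(8);
w_old.fill_(0.25);

// After: one call, with a Scalar whose lifetime is explicit.
using var init_scalar = 0.25.ToScalar();
var w_new = full(8, init_scalar);
```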
diff --git a/src/TorchSharp/NN/Losses.cs b/src/TorchSharp/NN/Losses.cs
index 5e514bef5..b90a00c05 100644
--- a/src/TorchSharp/NN/Losses.cs
+++ b/src/TorchSharp/NN/Losses.cs
@@ -999,10 +999,12 @@ public override Tensor forward(Tensor input, Tensor target, Tensor variance)
variance = variance.view(target.shape[0], -1);
if (variance.shape[1] != input.shape[1] && variance.shape[1] != 1) throw new ArgumentException("variance has the wrong shape");
- if ((variance < 0).any().cpu().item()) throw new ArgumentException("variance has negative entry/entries");
+ using var zero_scalar = 0.ToScalar();
+ if ((variance < zero_scalar).any().cpu().item()) throw new ArgumentException("variance has negative entry/entries");
using (var _ = torch.no_grad())
- variance = variance.clamp_min(eps);
+ using (var eps_scalar = eps.ToScalar())
+ variance = variance.clamp_min(eps_scalar);
var loss = 0.5 * (variance.log() + (input - target).square() / variance).view(input.shape[0], -1).sum(dim: stackalloc long[] { 1 });
diff --git a/src/TorchSharp/NN/Normalization/BatchNorm.cs b/src/TorchSharp/NN/Normalization/BatchNorm.cs
index 398eae63c..0bdc68f56 100644
--- a/src/TorchSharp/NN/Normalization/BatchNorm.cs
+++ b/src/TorchSharp/NN/Normalization/BatchNorm.cs
@@ -37,7 +37,8 @@ public override Tensor forward(Tensor input)
{
if (num_batches_tracked is not null)
{
- num_batches_tracked.add_(1);
+ using var one_scalar = 1.ToScalar(); // FIXME: Cache over training?
+ num_batches_tracked.add_(one_scalar);
exponential_average_factor = (this.momentum is null) ? (1.0 / (double)num_batches_tracked) : momentum.Value;
}
}
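One possible answer to the FIXME above: the increment is always the constant 1, so the Scalar could be created once per process and cached, instead of being rebuilt on every forward pass. A hypothetical sketch, not part of this patch:

```csharp
using TorchSharp;
using static TorchSharp.torch;

class StepCounter
{
    // Cached once; intentionally never disposed, since it lives for the
    // lifetime of the process rather than a single forward call.
    private static readonly Scalar One = 1.ToScalar();

    private readonly Tensor count = zeros(new long[] { 1 }, dtype: ScalarType.Int64);

    public void Tick() => count.add_(One);
}
```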
diff --git a/src/TorchSharp/Optimizers/ASGD.cs b/src/TorchSharp/Optimizers/ASGD.cs
index 260810aa0..4e9fb852c 100644
--- a/src/TorchSharp/Optimizers/ASGD.cs
+++ b/src/TorchSharp/Optimizers/ASGD.cs
@@ -140,6 +145,8 @@ public override Tensor step(Func<Tensor> closure = null)
var lambd = options.lambd.Value;
var alpha = options.alpha.Value;
var weight_decay = options.weight_decay.Value;
+ var need_weight_decay = weight_decay != 0;
+ using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay?
var t0 = options.t0.Value;
var lr = options.LearningRate.Value;
@@ -157,15 +159,19 @@ public override Tensor step(Func<Tensor> closure = null)
state.step += 1;
- grad = (weight_decay != 0)
- ? grad.add(param, alpha: weight_decay)
+ grad = (need_weight_decay)
+ ? grad.add(param, alpha: weight_decay_scalar)
: grad.alias();
- param.mul_(1 - lambd * state.eta);
- param.add_(grad, alpha: -state.eta);
+ var lambd_eta_bar = 1 - lambd * state.eta;
+ using var lambd_eta_bar_scalar = lambd_eta_bar.ToScalar();
+ param.mul_(lambd_eta_bar_scalar);
+ using var negative_eta_scalar = (-state.eta).ToScalar();
+ param.add_(grad, alpha: negative_eta_scalar);
if (state.mu != 1) {
- state.ax.add_(param.sub(state.ax).mul(state.mu));
+ using var mu_scalar = state.mu.ToScalar();
+ state.ax.add_(param.sub(state.ax).mul(mu_scalar));
} else {
state.ax.copy_(param);
}
diff --git a/src/TorchSharp/Optimizers/Adadelta.cs b/src/TorchSharp/Optimizers/Adadelta.cs
index 924dcb468..c0f10da21 100644
--- a/src/TorchSharp/Optimizers/Adadelta.cs
+++ b/src/TorchSharp/Optimizers/Adadelta.cs
@@ -129,10 +134,15 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var rho = options.rho.Value;
- var eps = options.eps.Value;
+ using var rho_scalar = rho.ToScalar();
+ using var rho_bar_scalar = (1 - rho).ToScalar();
+ using var eps_scalar = options.eps.Value.ToScalar();
var weight_decay = options.weight_decay.Value;
+ var need_weight_decay = (weight_decay != 0);
+ using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay?
var maximize = options.maximize.Value;
var lr = options.LearningRate.Value;
+ using var negative_lr_scalar = (-lr).ToScalar();
foreach (var param in group.Parameters) {
@@ -149,17 +154,17 @@ public override Tensor step(Func<Tensor> closure = null)
var square_avg = state.square_avg;
var acc_delta = state.acc_delta;
- grad = (weight_decay != 0)
- ? grad.add(param, alpha: weight_decay)
+ grad = (need_weight_decay)
+ ? grad.add(param, alpha: weight_decay_scalar)
: grad.alias();
- square_avg.mul_(rho).addcmul_(grad, grad, 1 - rho);
+ square_avg.mul_(rho_scalar).addcmul_(grad, grad, rho_bar_scalar);
- var std = square_avg.add(eps).sqrt_();
- var delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad);
+ var std = square_avg.add(eps_scalar).sqrt_();
+ var delta = acc_delta.add(eps_scalar).sqrt_().div_(std).mul_(grad);
- param.add_(delta, alpha: -lr);
- acc_delta.mul_(rho).addcmul_(delta, delta, 1 - rho);
+ param.add_(delta, alpha: negative_lr_scalar);
+ acc_delta.mul_(rho_scalar).addcmul_(delta, delta, rho_bar_scalar);
}
}, closure);
}
diff --git a/src/TorchSharp/Optimizers/Adagrad.cs b/src/TorchSharp/Optimizers/Adagrad.cs
index a4d4b70fc..2c9c9c8d4 100644
--- a/src/TorchSharp/Optimizers/Adagrad.cs
+++ b/src/TorchSharp/Optimizers/Adagrad.cs
@@ -139,9 +139,12 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var lr_decay = options.lr_decay.Value;
var weight_decay = options.weight_decay.Value;
- var eps = options.eps.Value;
- var initial_accumulator_value = options.initial_accumulator_value.Value;
+ var need_weight_decay = weight_decay != 0;
+ using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay?
+ using var eps_scalar = options.eps.Value.ToScalar();
+ var initial_accumulator_value = options.initial_accumulator_value.Value; // FIXME: Unused?
var lr = options.LearningRate.Value;
+ using var one_scalar = 1.ToScalar();
foreach (var param in group.Parameters) {
@@ -153,20 +156,19 @@ public override Tensor step(Func<Tensor> closure = null)
state.step += 1;
- if (weight_decay != 0) {
- grad = grad.add(param, alpha: weight_decay);
- }
+ if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar);
var clr = lr / (1 + (state.step - 1) * lr_decay);
+ using var negative_clr_scalar = (-clr).ToScalar();
if (grad.is_sparse)
throw new NotImplementedException("Adagrad optimization over sparse parameters");
if (torch.is_complex(grad))
throw new NotImplementedException("Adagrad optimization over complex parameters");
- state.sum.addcmul_(grad, grad, value: 1);
- var std = state.sum.sqrt().add_(eps);
- param.addcdiv_(grad, std, value: -clr);
+ state.sum.addcmul_(grad, grad, value: one_scalar);
+ var std = state.sum.sqrt().add_(eps_scalar);
+ param.addcdiv_(grad, std, value: negative_clr_scalar);
}
diff --git a/src/TorchSharp/Optimizers/Adam.cs b/src/TorchSharp/Optimizers/Adam.cs
index 2e04fc6ef..222a63439 100644
--- a/src/TorchSharp/Optimizers/Adam.cs
+++ b/src/TorchSharp/Optimizers/Adam.cs
@@ -154,10 +154,18 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var beta1 = options.beta1.Value;
var beta2 = options.beta2.Value;
+ using var beta1_scalar = beta1.ToScalar();
+ using var beta2_scalar = beta2.ToScalar();
+ var beta1_bar = 1 - beta1;
+ var beta2_bar = 1 - beta2;
+ using var beta1_bar_scalar = beta1_bar.ToScalar();
+ using var beta2_bar_scalar = beta2_bar.ToScalar();
var weight_decay = options.weight_decay.Value;
+ var need_weight_decay = weight_decay != 0;
+ using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay?
var amsgrad = options.amsgrad.Value;
var maximize = options.maximize.Value;
- var eps = options.eps.Value;
+ using var eps_scalar = options.eps.Value.ToScalar();
var lr = options.LearningRate.Value;
foreach (var param in group.Parameters) {
@@ -175,25 +183,24 @@ public override Tensor step(Func<Tensor> closure = null)
var bias_correction1 = 1 - Math.Pow(beta1, state.step);
var bias_correction2 = 1 - Math.Pow(beta2, state.step);
- if (weight_decay != 0) {
- grad = grad.add(param, alpha: weight_decay);
- }
+ if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar);
- state.exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1);
- state.exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value: 1 - beta2);
+ state.exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar);
+ state.exp_avg_sq.mul_(beta2_scalar).addcmul_(grad, grad.conj(), value: beta2_bar_scalar);
- Tensor denom = null;
+ Tensor denom = null; // FIXME: Need dispose?
if (amsgrad) {
var t0 = state.max_exp_avg_sq;
state.max_exp_avg_sq = torch.maximum(t0, state.exp_avg_sq).DetachFromDisposeScope();
t0.Dispose();
- denom = (state.max_exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps);
+ denom = (state.max_exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar);
} else {
- denom = (state.exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps);
+ denom = (state.exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar);
}
var step_size = lr / bias_correction1;
- param.addcdiv_(state.exp_avg, denom, value: -step_size);
+ using var negative_step_size_scalar = (-step_size).ToScalar();
+ param.addcdiv_(state.exp_avg, denom, value: negative_step_size_scalar);
}
}, closure);
}
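For orientation, the new Scalar carries the bias-corrected step size with the sign folded in, since `addcdiv_` computes `param += value * tensor1 / tensor2`; a sketch of the arithmetic with hypothetical hyperparameters:

```csharp
using System;
using TorchSharp;

double lr = 1e-3, beta1 = 0.9;
int step = 10;

// step_size = lr / bias_correction1 = lr / (1 - beta1^step)
var step_size = lr / (1 - Math.Pow(beta1, step));

// Folding the minus sign into the Scalar makes addcdiv_ a descent update:
// param += (-step_size) * exp_avg / denom
using var negative_step_size_scalar = (-step_size).ToScalar();
```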
diff --git a/src/TorchSharp/Optimizers/AdamW.cs b/src/TorchSharp/Optimizers/AdamW.cs
index 4f6c10d85..6dd77a2c3 100644
--- a/src/TorchSharp/Optimizers/AdamW.cs
+++ b/src/TorchSharp/Optimizers/AdamW.cs
@@ -154,11 +154,18 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var beta1 = options.beta1.Value;
var beta2 = options.beta2.Value;
+ using var beta1_scalar = beta1.ToScalar();
+ using var beta2_scalar = beta2.ToScalar();
+ var beta1_bar = 1 - beta1;
+ var beta2_bar = 1 - beta2;
+ using var beta1_bar_scalar = beta1_bar.ToScalar();
+ using var beta2_bar_scalar = beta2_bar.ToScalar();
var weight_decay = options.weight_decay.Value;
var amsgrad = options.amsgrad.Value;
var maximize = options.maximize.Value;
- var eps = options.eps.Value;
+ using var eps_scalar = options.eps.Value.ToScalar();
var lr = options.LearningRate.Value;
+ using var lr_weight_decay_bar_scalar = (1 - lr * weight_decay).ToScalar();
foreach (var param in group.Parameters) {
@@ -172,26 +179,28 @@ public override Tensor step(Func<Tensor> closure = null)
state.step += 1;
- param.mul_(1 - lr * weight_decay);
+ param.mul_(lr_weight_decay_bar_scalar);
var bias_correction1 = 1 - Math.Pow(beta1, state.step);
var bias_correction2 = 1 - Math.Pow(beta2, state.step);
- state.exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1);
- state.exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value: 1 - beta2);
+ state.exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar);
+ state.exp_avg_sq.mul_(beta2_scalar).addcmul_(grad, grad, value: beta2_bar_scalar);
Tensor denom = null;
if (amsgrad) {
var t0 = state.max_exp_avg_sq;
state.max_exp_avg_sq = torch.maximum(t0, state.exp_avg_sq).DetachFromDisposeScope();
t0.Dispose();
- denom = (state.max_exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps);
+ denom = (state.max_exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar);
} else {
- denom = (state.exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps);
+ denom = (state.exp_avg_sq.sqrt() / Math.Sqrt(bias_correction2)).add_(eps_scalar);
}
var step_size = lr / bias_correction1;
- param.addcdiv_(state.exp_avg, denom, value: -step_size);
+ using var negative_step_size_scalar = (-step_size).ToScalar();
+ param.addcdiv_(state.exp_avg, denom, value: negative_step_size_scalar);
+ denom.Dispose();
}
}, closure);
}
diff --git a/src/TorchSharp/Optimizers/Adamax.cs b/src/TorchSharp/Optimizers/Adamax.cs
index e09ef9170..c54505070 100644
--- a/src/TorchSharp/Optimizers/Adamax.cs
+++ b/src/TorchSharp/Optimizers/Adamax.cs
@@ -142,8 +142,14 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var beta1 = options.beta1.Value;
var beta2 = options.beta2.Value;
- var eps = options.eps.Value;
+ using var beta1_scalar = beta1.ToScalar();
+ using var beta2_scalar = beta2.ToScalar();
+ var beta1_bar = 1 - beta1;
+ using var beta1_bar_scalar = beta1_bar.ToScalar();
+ using var eps_scalar = options.eps.Value.ToScalar();
var weight_decay = options.weight_decay.Value;
+ var need_weight_decay = weight_decay != 0;
+ using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay?
var lr = options.LearningRate.Value;
foreach (var param in group.Parameters) {
@@ -161,21 +167,22 @@ public override Tensor step(Func<Tensor> closure = null)
var exp_avg = state.exp_avg;
var exp_inf = state.exp_inf;
- grad = (weight_decay != 0)
- ? grad.add(param, alpha: weight_decay)
+ grad = (need_weight_decay)
+ ? grad.add(param, alpha: weight_decay_scalar)
: grad.alias();
- exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1);
+ exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar);
var norm_buf = torch.cat(new Tensor[] {
- exp_inf.mul_(beta2).unsqueeze(0),
- grad.abs().add_(eps).unsqueeze_(0)
+ exp_inf.mul_(beta2_scalar).unsqueeze(0),
+ grad.abs().add_(eps_scalar).unsqueeze_(0)
}, 0);
- torch.amax(norm_buf, new long[] { 0 }, false, exp_inf);
+ torch.amax(norm_buf, new long[] { 0 }, false, exp_inf); // FIXME: CA1806?
var clr = lr / (1 - Math.Pow(beta1, state.step));
- param.addcdiv_(exp_avg, exp_inf, value: -clr);
+ using var negative_clr_scalar = (-clr).ToScalar();
+ param.addcdiv_(exp_avg, exp_inf, value: negative_clr_scalar);
}
}, closure);
}
diff --git a/src/TorchSharp/Optimizers/NAdam.cs b/src/TorchSharp/Optimizers/NAdam.cs
index 6118cc5d1..ed5add3e4 100644
--- a/src/TorchSharp/Optimizers/NAdam.cs
+++ b/src/TorchSharp/Optimizers/NAdam.cs
@@ -147,8 +147,16 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var beta1 = options.beta1.Value;
var beta2 = options.beta2.Value;
- var eps = options.eps.Value;
+ using var beta1_scalar = beta1.ToScalar();
+ using var beta2_scalar = beta2.ToScalar();
+ var beta1_bar = 1 - beta1;
+ var beta2_bar = 1 - beta2;
+ using var beta1_bar_scalar = beta1_bar.ToScalar();
+ using var beta2_bar_scalar = beta2_bar.ToScalar();
+ using var eps_scalar = options.eps.Value.ToScalar();
var weight_decay = options.weight_decay.Value;
+ var need_weight_decay = weight_decay != 0;
+ using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay?
var momentum_decay = options.momentum_decay.Value;
var lr = options.LearningRate.Value;
@@ -166,9 +174,10 @@ public override Tensor step(Func<Tensor> closure = null)
var exp_avg_sq = state.exp_avg_sq;
var bias_correction2 = 1 - Math.Pow(beta2, state.step);
+ using var bias_correction2_scalar = bias_correction2.ToScalar();
- grad = (weight_decay != 0)
- ? grad.add(param, alpha: weight_decay)
+ grad = (need_weight_decay)
+ ? grad.add(param, alpha: weight_decay_scalar)
: grad.alias();
var mu = beta1 * (1.0 - 0.5 * Math.Pow(0.96, state.step * momentum_decay));
@@ -177,13 +186,17 @@ public override Tensor step(Func<Tensor> closure = null)
var mu_product = state.mu_product * mu;
var mu_product_next = mu_product * mu_next;
- exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1);
- exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value: 1 - beta2);
+ exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar);
+ exp_avg_sq.mul_(beta2_scalar).addcmul_(grad, grad, value: beta2_bar_scalar);
- var denom = exp_avg_sq.div(bias_correction2).sqrt_().add_(eps);
+ var denom = exp_avg_sq.div(bias_correction2_scalar).sqrt_().add_(eps_scalar); // FIXME: Need dispose?
- param.addcdiv_(grad, denom, value: -lr * (1 - mu) / (1 - mu_product));
- param.addcdiv_(exp_avg, denom, value: -lr * mu_next / (1 - mu_product_next));
+ var scaled_lr = lr * (1 - mu) / (1 - mu_product);
+ using var negative_scaled_scalar = (-scaled_lr).ToScalar();
+ param.addcdiv_(grad, denom, value: negative_scaled_scalar);
+ var scaled_lr_next = lr * mu_next / (1 - mu_product_next);
+ using var negative_scaled_lr_next_scalar = (-scaled_lr_next).ToScalar();
+ param.addcdiv_(exp_avg, denom, value: negative_scaled_lr_next_scalar);
state.mu_product = mu_product;
}
diff --git a/src/TorchSharp/Optimizers/RAdam.cs b/src/TorchSharp/Optimizers/RAdam.cs
index d64416196..ed12e328a 100644
--- a/src/TorchSharp/Optimizers/RAdam.cs
+++ b/src/TorchSharp/Optimizers/RAdam.cs
@@ -141,9 +141,19 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var beta1 = options.beta1.Value;
var beta2 = options.beta2.Value;
- var eps = options.eps.Value;
+ using var beta1_scalar = beta1.ToScalar();
+ using var beta2_scalar = beta2.ToScalar();
+ var beta1_bar = 1 - beta1;
+ var beta2_bar = 1 - beta2;
+ using var beta1_bar_scalar = beta1_bar.ToScalar();
+ using var beta2_bar_scalar = beta2_bar.ToScalar();
+ using var eps_scalar = options.eps.Value.ToScalar();
var weight_decay = options.weight_decay.Value;
+ var need_weight_decay = weight_decay != 0;
+ using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay?
var lr = options.LearningRate.Value;
+ using var lr_scalar = lr.ToScalar();
+ using var negative_one_scalar = (-1.0).ToScalar(); // FIXME: Use torch.Tensor.sub_ instead?
foreach (var param in group.Parameters) {
@@ -161,27 +171,27 @@ public override Tensor step(Func<Tensor> closure = null)
var bias_correction1 = 1 - Math.Pow(beta1, state.step);
var bias_correction2 = 1 - Math.Pow(beta2, state.step);
- grad = (weight_decay != 0)
- ? grad.add(param, alpha: weight_decay)
+ grad = (need_weight_decay)
+ ? grad.add(param, alpha: weight_decay_scalar)
: grad.alias();
- exp_avg.mul_(beta1).add_(grad, alpha: 1 - beta1);
- exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value: 1 - beta2);
+ exp_avg.mul_(beta1_scalar).add_(grad, alpha: beta1_bar_scalar);
+ exp_avg_sq.mul_(beta2_scalar).addcmul_(grad, grad, value: beta2_bar_scalar);
- var bias_corrected_exp_avg = exp_avg / bias_correction1;
+ var bias_corrected_exp_avg = exp_avg / bias_correction1; // FIXME: Need dispose?
var rho_inf = 2 / (1 - beta2) - 1;
var rho_t = rho_inf - 2 * state.step * Math.Pow(beta2, state.step) / bias_correction2;
- var t6 = bias_corrected_exp_avg * lr;
+ var t6 = bias_corrected_exp_avg.mul(lr_scalar); // FIXME: Need dispose?
if (rho_t > 5) {
var rect = Math.Sqrt((rho_t - 4) * (rho_t - 2) * rho_inf / ((rho_inf - 4) * (rho_inf - 2) * rho_t));
- var adaptive_lr = Math.Sqrt(bias_correction2) / exp_avg_sq.sqrt().add_(eps);
+ var adaptive_lr = Math.Sqrt(bias_correction2) / exp_avg_sq.sqrt().add_(eps_scalar); // FIXME: Need dispose?
- param.add_(t6 * lr * adaptive_lr * rect, alpha: -1.0);
+ param.add_(t6 * lr * adaptive_lr * rect, alpha: negative_one_scalar); // FIXME: Need dispose? Use inplace ops?
} else {
- param.add_(t6, alpha: -1.0);
+ param.add_(t6, alpha: negative_one_scalar);
}
}
}, closure);
diff --git a/src/TorchSharp/Optimizers/RMSprop.cs b/src/TorchSharp/Optimizers/RMSprop.cs
index 9bc77f95f..8333832bb 100644
--- a/src/TorchSharp/Optimizers/RMSprop.cs
+++ b/src/TorchSharp/Optimizers/RMSprop.cs
@@ -152,11 +152,20 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var maximize = options.maximize.Value;
var momentum = options.momentum.Value;
+ var need_momentum = momentum > 0;
+ using var momentum_scalar = momentum.ToScalar(); // FIXME: Omit if not need_momentum?
var alpha = options.alpha.Value;
+ var alpha_bar = 1 - alpha;
+ using var alpha_scalar = alpha.ToScalar();
+ using var alpha_bar_scalar = alpha_bar.ToScalar();
var weight_decay = options.weight_decay.Value;
+ var need_weight_decay = weight_decay != 0;
+ using var weight_decay_scalar = weight_decay.ToScalar(); // FIXME: Omit if not need_weight_decay?
var centered = options.centered.Value;
- var eps = options.eps.Value;
+ using var negative_one_scalar = (-1).ToScalar();
+ using var eps_scalar = options.eps.Value.ToScalar();
var lr = options.LearningRate.Value;
+ using var negative_lr_scalar = (-lr).ToScalar();
foreach (var param in group.Parameters) {
@@ -170,28 +179,26 @@ public override Tensor step(Func<Tensor> closure = null)
state.step += 1;
- if (weight_decay != 0) {
- grad = grad.add(param, alpha: weight_decay);
- }
+ if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar);
- state.square_avg.mul_(alpha).addcmul_(grad, grad, value: 1 - alpha);
+ state.square_avg.mul_(alpha_scalar).addcmul_(grad, grad, value: alpha_bar_scalar);
- Tensor avg = null;
+ Tensor avg = null; // FIXME: Need dispose?
if (centered) {
var grad_avg = state.grad_avg;
- grad_avg.mul_(alpha).add_(grad, alpha: 1 - alpha);
- avg = state.square_avg.addcmul(grad_avg, grad_avg, value: -1).sqrt_().add_(eps);
+ grad_avg.mul_(alpha_scalar).add_(grad, alpha: alpha_bar_scalar);
+ avg = state.square_avg.addcmul(grad_avg, grad_avg, value: negative_one_scalar).sqrt_().add_(eps_scalar);
} else {
- avg = state.square_avg.sqrt().add_(eps);
+ avg = state.square_avg.sqrt().add_(eps_scalar);
}
- if (momentum > 0) {
+ if (need_momentum) {
var buf = state.momentum_buffer;
- buf.mul_(momentum).addcdiv_(grad, avg);
- param.add_(buf, alpha: -lr);
+ buf.mul_(momentum_scalar).addcdiv_(grad, avg);
+ param.add_(buf, alpha: negative_lr_scalar);
} else {
- param.addcdiv_(grad, avg, -lr);
+ param.addcdiv_(grad, avg, negative_lr_scalar);
}
}
}, closure);
diff --git a/src/TorchSharp/Optimizers/Rprop.cs b/src/TorchSharp/Optimizers/Rprop.cs
index abe9d736e..6dae2a11a 100644
--- a/src/TorchSharp/Optimizers/Rprop.cs
+++ b/src/TorchSharp/Optimizers/Rprop.cs
@@ -136,11 +136,14 @@ public override Tensor step(Func<Tensor> closure = null)
var options = group.Options as Options;
var maximize = options.maximize.Value;
- var etaminus = options.etaminus.Value;
- var etaplus = options.etaplus.Value;
- var min_step = options.min_step.Value;
- var max_step = options.max_step.Value;
- var lr = options.LearningRate.Value;
+ using var etaminus_scalar = options.etaminus.Value.ToScalar();
+ using var etaplus_scalar = options.etaplus.Value.ToScalar();
+ using var min_step_scalar = options.min_step.Value.ToScalar();
+ using var max_step_scalar = options.max_step.Value.ToScalar();
+ var lr = options.LearningRate.Value; // FIXME: Unused?
+ using var zero_scalar = 0.ToScalar();
+ using var one_scalar = 1.ToScalar();
+ using var negative_one_scalar = (-1).ToScalar();
foreach (var param in group.Parameters) {
@@ -156,18 +159,18 @@ public override Tensor step(Func<Tensor> closure = null)
state.step += 1;
- var sign = grad.mul(state.prev).sign();
- sign[sign.gt(0)] = (Tensor)etaplus;
- sign[sign.lt(0)] = (Tensor)etaminus;
- sign[sign.eq(0)] = (Tensor)1;
+ var sign = grad.mul(state.prev).sign(); // FIXME: Use torch.Tensor.sign_?
+ sign.masked_fill_(sign.gt(zero_scalar), etaplus_scalar);
+ sign.masked_fill_(sign.lt(zero_scalar), etaminus_scalar);
+ sign.masked_fill_(sign.eq(zero_scalar), one_scalar);
- state.step_size.mul_(sign).clamp_(min_step, max_step);
+ state.step_size.mul_(sign).clamp_(min_step_scalar, max_step_scalar);
grad = grad.clone();
- grad.index_put_(0, sign.eq(etaminus));
+ grad.index_put_(zero_scalar, sign.eq(etaminus_scalar));
- param.addcmul_(grad.sign(), state.step_size, -1);
+ param.addcmul_(grad.sign(), state.step_size, negative_one_scalar);
state.prev.copy_(grad);
}
@@ -308,7 +311,7 @@ public override void Initialize(OptimizerOptions options)
this.step = 0;
this.prev = torch.zeros_like(_parameter).DetachFromDisposeScope();
- this.step_size = _parameter.new_empty(_parameter.shape).fill_((options as Options).LearningRate).DetachFromDisposeScope();
+ this.step_size = _parameter.new_empty(_parameter.shape).fill_((double)(options as Options).LearningRate!).DetachFromDisposeScope();
}
}
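The Rprop rewrite swaps boxed-tensor indexed assignment for in-place `masked_fill_`. A sketch of the behavior with hypothetical eta values; the three fills run sequentially on the mutated tensor, which is safe here because each mask still selects a disjoint subset of the original entries:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var zero = 0.ToScalar();
using var one = 1.ToScalar();
using var etaplus = 1.2.ToScalar();
using var etaminus = 0.5.ToScalar();

var sign = tensor(new double[] { -1.0, 0.0, 1.0 });
sign.masked_fill_(sign.gt(zero), etaplus);   // positives -> 1.2
sign.masked_fill_(sign.lt(zero), etaminus);  // negatives -> 0.5
sign.masked_fill_(sign.eq(zero), one);       // zeros     -> 1.0
// sign is now [0.5, 1.0, 1.2]
```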
diff --git a/src/TorchSharp/Optimizers/SGD.cs b/src/TorchSharp/Optimizers/SGD.cs
index fed1f912b..580a4cfeb 100644
--- a/src/TorchSharp/Optimizers/SGD.cs
+++ b/src/TorchSharp/Optimizers/SGD.cs
@@ -137,14 +137,21 @@ public SGD(IEnumerable<Parameter> parameters, double lr, double momentum = 0.0,
public override Tensor step(Func<Tensor> closure = null)
{
return _step(group => {
-
+ #nullable enable
var options = group.Options;
- var momentum = options.momentum.Value;
- var dampening = options.dampening.Value;
- var weight_decay = options.weight_decay.Value;
- var nesterov = options.nesterov.Value;
- var maximize = options.maximize.Value;
- var lr = options.LearningRate.Value;
+ var momentum = options.momentum!.Value;
+ var need_momentum = momentum != 0;
+ using var momentum_scalar = (need_momentum) ? momentum.ToScalar() : null;
+ var dampening = options.dampening!.Value;
+ var need_dampening = dampening != 1;
+ using var dampening_bar_scalar = (need_momentum && need_dampening) ? (1 - dampening).ToScalar() : null;
+ var weight_decay = options.weight_decay!.Value;
+ var need_weight_decay = weight_decay != 0;
+ using var weight_decay_scalar = (need_weight_decay) ? weight_decay.ToScalar() : null;
+ var nesterov = options.nesterov!.Value;
+ var maximize = options.maximize!.Value;
+ var lr = options.LearningRate!.Value;
+ using var signed_lr_scalar = ((maximize) ? lr : -lr).ToScalar();
foreach (var param in group.Parameters) {
@@ -154,22 +161,21 @@ public override Tensor step(Func<Tensor> closure = null)
if (grad is null) continue;
- if (weight_decay != 0) {
- grad = grad.add(param, alpha: weight_decay);
- }
+ if (need_weight_decay) grad = grad.add(param, alpha: weight_decay_scalar!);
- if (momentum != 0) {
+ if (need_momentum) {
var buf = state.momentum_buffer;
if (buf is null) {
buf = grad.clone().detach().DetachFromDisposeScope();
state.momentum_buffer = buf;
} else {
- buf.mul_(momentum).add_(grad, alpha: (1 - dampening));
+ buf.mul_(momentum_scalar!);
+ if (need_dampening) buf.add_(grad, alpha: dampening_bar_scalar!);
}
if (nesterov) {
- grad = grad.add(buf, alpha: momentum);
+ grad = grad.add(buf, alpha: momentum_scalar!);
} else {
grad = buf;
}
@@ -177,10 +183,10 @@ public override Tensor step(Func closure = null)
state.momentum_buffer = buf;
}
- var alpha = maximize ? lr : -lr;
- param.add_(grad, alpha: alpha);
+ param.add_(grad, alpha: signed_lr_scalar);
}
+ #nullable disable
}, closure);
}
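
For context, a minimal training-step sketch using standard TorchSharp APIs (not part of the patch); the rewritten step() above now allocates each Scalar (momentum, 1 - dampening, weight decay, signed learning rate) once per parameter group instead of once per parameter per call:

```csharp
using TorchSharp;
using static TorchSharp.torch;

var model = nn.Linear(4, 2);
var optimizer = torch.optim.SGD(model.parameters(), 0.01, momentum: 0.9);

using var input = torch.randn(8, 4);
using var target = torch.randn(8, 2);

optimizer.zero_grad();
using var output = model.forward(input);
using var loss = nn.functional.mse_loss(output, target);
loss.backward();
optimizer.step();
```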
diff --git a/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs b/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs
index bb568ae68..9afcbd103 100644
--- a/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs
+++ b/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs
@@ -1917,6 +1917,9 @@ internal static extern IntPtr THSTensor_upsample_nearest3d(IntPtr input,
[DllImport("LibTorchSharp")]
internal static extern IntPtr THSTensor_sqrt(IntPtr tensor);
+ [DllImport("LibTorchSharp")]
+ internal static extern IntPtr THSTensor_square(IntPtr tensor);
+
[DllImport("LibTorchSharp")]
internal static extern IntPtr THSTensor_float_power(IntPtr tensor, IntPtr trg);
@@ -1998,6 +2001,9 @@ internal static extern IntPtr THSTensor_upsample_nearest3d(IntPtr input,
[DllImport("LibTorchSharp")]
internal static extern void THSTensor_sqrt_(IntPtr tensor);
+ [DllImport("LibTorchSharp")]
+ internal static extern void THSTensor_square_(IntPtr tensor);
+
[DllImport("LibTorchSharp")]
internal static extern IntPtr THSTensor_sign(IntPtr tensor);
diff --git a/src/TorchSharp/Scalar.cs b/src/TorchSharp/Scalar.cs
index cfe92cd98..4ca217f7c 100644
--- a/src/TorchSharp/Scalar.cs
+++ b/src/TorchSharp/Scalar.cs
@@ -5,6 +5,28 @@
#nullable enable
namespace TorchSharp
{
+ /// <summary>
+ /// Represents a leak detector for Scalar.
+ /// </summary>
+ public static partial class ScalarLeakDetector
+ {
+ /// <summary>
+ /// Allows implicit conversion from a .NET scalar value to Scalar.
+ /// FIXME: Defaults to true to remain compatible with 0.105.1 and earlier.
+ /// </summary>
+ public static bool allowImplicitConversionOperator { get; set; } = true;
+ /// <summary>
+ /// Throws an exception if implicit conversion is not allowed.
+ /// </summary>
+ /// <exception cref="InvalidCastException">Thrown when implicit conversion is disallowed.</exception>
+ public static void ThrowIfImplicitConversionNotAllowed()
+ {
+ if (!allowImplicitConversionOperator)
+ {
+ throw new InvalidCastException("Unexpected implicit conversion to Scalar.");
+ }
+ }
+ }
/// <summary>
/// Represents a dynamically typed scalar value to the LibTorch runtime.
/// </summary>
@@ -31,6 +53,7 @@ internal Scalar(IntPtr handle)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(byte value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -40,6 +63,7 @@ public static implicit operator Scalar(byte value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(sbyte value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -49,6 +73,7 @@ public static implicit operator Scalar(sbyte value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(short value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -58,6 +83,7 @@ public static implicit operator Scalar(short value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(int value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -67,6 +93,7 @@ public static implicit operator Scalar(int value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(long value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -77,6 +104,7 @@ public static implicit operator Scalar(long value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(Half value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
#endif
@@ -87,6 +115,7 @@ public static implicit operator Scalar(Half value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(float value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -96,6 +125,7 @@ public static implicit operator Scalar(float value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(double value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -105,6 +135,7 @@ public static implicit operator Scalar(double value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(bool value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -114,6 +145,7 @@ public static implicit operator Scalar(bool value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar((float, float) value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
@@ -123,6 +155,7 @@ public static implicit operator Scalar((float, float) value)
/// <param name="value">The scalar value.</param>
public static implicit operator Scalar(System.Numerics.Complex value)
{
+ ScalarLeakDetector.ThrowIfImplicitConversionNotAllowed();
return value.ToScalar();
}
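
A sketch of how the detector added above is meant to be used: with the flag turned off, every implicit numeric-to-Scalar conversion throws, so call sites that would silently allocate an undisposed Scalar surface as an InvalidCastException.

```csharp
using System;
using TorchSharp;

ScalarLeakDetector.allowImplicitConversionOperator = false;
try {
    using Scalar s = 5; // implicit int -> Scalar conversion: throws here
} catch (InvalidCastException e) {
    Console.WriteLine(e.Message); // "Unexpected implicit conversion to Scalar."
} finally {
    ScalarLeakDetector.allowImplicitConversionOperator = true; // restore the default
}
```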
diff --git a/src/TorchSharp/Tensor/Factories/Tensor.Factories.cs b/src/TorchSharp/Tensor/Factories/Tensor.Factories.cs
index b306c0cd7..657803e58 100644
--- a/src/TorchSharp/Tensor/Factories/Tensor.Factories.cs
+++ b/src/TorchSharp/Tensor/Factories/Tensor.Factories.cs
@@ -117,8 +117,10 @@ public static Tensor eye(long rows, long columns = -1L, ScalarType? dtype = null
///
public static Tensor normal(double mean, double std, ReadOnlySpan<long> size, ScalarType? dtype = null, Device? device = null, bool requires_grad = false, Generator? generator = null, string[]? names = null)
{
+ using var mean_scalar = mean.ToScalar();
+ using var std_scalar = std.ToScalar();
return randn(size, dtype, device, requires_grad: false, generator, names)
- .mul_(std).add_(mean).requires_grad_(requires_grad);
+ .mul_(std_scalar).add_(mean_scalar).requires_grad_(requires_grad);
}
///
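
Caller-visible behavior of torch.normal is unchanged; a quick sketch of the public surface:

```csharp
using TorchSharp;
using static TorchSharp.torch;

// The mean/std Scalars used internally by mul_/add_ are now disposed
// instead of being left to the finalizer.
using var samples = torch.normal(0.0, 2.0, new long[] { 3, 4 });
```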
diff --git a/src/TorchSharp/Tensor/Tensor.Math.cs b/src/TorchSharp/Tensor/Tensor.Math.cs
index fb7207638..e8342467c 100644
--- a/src/TorchSharp/Tensor/Tensor.Math.cs
+++ b/src/TorchSharp/Tensor/Tensor.Math.cs
@@ -55,7 +55,10 @@ public Tensor abs_()
///
public Tensor add(Tensor target)
{
- return add(target, 1);
+ // FIXME: Consider implementing another THSTensor_add variant that takes no alpha?
+ // at::Tensor::add has default c10::Scalar alpha = 1.
+ using Scalar one_scalar = 1.ToScalar();
+ return add(target, one_scalar);
}
///
@@ -79,7 +82,10 @@ public Tensor add(Tensor target, Scalar alpha)
///
public Tensor add(Scalar scalar)
{
- return add(scalar, 1);
+ // FIXME: Consider implementing another THSTensor_add_scalar variant that takes no alpha?
+ // at::Tensor::add has default c10::Scalar alpha = 1.
+ using Scalar one_scalar = 1.ToScalar();
+ return add(scalar, one_scalar);
}
///
@@ -103,7 +109,10 @@ public Tensor add(Scalar scalar, Scalar alpha)
///
public Tensor add_(Tensor target)
{
- return add_(target, 1);
+ // FIXME: Consider implementing another THSTensor_add_ variant that takes no alpha?
+ // at::Tensor::add_ has default c10::Scalar alpha = 1.
+ using Scalar one_scalar = 1.ToScalar();
+ return add_(target, one_scalar);
}
///
@@ -126,7 +135,10 @@ public Tensor add_(Tensor target, Scalar alpha)
///
public Tensor add_(Scalar scalar)
{
- return add_(scalar, 1);
+ // FIXME: Consider implementing another THSTensor_add_scalar_ variant that takes no alpha?
+ // at::Tensor::add_ has default c10::Scalar alpha = 1.
+ using Scalar one_scalar = 1.ToScalar();
+ return add_(scalar, one_scalar);
}
///
@@ -200,7 +212,10 @@ public Tensor addcdiv(Tensor tensor1, Tensor tensor2, Scalar value)
///
public Tensor addcdiv(Tensor tensor1, Tensor tensor2)
{
- return addcdiv(tensor1, tensor2, 1);
+ // FIXME: Consider implementing another THSTensor_addcdiv variant that takes no value?
+ // at::Tensor::addcdiv has default c10::Scalar value = 1.
+ using Scalar one_scalar = 1.ToScalar();
+ return addcdiv(tensor1, tensor2, one_scalar);
}
///
@@ -225,7 +240,10 @@ public Tensor addcdiv_(Tensor tensor1, Tensor tensor2, Scalar value)
///
public Tensor addcdiv_(Tensor tensor1, Tensor tensor2)
{
- return addcdiv_(tensor1, tensor2, 1);
+ // FIXME: Consider implementing another THSTensor_addcdiv_ variant that takes no value?
+ // at::Tensor::addcdiv_ has default c10::Scalar value = 1.
+ using Scalar one_scalar = 1.ToScalar();
+ return addcdiv_(tensor1, tensor2, one_scalar);
}
///
@@ -1542,12 +1560,6 @@ public Tensor rsqrt_()
return this;
}
- /// <summary>
- /// Computes the element-wise square
- /// </summary>
- /// <returns></returns>
- public Tensor square() => pow(2);
-
/// <summary>
/// Computes the element-wise square root
/// </summary>
@@ -1570,6 +1582,28 @@ public Tensor sqrt_()
return this;
}
+ /// <summary>
+ /// Computes the element-wise square
+ /// </summary>
+ /// <returns></returns>
+ public Tensor square()
+ {
+ var res = THSTensor_square(Handle);
+ if (res == IntPtr.Zero) { CheckForErrors(); }
+ return new Tensor(res);
+ }
+
+ /// <summary>
+ /// Computes the element-wise square, in place
+ /// </summary>
+ /// <returns></returns>
+ public Tensor square_()
+ {
+ THSTensor_square_(Handle);
+ CheckForErrors();
+ return this;
+ }
+
/// <summary>
/// Returns a new tensor with the signs (-1, 0, 1) of the elements of input.
/// </summary>
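
A usage sketch for the new bindings: square() now dispatches to the dedicated at::Tensor::square kernel added in THSTensorMath.cpp instead of materializing pow(2), and square_() squares in place.

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.tensor(new float[] { 1f, 2f, 3f, 4f });
using var sq = t.square(); // [1, 4, 9, 16]
t.square_();               // t itself becomes [1, 4, 9, 16]
```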
diff --git a/src/TorchSharp/Tensor/Tensor.Operators.cs b/src/TorchSharp/Tensor/Tensor.Operators.cs
index 31e8f7449..bd12e56d6 100644
--- a/src/TorchSharp/Tensor/Tensor.Operators.cs
+++ b/src/TorchSharp/Tensor/Tensor.Operators.cs
@@ -12,66 +12,209 @@ public partial class Tensor
public static Tensor operator +(Tensor left, Scalar right) => left.add(right);
public static Tensor operator +(Scalar left, Tensor right) => right.add(left);
- public static Tensor operator +(Tensor left, int right) => left.add(right);
- public static Tensor operator +(Tensor left, long right) => left.add(right);
- public static Tensor operator +(Tensor left, float right) => left.add(right);
- public static Tensor operator +(Tensor left, double right) => left.add(right);
-
- public static Tensor operator +(int left, Tensor right) => right.add(left);
- public static Tensor operator +(long left, Tensor right) => right.add(left);
- public static Tensor operator +(float left, Tensor right) => right.add(left);
- public static Tensor operator +(double left, Tensor right) => right.add(left);
+ public static Tensor operator +(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left + right_scalar;
+ }
+ public static Tensor operator +(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left + right_scalar;
+ }
+ public static Tensor operator +(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left + right_scalar;
+ }
+ public static Tensor operator +(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left + right_scalar;
+ }
+
+ public static Tensor operator +(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar + right;
+ }
+ public static Tensor operator +(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar + right;
+ }
+ public static Tensor operator +(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar + right;
+ }
+ public static Tensor operator +(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar + right;
+ }
public static Tensor operator *(Tensor left, Tensor right) => left.mul(right);
public static Tensor operator *(Tensor left, Scalar right) => left.mul(right);
public static Tensor operator *(Scalar left, Tensor right) => right.mul(left);
- public static Tensor operator *(Tensor left, int right) => left.mul(right);
- public static Tensor operator *(Tensor left, long right) => left.mul(right);
- public static Tensor operator *(Tensor left, float right) => left.mul(right);
- public static Tensor operator *(Tensor left, double right) => left.mul(right);
-
- public static Tensor operator *(int left, Tensor right) => right.mul(left);
- public static Tensor operator *(long left, Tensor right) => right.mul(left);
- public static Tensor operator *(float left, Tensor right) => right.mul(left);
- public static Tensor operator *(double left, Tensor right) => right.mul(left);
+ public static Tensor operator *(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left * right_scalar;
+ }
+ public static Tensor operator *(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left * right_scalar;
+ }
+ public static Tensor operator *(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left * right_scalar;
+ }
+ public static Tensor operator *(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left * right_scalar;
+ }
+
+ public static Tensor operator *(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar * right;
+ }
+ public static Tensor operator *(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar * right;
+ }
+ public static Tensor operator *(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar * right;
+ }
+ public static Tensor operator *(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar * right;
+ }
public static Tensor operator -(Tensor left, Tensor right) => left.sub(right);
public static Tensor operator -(Tensor left, Scalar right) => left.sub(right);
public static Tensor operator -(Scalar left, Tensor right) => right.negative().add(left);
- public static Tensor operator -(Tensor left, int right) => left.sub(right);
- public static Tensor operator -(Tensor left, long right) => left.sub(right);
- public static Tensor operator -(Tensor left, float right) => left.sub(right);
- public static Tensor operator -(Tensor left, double right) => left.sub(right);
-
- public static Tensor operator -(int left, Tensor right) => right.negative().add(left);
- public static Tensor operator -(long left, Tensor right) => right.negative().add(left);
- public static Tensor operator -(float left, Tensor right) => right.negative().add(left);
- public static Tensor operator -(double left, Tensor right) => right.negative().add(left);
+ public static Tensor operator -(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left - right_scalar;
+ }
+ public static Tensor operator -(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left - right_scalar;
+ }
+ public static Tensor operator -(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left - right_scalar;
+ }
+ public static Tensor operator -(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left - right_scalar;
+ }
+
+ public static Tensor operator -(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar - right;
+ }
+ public static Tensor operator -(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar - right;
+ }
+ public static Tensor operator -(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar - right;
+ }
+ public static Tensor operator -(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar - right;
+ }
public static Tensor operator /(Tensor left, Tensor right) => left.div(right);
public static Tensor operator /(Tensor left, Scalar right) => left.div(right);
public static Tensor operator /(Scalar left, Tensor right) => right.reciprocal().mul(left);
- public static Tensor operator /(Tensor left, int right) => left.div(right);
- public static Tensor operator /(Tensor left, long right) => left.div(right);
- public static Tensor operator /(Tensor left, float right) => left.div(right);
- public static Tensor operator /(Tensor left, double right) => left.div(right);
-
- public static Tensor operator /(int left, Tensor right) => right.reciprocal().mul(left);
- public static Tensor operator /(long left, Tensor right) => right.reciprocal().mul(left);
- public static Tensor operator /(float left, Tensor right) => right.reciprocal().mul(left);
- public static Tensor operator /(double left, Tensor right) => right.reciprocal().mul(left);
-
+ public static Tensor operator /(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left / right_scalar;
+ }
+ public static Tensor operator /(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left / right_scalar;
+ }
+ public static Tensor operator /(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left / right_scalar;
+ }
+ public static Tensor operator /(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left / right_scalar;
+ }
+
+ public static Tensor operator /(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar / right;
+ }
+ public static Tensor operator /(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar / right;
+ }
+ public static Tensor operator /(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar / right;
+ }
+ public static Tensor operator /(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar / right;
+ }
public static Tensor operator %(Tensor left, Tensor right) => left.remainder(right);
public static Tensor operator %(Tensor left, Scalar right) => left.remainder(right);
- public static Tensor operator %(Tensor left, int right) => left.remainder(right);
- public static Tensor operator %(Tensor left, long right) => left.remainder(right);
- public static Tensor operator %(Tensor left, float right) => left.remainder(right);
- public static Tensor operator %(Tensor left, double right) => left.remainder(right);
+ public static Tensor operator %(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left % right_scalar;
+ }
+ public static Tensor operator %(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left % right_scalar;
+ }
+ public static Tensor operator %(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left % right_scalar;
+ }
+ public static Tensor operator %(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left % right_scalar;
+ }
public static Tensor operator &(Tensor left, Tensor right) => left.bitwise_and(right);
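
The numeric operator overloads behave exactly as before; each number is now wrapped in a short-lived Scalar that the operator disposes before returning. A quick sketch (intermediate tensors are left undisposed for brevity):

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.ones(3);
using var u = 2.0 * t + 1.0; // elementwise 3.0
using var v = t / 4 - 0.25;  // elementwise 0.0
using var w = t % 2;         // elementwise 1.0
```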
diff --git a/src/TorchSharp/Tensor/Tensor.cs b/src/TorchSharp/Tensor/Tensor.cs
index c17995a52..a7fc52f61 100644
--- a/src/TorchSharp/Tensor/Tensor.cs
+++ b/src/TorchSharp/Tensor/Tensor.cs
@@ -14,6 +14,24 @@
#nullable enable
namespace TorchSharp
{
+ public static partial class TensorLeakDetector
+ {
+ /// <summary>
+ /// Allows implicit conversion to torch.Tensor.
+ /// FIXME: Defaults to true to remain compatible with 0.105.1 and earlier.
+ /// </summary>
+ public static bool allowImplicitConversionOperator { get; set; } = true;
+ /// <summary>
+ /// Throws an exception if implicit conversion is not allowed.
+ /// </summary>
+ /// <exception cref="InvalidCastException">Thrown when implicit conversion is disallowed.</exception>
+ public static void ThrowIfImplicitConversionNotAllowed()
+ {
+ if (!allowImplicitConversionOperator) {
+ throw new InvalidCastException("Unexpected implicit conversion to torch.Tensor.");
+ }
+ }
+ }
public static partial class torch
{
///
@@ -652,6 +670,31 @@ public Tensor fill_(Scalar value)
return this;
}
+ ///
+ public Tensor fill_(byte value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+ ///
+ public Tensor fill_(sbyte value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+ ///
+ public Tensor fill_(short value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+ ///
+ public Tensor fill_(int value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+ ///
+ public Tensor fill_(long value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+#if NET6_0_OR_GREATER
+ ///
+ public Tensor fill_(Half value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+#endif
+ ///
+ public Tensor fill_(float value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+ ///
+ public Tensor fill_(double value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+ ///
+ public Tensor fill_(bool value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+ ///
+ public Tensor fill_((float, float) value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+ ///
+ public Tensor fill_(System.Numerics.Complex value) { using var value_scalar = value.ToScalar(); return fill_(value_scalar); }
+
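
A sketch of the typed fill_ overloads added above; the temporary Scalar now lives only for the duration of the call:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.zeros(2, 3);
t.fill_(7);    // int overload
t.fill_(1.5);  // double overload
```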
/// <summary>
/// Gets the type of the tensor elements.
/// </summary>
@@ -1665,6 +1708,31 @@ public Tensor index_put_(Scalar value, params TensorIndex[] indices)
}
}
+ ///
+ public Tensor index_put_(byte value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(sbyte value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(short value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(int value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(long value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+#if NET6_0_OR_GREATER
+ ///
+ public Tensor index_put_(Half value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+#endif
+ ///
+ public Tensor index_put_(float value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(double value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(bool value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_((float, float) value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(System.Numerics.Complex value, params TensorIndex[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+
/// <summary>
/// Index into the tensor using Python-like indexing expressions and place a scalar tensor at the index.
/// </summary>
@@ -1673,6 +1741,31 @@ public Tensor index_put_(Scalar value, params Tensor[] indices)
return index_put_(value, indices.Select(t => TensorIndex.Tensor(t)).ToArray());
}
+ ///
+ public Tensor index_put_(byte value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(sbyte value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(short value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(int value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(long value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+#if NET6_0_OR_GREATER
+ ///
+ public Tensor index_put_(Half value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+#endif
+ ///
+ public Tensor index_put_(float value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(double value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(bool value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_((float, float) value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+ ///
+ public Tensor index_put_(System.Numerics.Complex value, params Tensor[] indices) { using var value_scalar = value.ToScalar(); return index_put_(value_scalar, indices); }
+
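
Illustrative use of the typed index_put_ overloads, which convert the value to a Scalar internally and dispose it before returning:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.zeros(4);
using var idx = torch.tensor(new long[] { 0, 2 });
t.index_put_(1.0, idx); // t is now [1, 0, 1, 0]
```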
/// <summary>
/// Returns a new tensor which indexes the input tensor along dimension dim using the entries in index which is a LongTensor.
/// </summary>
@@ -1791,6 +1884,31 @@ public Tensor index_add(long dim, Tensor index, Tensor source, Scalar alpha)
return new Tensor(res);
}
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, byte alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, sbyte alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, short alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, int alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, long alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+#if NET6_0_OR_GREATER
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, Half alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+#endif
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, float alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, double alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, bool alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); } // FIXME: Well defined?
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, (float, float) alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add(long dim, Tensor index, Tensor source, System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add(dim, index, source, alpha_scalar); }
+
/// <summary>
/// Accumulate, in place, the elements of alpha times source into the input tensor by adding to the indices in the order given in index.
/// </summary>
@@ -1811,6 +1929,31 @@ public Tensor index_add_(long dim, Tensor index, Tensor source, Scalar alpha)
return this;
}
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, byte alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, sbyte alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, short alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, int alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, long alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+#if NET6_0_OR_GREATER
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, Half alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+#endif
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, float alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, double alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, bool alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); } // FIXME: Well defined?
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, (float, float) alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+ ///
+ public Tensor index_add_(long dim, Tensor index, Tensor source, System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return index_add_(dim, index, source, alpha_scalar); }
+
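
A short sketch of the typed index_add_ overloads: alpha scales source before the rows are accumulated at the given indices.

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.zeros(3, 2);
using var index = torch.tensor(new long[] { 0, 2 });
using var source = torch.ones(2, 2);
t.index_add_(0, index, source, 2.0); // rows 0 and 2 become [2, 2]
```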
/// <summary>
/// Copies the elements of the source tensor into the input tensor by selecting the indices in the order given in index.
/// </summary>
@@ -1870,6 +2013,31 @@ public Tensor index_fill(long dim, Tensor index, Scalar value)
return new Tensor(res);
}
+ ///
+ public Tensor index_fill(long dim, Tensor index, byte value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill(long dim, Tensor index, sbyte value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill(long dim, Tensor index, short value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill(long dim, Tensor index, int value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill(long dim, Tensor index, long value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+#if NET6_0_OR_GREATER
+ ///
+ public Tensor index_fill(long dim, Tensor index, Half value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+#endif
+ ///
+ public Tensor index_fill(long dim, Tensor index, float value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill(long dim, Tensor index, double value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill(long dim, Tensor index, bool value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill(long dim, Tensor index, (float, float) value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill(long dim, Tensor index, System.Numerics.Complex value) { using var value_scalar = value.ToScalar(); return index_fill(dim, index, value_scalar); }
+
/// <summary>
/// Fills, in place, the elements of the input tensor with the given value by selecting the indices in the order given in index.
/// </summary>
@@ -1889,6 +2057,31 @@ public Tensor index_fill_(long dim, Tensor index, Scalar value)
return this;
}
+ ///
+ public Tensor index_fill_(long dim, Tensor index, byte value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill_(long dim, Tensor index, sbyte value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill_(long dim, Tensor index, short value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill_(long dim, Tensor index, int value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill_(long dim, Tensor index, long value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+#if NET6_0_OR_GREATER
+ ///
+ public Tensor index_fill_(long dim, Tensor index, Half value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+#endif
+ ///
+ public Tensor index_fill_(long dim, Tensor index, float value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill_(long dim, Tensor index, double value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill_(long dim, Tensor index, bool value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill_(long dim, Tensor index, (float, float) value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+ ///
+ public Tensor index_fill_(long dim, Tensor index, System.Numerics.Complex value) { using var value_scalar = value.ToScalar(); return index_fill_(dim, index, value_scalar); }
+
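
And the equivalent for the typed index_fill_ overloads:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.zeros(2, 3);
using var index = torch.tensor(new long[] { 1 });
t.index_fill_(1, index, 5.0); // column 1 becomes 5
```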
/// <summary>
/// Returns a tensor with the same data and number of elements as the input tensor but with the specified shape.
/// </summary>
@@ -2211,6 +2404,21 @@ public Tensor threshold(Scalar threshold, Scalar value)
return new Tensor(res);
}
+ // FIXME: Consider cases where threshold and value are not the same type?
+ public Tensor threshold(byte threshold, byte value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(sbyte threshold, sbyte value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(short threshold, short value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(int threshold, int value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(long threshold, long value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor threshold(Half threshold, Half value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+#endif
+ public Tensor threshold(float threshold, float value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(double threshold, double value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); }
+ public Tensor threshold(bool threshold, bool value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); } // FIXME: Well defined?
+ public Tensor threshold((float, float) threshold, (float, float) value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); } // FIXME: Well defined?
+ public Tensor threshold(System.Numerics.Complex threshold, System.Numerics.Complex value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return this.threshold(threshold_scalar, value_scalar); } // FIXME: Well defined?
+
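
For reference, threshold() keeps elements greater than threshold and replaces the rest with value; a sketch using the new float overload:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.tensor(new float[] { -1f, 0.5f, 2f });
using var r = t.threshold(0f, 0f); // [0, 0.5, 2]
```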
public Tensor threshold_(Scalar threshold, Scalar value)
{
NativeMethods.THSTensor_threshold_(Handle, threshold.Handle, value.Handle);
@@ -2218,6 +2426,21 @@ public Tensor threshold_(Scalar threshold, Scalar value)
return this;
}
+ // FIXME: Consider cases where threshold and value are not the same type?
+ public Tensor threshold_(byte threshold, byte value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(sbyte threshold, sbyte value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(short threshold, short value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(int threshold, int value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(long threshold, long value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor threshold_(Half threshold, Half value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+#endif
+ public Tensor threshold_(float threshold, float value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(double threshold, double value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); }
+ public Tensor threshold_(bool threshold, bool value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); } // FIXME: Well defined?
+ public Tensor threshold_((float, float) threshold, (float, float) value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); } // FIXME: Well defined?
+ public Tensor threshold_(System.Numerics.Complex threshold, System.Numerics.Complex value) { using var threshold_scalar = threshold.ToScalar(); using var value_scalar = value.ToScalar(); return threshold_(threshold_scalar, value_scalar); } // FIXME: Well defined?
+
/// <summary>
/// Returns a view of the tensor conjugated and with the last two dimensions transposed.
/// </summary>
@@ -2746,11 +2969,7 @@ public Tensor positive()
public Tensor softmax(long dim, ScalarType? dtype = null) =>
torch.special.softmax(this, dim, dtype);
-
- public Tensor softplus(double beta = 1, double threshold = 20) =>
- softplus1(beta, threshold);
-
- private Tensor softplus1(Scalar beta, Scalar threshold)
+ public Tensor softplus(Scalar beta, Scalar threshold) // FIXME: No default beta and threshold?
{
var res = NativeMethods.THSTensor_softplus(Handle, beta.Handle, threshold.Handle);
if (res == IntPtr.Zero)
@@ -2758,6 +2977,22 @@ private Tensor softplus1(Scalar beta, Scalar threshold)
return new Tensor(res);
}
+ // FIXME: Consider cases where beta and threshold are not the same type?
+ public Tensor softplus(byte beta = 1, byte threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+ public Tensor softplus(sbyte beta = 1, sbyte threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+ public Tensor softplus(short beta = 1, short threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+ public Tensor softplus(int beta = 1, int threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+ public Tensor softplus(long beta = 1, long threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined?
+#if NET6_0_OR_GREATER
+ public Tensor softplus(Half beta, Half threshold) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: No default beta and threshold?
+#endif
+ public Tensor softplus(float beta = 1, float threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); }
+ public Tensor softplus(double beta = 1, double threshold = 20) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); }
+ public Tensor softplus(bool beta, bool threshold) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined? No default beta and threshold?
+ public Tensor softplus((float, float) beta, (float, float) threshold) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined? No default beta and threshold?
+ public Tensor softplus(System.Numerics.Complex beta, System.Numerics.Complex threshold) { using var beta_scalar = beta.ToScalar(); using var threshold_scalar = threshold.ToScalar(); return softplus(beta_scalar, threshold_scalar); } // FIXME: Well defined? No default beta and threshold?
+ public Tensor softplus() => softplus(1.0, 20.0);
+
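
A sketch of the softplus surface after this change, where softplus(x) = (1/beta) * log(1 + exp(beta * x)) and inputs above threshold pass through unchanged:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.tensor(new float[] { -2f, 0f, 25f });
using var a = t.softplus();          // defaults: beta = 1, threshold = 20
using var b = t.softplus(2.0, 20.0); // double overload
```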
public Tensor ravel()
{
var res = NativeMethods.THSTensor_ravel(Handle);
@@ -2816,10 +3051,6 @@ public Tensor rrelu_(double lower = one_eighth, double upper = one_third)
return this;
}
- public Tensor celu() => this.celu(1.0);
-
- public Tensor celu_() => this.celu_(1.0);
-
public Tensor celu(Scalar alpha)
{
var res = NativeMethods.THSTensor_celu(Handle, alpha.Handle);
@@ -2828,6 +3059,21 @@ public Tensor celu(Scalar alpha)
return new Tensor(res);
}
+ public Tensor celu(byte alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(sbyte alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(short alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(int alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(long alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor celu(Half alpha) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); } // FIXME: No default alpha?
+#endif
+ public Tensor celu(float alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(double alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); }
+ public Tensor celu(bool alpha) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu((float, float) alpha) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu(System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return celu(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu() => celu(1.0);
+
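
Illustrative use of the celu overloads, where celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)):

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.tensor(new float[] { -1f, 2f });
using var a = t.celu();    // alpha defaults to 1.0
using var b = t.celu(0.5); // double overload
```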
public Tensor celu_(Scalar alpha)
{
NativeMethods.THSTensor_celu_(Handle, alpha.Handle);
@@ -2835,9 +3081,20 @@ public Tensor celu_(Scalar alpha)
return this;
}
- public Tensor elu(double alpha = 1) => elu(alpha, 1.0, 1.0);
-
- public Tensor elu_(double alpha = 1) => elu(alpha, 1.0, 1.0);
+ public Tensor celu_(byte alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(sbyte alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(short alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(int alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(long alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor celu_(Half alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: No default alpha?
+#endif
+ public Tensor celu_(float alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(double alpha = 1) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); }
+ public Tensor celu_(bool alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu_((float, float) alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu_(System.Numerics.Complex alpha) { using var alpha_scalar = alpha.ToScalar(); return celu_(alpha_scalar); } // FIXME: Well defined? No default alpha?
+ public Tensor celu_() => celu_(1.0);
public Tensor elu(Scalar alpha, Scalar scale, Scalar input_scale)
{
@@ -2847,6 +3104,22 @@ public Tensor elu(Scalar alpha, Scalar scale, Scalar input_scale)
return new Tensor(res);
}
+ // FIXME: Consider cases where alpha, scale, and input_scale are not the same type?
+ public Tensor elu(byte alpha = 1, byte scale = 1, byte input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(sbyte alpha = 1, sbyte scale = 1, sbyte input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(short alpha = 1, short scale = 1, short input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(int alpha = 1, int scale = 1, int input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(long alpha = 1, long scale = 1, long input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor elu(Half alpha, Half scale, Half input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: No default alpha, scale and input_scale?
+#endif
+ public Tensor elu(float alpha = 1, float scale = 1, float input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(double alpha = 1, double scale = 1, double input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu(bool alpha, bool scale, bool input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu((float, float) alpha, (float, float) scale, (float, float) input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu(System.Numerics.Complex alpha, System.Numerics.Complex scale, System.Numerics.Complex input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu() => elu(1.0, 1.0, 1.0);
+
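
With the defaults restored by the parameterless overload, elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise. A sketch:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.tensor(new float[] { -1f, 2f });
using var a = t.elu();              // alpha = scale = input_scale = 1
using var b = t.elu(0.5, 1.0, 1.0); // double overload
```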
public Tensor elu_(Scalar alpha, Scalar scale, Scalar input_scale)
{
NativeMethods.THSTensor_elu_(Handle, alpha.Handle, scale.Handle, input_scale.Handle);
@@ -2854,6 +3127,22 @@ public Tensor elu_(Scalar alpha, Scalar scale, Scalar input_scale)
return this;
}
+ // FIXME: Consider cases where alpha, scale, and input_scale are not the same type?
+ public Tensor elu_(byte alpha = 1, byte scale = 1, byte input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(sbyte alpha = 1, sbyte scale = 1, sbyte input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(short alpha = 1, short scale = 1, short input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(int alpha = 1, int scale = 1, int input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(long alpha = 1, long scale = 1, long input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor elu_(Half alpha, Half scale, Half input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: No default alpha, scale and input_scale?
+#endif
+ public Tensor elu_(float alpha = 1, float scale = 1, float input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(double alpha = 1, double scale = 1, double input_scale = 1) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); }
+ public Tensor elu_(bool alpha, bool scale, bool input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu_((float, float) alpha, (float, float) scale, (float, float) input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu_(System.Numerics.Complex alpha, System.Numerics.Complex scale, System.Numerics.Complex input_scale) { using var alpha_scalar = alpha.ToScalar(); using var scale_scalar = scale.ToScalar(); using var input_scale_scalar = input_scale.ToScalar(); return elu_(alpha_scalar, scale_scalar, input_scale_scalar); } // FIXME: Well defined? No default alpha, scale and input_scale?
+ public Tensor elu_() => elu_(1.0, 1.0, 1.0);
+
public Tensor gelu()
{
var res = NativeMethods.THSTensor_gelu(Handle);
@@ -2915,6 +3204,20 @@ public Tensor hardtanh(Scalar min, Scalar max)
return this;
}
+ public Tensor hardtanh(byte min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(sbyte min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(short min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(int min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(long min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor hardtanh(Half min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+#endif
+ public Tensor hardtanh(float min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(double min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); }
+ public Tensor hardtanh(bool min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); } // FIXME: Well defined?
+ public Tensor hardtanh((float, float) min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); } // FIXME: Well defined?
+ public Tensor hardtanh(System.Numerics.Complex min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh(min_scalar, max_scalar); } // FIXME: Well defined?
+
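
hardtanh clamps values into [min, max]; a sketch using the double/double overload:

```csharp
using TorchSharp;
using static TorchSharp.torch;

using var t = torch.tensor(new float[] { -3f, 0.5f, 3f });
using var r = t.hardtanh(-1.0, 1.0); // [-1, 0.5, 1]
```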
public Tensor hardtanh_(Scalar min, Scalar max)
{
NativeMethods.THSTensor_hardtanh_(Handle, min.Handle, max.Handle);
@@ -2922,6 +3225,20 @@ public Tensor hardtanh_(Scalar min, Scalar max)
return this;
}
+ public Tensor hardtanh_(byte min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(sbyte min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(short min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(int min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(long min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor hardtanh_(Half min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+#endif
+ public Tensor hardtanh_(float min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(double min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); }
+ public Tensor hardtanh_(bool min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); } // FIXME: Well defined?
+ public Tensor hardtanh_((float, float) min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); } // FIXME: Well defined?
+ public Tensor hardtanh_(System.Numerics.Complex min, double max) { using var min_scalar = min.ToScalar(); using var max_scalar = max.ToScalar(); return hardtanh_(min_scalar, max_scalar); } // FIXME: Well defined?
+
public Tensor heaviside(Tensor other)
{
var res = NativeMethods.THSTensor_heaviside(Handle, other.Handle);
@@ -3060,6 +3377,20 @@ public Tensor leaky_relu(Scalar negative_slope)
return new Tensor(res);
}
+ public Tensor leaky_relu(byte negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu(sbyte negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(short negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(int negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(long negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor leaky_relu(Half negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+#endif
+ public Tensor leaky_relu(float negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(double negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); }
+ public Tensor leaky_relu(bool negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu((float, float) negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu(System.Numerics.Complex negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu(negative_slope_scalar); } // FIXME: Well defined?
+
public Tensor leaky_relu_(Scalar negative_slope)
{
NativeMethods.THSTensor_leaky_relu_(Handle, negative_slope.Handle);
@@ -3067,6 +3398,20 @@ public Tensor leaky_relu_(Scalar negative_slope)
return this;
}
+ public Tensor leaky_relu_(byte negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu_(sbyte negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(short negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(int negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(long negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+#if NET6_0_OR_GREATER
+ public Tensor leaky_relu_(Half negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+#endif
+ public Tensor leaky_relu_(float negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(double negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); }
+ public Tensor leaky_relu_(bool negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu_((float, float) negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); } // FIXME: Well defined?
+ public Tensor leaky_relu_(System.Numerics.Complex negative_slope) { using var negative_slope_scalar = negative_slope.ToScalar(); return leaky_relu_(negative_slope_scalar); } // FIXME: Well defined?
+
public Tensor selu()
{
var res = NativeMethods.THSTensor_selu(Handle);
@@ -3498,7 +3843,7 @@ public Tensor erfinv_()
public Tensor eq(Tensor target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
var res = NativeMethods.THSTensor_eq(Handle, target.Handle);
if (res == IntPtr.Zero) { CheckForErrors(); }
return new Tensor(res);
@@ -3508,7 +3853,7 @@ public Tensor eq(Tensor target)
public Tensor eq_(Tensor target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
NativeMethods.THSTensor_eq_(Handle, target.Handle);
CheckForErrors();
return this;
@@ -3516,7 +3861,7 @@ public Tensor eq_(Tensor target)
public Tensor eq(Scalar target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
var res = NativeMethods.THSTensor_eq_scalar(Handle, target.Handle);
if (res == IntPtr.Zero) { CheckForErrors(); }
return new Tensor(res);
@@ -3524,7 +3869,7 @@ public Tensor eq(Scalar target)
public Tensor eq_(Scalar target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
NativeMethods.THSTensor_eq_scalar_(Handle, target.Handle);
CheckForErrors();
return this;
@@ -3555,7 +3900,7 @@ public bool allclose(Tensor target, double rtol = 1e-05, double atol = 1e-08, bo
public Tensor ge(Tensor target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
var res = NativeMethods.THSTensor_ge(Handle, target.Handle);
if (res == IntPtr.Zero) { CheckForErrors(); }
return new Tensor(res);
@@ -3565,7 +3910,7 @@ public Tensor ge(Tensor target)
public Tensor ge_(Tensor target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
NativeMethods.THSTensor_ge_(Handle, target.Handle);
CheckForErrors();
return this;
@@ -3573,7 +3918,7 @@ public Tensor ge_(Tensor target)
public Tensor ge(Scalar target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
var res = NativeMethods.THSTensor_ge_scalar(Handle, target.Handle);
if (res == IntPtr.Zero) { CheckForErrors(); }
return new Tensor(res);
@@ -3581,7 +3926,7 @@ public Tensor ge(Scalar target)
public Tensor ge_(Scalar target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
NativeMethods.THSTensor_ge_scalar_(Handle, target.Handle);
CheckForErrors();
return this;
@@ -3589,7 +3934,7 @@ public Tensor ge_(Scalar target)
public Tensor gt(Tensor target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
var res = NativeMethods.THSTensor_gt(Handle, target.Handle);
if (res == IntPtr.Zero) { CheckForErrors(); }
return new Tensor(res);
@@ -3599,7 +3944,7 @@ public Tensor gt(Tensor target)
public Tensor gt_(Tensor target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
NativeMethods.THSTensor_gt_(Handle, target.Handle);
CheckForErrors();
return this;
@@ -3607,7 +3952,7 @@ public Tensor gt_(Tensor target)
public Tensor gt(Scalar target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
var res = NativeMethods.THSTensor_gt_scalar(Handle, target.Handle);
if (res == IntPtr.Zero) { CheckForErrors(); }
return new Tensor(res);
@@ -3615,7 +3960,7 @@ public Tensor gt(Scalar target)
public Tensor gt_(Scalar target)
{
- if (target is null) return false;
+ if (target is null) return torch.tensor(false);
NativeMethods.THSTensor_gt_scalar_(Handle, target.Handle);
CheckForErrors();
return this;
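A quick illustration of what the corrected guards buy (sketch only; `maybe` is a hypothetical local). The old `return false;` depended on the implicit bool-to-Tensor conversion; the explicit `torch.tensor(false)` keeps the allocation visible and the comparison null-safe:

    Tensor? maybe = null;
    using var t = torch.ones(3);
    using var cmp = t.eq(maybe);            // yields torch.tensor(false) instead of throwing
    Console.WriteLine(cmp.ToBoolean());     // False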
@@ -6225,94 +6570,730 @@ public Tensor where(Tensor condition, Tensor y)
// Operators overloading
- public static Tensor operator ==(Tensor left, Tensor right)
+ public static Tensor operator ==(Tensor left, Tensor right) => left.eq(right);
+
+ public static Tensor operator ==(Tensor left, Scalar right) => left.eq(right);
+
+ public static Tensor operator ==(Scalar left, Tensor right) => right.eq(left);
+
+ public static Tensor operator ==(Tensor left, byte right)
{
- return left.eq(right);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator ==(Tensor left, Scalar right)
+ public static Tensor operator ==(Tensor left, sbyte right)
{
- return left.eq(right);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator ==(Scalar left, Tensor right)
+ public static Tensor operator ==(Tensor left, short right)
{
- return right.eq(left);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator !=(Tensor left, Tensor right)
+ public static Tensor operator ==(Tensor left, int right)
{
- return left.ne(right);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator !=(Tensor left, Scalar right)
+ public static Tensor operator ==(Tensor left, long right)
{
- return left.ne(right);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator !=(Scalar left, Tensor right)
+#if NET6_0_OR_GREATER
+ public static Tensor operator ==(Tensor left, Half right)
{
- return right.ne(left);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator <(Tensor left, Tensor right)
+#endif
+ public static Tensor operator ==(Tensor left, float right)
{
- return left.lt(right);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator <(Tensor left, Scalar right)
+ public static Tensor operator ==(Tensor left, double right)
{
- return left.lt(right);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator <(Scalar left, Tensor right)
+ public static Tensor operator ==(Tensor left, bool right)
{
- return right.gt(left);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator <=(Tensor left, Tensor right)
+ public static Tensor operator ==(Tensor left, (float, float) right)
{
- return left.le(right);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator <=(Tensor left, Scalar right)
+ public static Tensor operator ==(Tensor left, System.Numerics.Complex right)
{
- return left.le(right);
+ using Scalar right_scalar = right.ToScalar();
+ return left == right_scalar;
}
-
- public static Tensor operator <=(Scalar left, Tensor right)
+ public static Tensor operator ==(byte left, Tensor right)
{
- return right.ge(left);
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
}
-
- public static Tensor operator >(Tensor left, Tensor right)
+ public static Tensor operator ==(sbyte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+ public static Tensor operator ==(short left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+ public static Tensor operator ==(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+ public static Tensor operator ==(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator ==(Half left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+#endif
+ public static Tensor operator ==(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+ public static Tensor operator ==(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+ public static Tensor operator ==(bool left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+ public static Tensor operator ==((float, float) left, Tensor right)
{
- return left.gt(right);
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
}
+ public static Tensor operator ==(System.Numerics.Complex left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar == right;
+ }
+
+ public static Tensor operator !=(Tensor left, Tensor right) => left.ne(right);
- public static Tensor operator >(Tensor left, Scalar right)
+ public static Tensor operator !=(Tensor left, Scalar right) => left.ne(right);
+
+ public static Tensor operator !=(Scalar left, Tensor right) => right.ne(left);
+
+ public static Tensor operator !=(Tensor left, byte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(Tensor left, sbyte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(Tensor left, short right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator !=(Tensor left, Half right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+#endif
+ public static Tensor operator !=(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(Tensor left, bool right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(Tensor left, (float, float) right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(Tensor left, System.Numerics.Complex right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left != right_scalar;
+ }
+ public static Tensor operator !=(byte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+ public static Tensor operator !=(sbyte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+ public static Tensor operator !=(short left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+ public static Tensor operator !=(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+ public static Tensor operator !=(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator !=(Half left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+#endif
+ public static Tensor operator !=(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+ public static Tensor operator !=(double left, Tensor right)
{
- return left.gt(right);
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+ public static Tensor operator !=(bool left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+ public static Tensor operator !=((float, float) left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
+ }
+ public static Tensor operator !=(System.Numerics.Complex left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar != right;
}
- public static Tensor operator >(Scalar left, Tensor right)
+ public static Tensor operator <(Tensor left, Tensor right) => left.lt(right);
+
+ public static Tensor operator <(Tensor left, Scalar right) => left.lt(right);
+
+ public static Tensor operator <(Scalar left, Tensor right) => right.gt(left);
+
+ public static Tensor operator <(Tensor left, byte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+ public static Tensor operator <(Tensor left, sbyte right)
{
- return right.lt(left);
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
}
+ public static Tensor operator <(Tensor left, short right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+ public static Tensor operator <(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+ public static Tensor operator <(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator <(Tensor left, Half right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+#endif
+ public static Tensor operator <(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+ public static Tensor operator <(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+ public static Tensor operator <(Tensor left, bool right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+ public static Tensor operator <(Tensor left, (float, float) right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+ public static Tensor operator <(Tensor left, System.Numerics.Complex right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left < right_scalar;
+ }
+ public static Tensor operator <(byte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+ public static Tensor operator <(sbyte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+ public static Tensor operator <(short left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+ public static Tensor operator <(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+ public static Tensor operator <(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator <(Half left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+#endif
+ public static Tensor operator <(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+ public static Tensor operator <(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+ public static Tensor operator <(bool left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+ public static Tensor operator <((float, float) left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+ public static Tensor operator <(System.Numerics.Complex left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar < right;
+ }
+
+ public static Tensor operator <=(Tensor left, Tensor right) => left.le(right);
- public static Tensor operator >=(Tensor left, Tensor right)
+ public static Tensor operator <=(Tensor left, Scalar right) => left.le(right);
+
+ public static Tensor operator <=(Scalar left, Tensor right) => right.ge(left);
+
+ public static Tensor operator <=(Tensor left, byte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(Tensor left, sbyte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(Tensor left, short right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator <=(Tensor left, Half right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+#endif
+ public static Tensor operator <=(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(Tensor left, bool right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(Tensor left, (float, float) right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(Tensor left, System.Numerics.Complex right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left <= right_scalar;
+ }
+ public static Tensor operator <=(byte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+ public static Tensor operator <=(sbyte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+ public static Tensor operator <=(short left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+ public static Tensor operator <=(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+ public static Tensor operator <=(long left, Tensor right)
{
- return left.ge(right);
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
}
+#if NET6_0_OR_GREATER
+ public static Tensor operator <=(Half left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+#endif
+ public static Tensor operator <=(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+ public static Tensor operator <=(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+ public static Tensor operator <=(bool left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+ public static Tensor operator <=((float, float) left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+ public static Tensor operator <=(System.Numerics.Complex left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar <= right;
+ }
+
+ public static Tensor operator >(Tensor left, Tensor right) => left.gt(right);
+
+ public static Tensor operator >(Tensor left, Scalar right) => left.gt(right);
+
+ public static Tensor operator >(Scalar left, Tensor right) => right.lt(left);
- public static Tensor operator >=(Tensor left, Scalar right)
+ public static Tensor operator >(Tensor left, byte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(Tensor left, sbyte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(Tensor left, short right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator >(Tensor left, Half right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+#endif
+ public static Tensor operator >(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(Tensor left, bool right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(Tensor left, (float, float) right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(Tensor left, System.Numerics.Complex right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left > right_scalar;
+ }
+ public static Tensor operator >(byte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+ public static Tensor operator >(sbyte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+ public static Tensor operator >(short left, Tensor right)
{
- return left.ge(right);
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
}
+ public static Tensor operator >(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+ public static Tensor operator >(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator >(Half left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+#endif
+ public static Tensor operator >(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+ public static Tensor operator >(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+ public static Tensor operator >(bool left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+ public static Tensor operator >((float, float) left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+ public static Tensor operator >(System.Numerics.Complex left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar > right;
+ }
+
+ public static Tensor operator >=(Tensor left, Tensor right) => left.ge(right);
+
+ public static Tensor operator >=(Tensor left, Scalar right) => left.ge(right);
- public static Tensor operator >=(Scalar left, Tensor right)
+ public static Tensor operator >=(Scalar left, Tensor right) => right.le(left);
+
+ public static Tensor operator >=(Tensor left, byte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(Tensor left, sbyte right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(Tensor left, short right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(Tensor left, int right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(Tensor left, long right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator >=(Tensor left, Half right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+#endif
+ public static Tensor operator >=(Tensor left, float right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(Tensor left, double right)
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(Tensor left, bool right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(Tensor left, (float, float) right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(Tensor left, System.Numerics.Complex right) // FIXME: Well defined?
+ {
+ using Scalar right_scalar = right.ToScalar();
+ return left >= right_scalar;
+ }
+ public static Tensor operator >=(byte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+ public static Tensor operator >=(sbyte left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+ public static Tensor operator >=(short left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+ public static Tensor operator >=(int left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+ public static Tensor operator >=(long left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+#if NET6_0_OR_GREATER
+ public static Tensor operator >=(Half left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+#endif
+ public static Tensor operator >=(float left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+ public static Tensor operator >=(double left, Tensor right)
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+ public static Tensor operator >=(bool left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+ public static Tensor operator >=((float, float) left, Tensor right) // FIXME: Well defined?
+ {
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
+ }
+ public static Tensor operator >=(System.Numerics.Complex left, Tensor right) // FIXME: Well defined?
{
- return right.le(left);
+ using Scalar left_scalar = left.ToScalar();
+ return left_scalar >= right;
}
/// <summary>
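For orientation, a minimal usage sketch of the scalar comparison overloads added above (no API beyond this hunk is assumed): comparing a tensor against a plain .NET number now binds to a dedicated overload rather than routing through the implicit numeric-to-Tensor conversion, so no hidden temporary tensor is created.

    using var t = torch.arange(0, 10).to_type(ScalarType.Float32);
    using var mask = t > 5.0;     // operator >(Tensor, double); the Scalar is disposed internally
    using var hits = t[mask];     // 6, 7, 8, 9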
@@ -6321,6 +7302,7 @@ public Tensor where(Tensor condition, Tensor y)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(byte value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6330,6 +7312,7 @@ public static implicit operator Tensor(byte value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(sbyte value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6339,6 +7322,7 @@ public static implicit operator Tensor(sbyte value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(short value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6348,6 +7332,7 @@ public static implicit operator Tensor(short value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(int value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6357,6 +7342,7 @@ public static implicit operator Tensor(int value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(long value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6366,6 +7352,7 @@ public static implicit operator Tensor(long value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(float value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6375,6 +7362,7 @@ public static implicit operator Tensor(float value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(double value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6384,6 +7372,7 @@ public static implicit operator Tensor(double value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(bool value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6393,6 +7382,7 @@ public static implicit operator Tensor(bool value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor((float, float) value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6402,6 +7392,7 @@ public static implicit operator Tensor((float, float) value)
/// <param name="value">The numeric value.</param>
public static implicit operator Tensor(System.Numerics.Complex value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6411,6 +7402,7 @@ public static implicit operator Tensor(System.Numerics.Complex value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(byte[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6420,6 +7412,7 @@ public static implicit operator Tensor(byte[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(sbyte[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6429,6 +7422,7 @@ public static implicit operator Tensor(sbyte[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(short[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6438,6 +7432,7 @@ public static implicit operator Tensor(short[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(int[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6447,6 +7442,7 @@ public static implicit operator Tensor(int[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(long[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6456,6 +7452,7 @@ public static implicit operator Tensor(long[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(float[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6465,6 +7462,7 @@ public static implicit operator Tensor(float[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(double[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6474,6 +7472,7 @@ public static implicit operator Tensor(double[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(bool[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6483,6 +7482,7 @@ public static implicit operator Tensor(bool[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor((float, float)[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6492,6 +7492,7 @@ public static implicit operator Tensor((float, float)[] value)
/// <param name="value">The numeric value array.</param>
public static implicit operator Tensor(System.Numerics.Complex[] value)
{
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return torch.tensor(value);
}
@@ -6502,6 +7503,7 @@ public static implicit operator Tensor(System.Numerics.Complex[] value)
public static implicit operator Tensor(Scalar scalar)
{
_throw();
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return new Tensor(IntPtr.Zero);
}
@@ -6600,7 +7602,8 @@ private static string ToNumpyString(Tensor t, long mdim, bool isFCreate, string
var sb = new StringBuilder(isFCreate ? string.Join("", Enumerable.Repeat(' ', (int)(mdim - dim))) : "");
if (dim == 0) {
- PrintValue(sb, t.dtype, t.ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t.ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
return sb.ToString(); ;
}
@@ -6612,25 +7615,30 @@ private static string ToNumpyString(Tensor t, long mdim, bool isFCreate, string
else if (dim == 1) {
if (currentSize <= torch.maxColumns) {
for (var i = 0; i < currentSize - 1; i++) {
- PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[i].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
sb.Append(',').Append(' ');
}
- PrintValue(sb, t.dtype, t[currentSize - 1].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[currentSize - 1].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
} else {
for (var i = 0; i < leadingCols; i++) {
- PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[i].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
sb.Append(',').Append(' ');
}
sb.Append("... ");
for (var i = currentSize - trailingCols; i < currentSize - 1; i++) {
- PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[i].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
sb.Append(',').Append(' ');
}
- PrintValue(sb, t.dtype, t[currentSize - 1].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[currentSize - 1].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
}
} else {
var newline = string.Join("", Enumerable.Repeat(newLine, (int)dim - 1).ToList());
@@ -6688,7 +7696,8 @@ private static string ToCSharpString(Tensor t, long mdim, bool isFCreate, string
sb.Append(", value = ");
if (t.Dimensions == 0) {
- PrintValue(sb, t.dtype, t.ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t.ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
return sb.ToString(); ;
}
}
@@ -6762,16 +7771,19 @@ private static string ToCSharpString(Tensor t, long mdim, bool isFCreate, string
else if (dim == 1) {
if (currentSize <= torch.maxColumns) {
for (var i = 0; i < currentSize - 1; i++) {
- PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[i].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
sb.Append(appendChar);
sb.Append(',').Append(' ');
}
- PrintValue(sb, t.dtype, t[currentSize - 1].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[currentSize - 1].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
sb.Append(appendChar);
} else {
for (var i = 0; i < leadingCols; i++) {
- PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[i].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
sb.Append(appendChar);
sb.Append(',').Append(' ');
}
@@ -6779,12 +7791,14 @@ private static string ToCSharpString(Tensor t, long mdim, bool isFCreate, string
sb.Append("... ");
for (var i = currentSize - trailingCols; i < currentSize - 1; i++) {
- PrintValue(sb, t.dtype, t[i].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[i].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
sb.Append(appendChar);
sb.Append(',').Append(' ');
}
- PrintValue(sb, t.dtype, t[currentSize - 1].ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = t[currentSize - 1].ToScalar())
+ PrintValue(sb, t.dtype, scalar, fltFormat, actualCulturInfo);
sb.Append(appendChar);
}
} else {
@@ -6843,7 +7857,8 @@ private string ToJuliaString(string fltFormat, int width, CultureInfo? cultureIn
if (Dimensions == 0) {
builder.Append(", value = ");
- PrintValue(builder, dtype, this.ToScalar(), fltFormat, actualCulturInfo);
+ using (var scalar = this.ToScalar())
+ PrintValue(builder, dtype, scalar, fltFormat, actualCulturInfo);
} else if (Dimensions == 1) {
@@ -6974,7 +7989,8 @@ private static void BuildRow(List<string> row, Tensor t, int width, string fltFo
for (int i = 0; i < t.shape[0]; i++) {
var builder = new StringBuilder();
- PrintValue(builder, type, t[i].ToScalar(), fltFormat, cultureInfo);
+ using (var scalar = t[i].ToScalar())
+ PrintValue(builder, type, scalar, fltFormat, cultureInfo);
var str = builder.ToString();
@@ -7246,6 +8262,7 @@ public static implicit operator TensorIndex(long value)
public static implicit operator Tensor(TensorIndex value)
{
_throw();
+ TensorLeakDetector.ThrowIfImplicitConversionNotAllowed();
return new Tensor(IntPtr.Zero);
}
diff --git a/src/TorchSharp/Tensor/TensorExtensionMethods.cs b/src/TorchSharp/Tensor/TensorExtensionMethods.cs
index 2f4fa81dc..89046dafb 100644
--- a/src/TorchSharp/Tensor/TensorExtensionMethods.cs
+++ b/src/TorchSharp/Tensor/TensorExtensionMethods.cs
@@ -597,82 +597,126 @@ public static Tensor ToTensor<T>(this T scalar, Device? device = null, bool requ
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static (float Real, float Imaginary) ToComplexFloat32(this Tensor value) => value.ToScalar().ToComplexFloat32();
+ public static (float Real, float Imaginary) ToComplexFloat32(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToComplexFloat32();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static System.Numerics.Complex ToComplexFloat64(this Tensor value) => value.ToScalar().ToComplexFloat64();
+ public static System.Numerics.Complex ToComplexFloat64(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToComplexFloat64();
+ }
#if NET6_0_OR_GREATER
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static Half ToHalf(this Tensor value) => value.ToScalar().ToHalf();
+ public static Half ToHalf(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToHalf();
+ }
#endif
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static float ToSingle(this Tensor value) => value.ToScalar().ToSingle();
+ public static float ToSingle(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToSingle();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static double ToDouble(this Tensor value) => value.ToScalar().ToDouble();
+ public static double ToDouble(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToDouble();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static sbyte ToSByte(this Tensor value) => value.ToScalar().ToSByte();
+ public static sbyte ToSByte(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToSByte();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static byte ToByte(this Tensor value) => value.ToScalar().ToByte();
+ public static byte ToByte(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToByte();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static short ToInt16(this Tensor value) => value.ToScalar().ToInt16();
+ public static short ToInt16(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToInt16();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static int ToInt32(this Tensor value) => value.ToScalar().ToInt32();
+ public static int ToInt32(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToInt32();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static long ToInt64(this Tensor value) => value.ToScalar().ToInt64();
+ public static long ToInt64(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToInt64();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static bool ToBoolean(this Tensor value) => value.ToScalar().ToBoolean();
+ public static bool ToBoolean(this Tensor value)
+ {
+ using var scalar = value.ToScalar();
+ return scalar.ToBoolean();
+ }
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static (float Real, float Imaginary) ToComplex32(this Tensor value) => value.ToScalar().ToComplexFloat32();
+ public static (float Real, float Imaginary) ToComplex32(this Tensor value) => ToComplexFloat32(value);
/// <summary>
/// Explicitly convert a singleton tensor to a .NET scalar value.
/// </summary>
/// <param name="value">The input tensor</param>
- public static System.Numerics.Complex ToComplex64(this Tensor value) => value.ToScalar().ToComplexFloat64();
+ public static System.Numerics.Complex ToComplex64(this Tensor value) => ToComplexFloat64(value);
/// <summary>
/// Multiply the dimensions of a tensor shape to provide a complete size.
/// </summary>
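Caller-visible behavior is unchanged by these rewrites; the intermediate Scalar is simply disposed before the method returns:

    using var t = torch.tensor(2.5f);
    float f = t.ToSingle();   // internally: using var scalar = t.ToScalar(); return scalar.ToSingle();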
diff --git a/src/TorchSharp/Tensor/torch.PointwiseOps.cs b/src/TorchSharp/Tensor/torch.PointwiseOps.cs
index 0fccbd8ce..de22fe9dc 100644
--- a/src/TorchSharp/Tensor/torch.PointwiseOps.cs
+++ b/src/TorchSharp/Tensor/torch.PointwiseOps.cs
@@ -1552,6 +1552,13 @@ public static Tensor quantized_max_pool2d(Tensor input, long[] kernel_size, long
/// <param name="input">The input tensor.</param>
[Pure]public static Tensor square(Tensor input) => input.square();
+ // https://pytorch.org/docs/stable/generated/torch.square
+ /// <summary>
+ /// Computes the element-wise square, in place.
+ /// </summary>
+ /// <param name="input">The input tensor.</param>
+ public static Tensor square_(Tensor input) => input.square_();
+
// https://pytorch.org/docs/stable/generated/torch.sub
/// <summary>
/// Element-wise subtraction
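Out-of-place versus the newly surfaced in-place variant, as a quick sketch:

    using var a = torch.tensor(new float[] { 1f, 2f, 3f });
    using var b = torch.square(a);   // a untouched, b = { 1, 4, 9 }
    torch.square_(a);                // mutates a in place to { 1, 4, 9 }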
diff --git a/src/TorchSharp/Tensor/torch.cs b/src/TorchSharp/Tensor/torch.cs
index 6892d2b69..fd7c37105 100644
--- a/src/TorchSharp/Tensor/torch.cs
+++ b/src/TorchSharp/Tensor/torch.cs
@@ -112,11 +112,11 @@ public static Tensor row_stack(IList<Tensor> tensors)
public static Tensor clamp_max(Tensor input, Scalar max) => input.clamp_max(max);
- public static Tensor clamp_max_(Tensor input, Scalar max) => input.clamp_max(max);
+ public static Tensor clamp_max_(Tensor input, Scalar max) => input.clamp_max_(max);
public static Tensor clamp_min(Tensor input, Scalar min) => input.clamp_min(min);
- public static Tensor clamp_min_(Tensor input, Scalar min) => input.clamp_min(min);
+ public static Tensor clamp_min_(Tensor input, Scalar min) => input.clamp_min_(min);
/// <summary>
/// Expands the dimension dim of the self tensor over multiple dimensions of sizes given by sizes.
/// </summary>
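The one-character bugs fixed here were easy to miss: `clamp_max_` and `clamp_min_` forwarded to their out-of-place twins, so the input was never mutated and the returned tensor was typically dropped. A minimal repro sketch:

    using var t = torch.tensor(new float[] { -2f, 0f, 2f });
    using var floor = 0.0f.ToScalar();
    torch.clamp_min_(t, floor);   // after the fix: t is { 0, 0, 2 };
                                  // before it, t stayed { -2, 0, 2 }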
diff --git a/src/TorchSharp/Utils/tensorboard/Summary.cs b/src/TorchSharp/Utils/tensorboard/Summary.cs
index dd43d825d..f08a35e86 100644
--- a/src/TorchSharp/Utils/tensorboard/Summary.cs
+++ b/src/TorchSharp/Utils/tensorboard/Summary.cs
@@ -177,7 +177,9 @@ public static Tensorboard.Summary image(string tag, Tensor tensor, double rescal
tensor = utils.convert_to_HWC(tensor, dataformats);
int scale_factor = calc_scale_factor(tensor);
tensor = tensor.to_type(ScalarType.Float32);
- tensor = (tensor * scale_factor).clip(0, 255).to_type(ScalarType.Byte);
+ using var min_byte_scalar = 0.ToScalar(); // FIXME: No torch.min_int_value?
+ using var max_byte_scalar = torch.max_int_value(ScalarType.Byte).ToScalar();
+ tensor = (tensor * scale_factor).clip(min_byte_scalar, max_byte_scalar).to_type(ScalarType.Byte);
Tensorboard.Summary.Types.Image image = make_image(tensor, rescale);
var summary = new Tensorboard.Summary();
summary.Value.Add(new Tensorboard.Summary.Types.Value() { Tag = tag, Image = image });
@@ -241,7 +243,9 @@ public static Tensorboard.Summary video(string tag, Tensor tensor, int fps)
tensor = utils.prepare_video(tensor);
int scale_factor = calc_scale_factor(tensor);
tensor = tensor.to_type(ScalarType.Float32);
- tensor = (tensor * scale_factor).clip(0, 255).to_type(ScalarType.Byte);
+ using var min_byte_scalar = 0.ToScalar(); // FIXME: No torch.min_int_value?
+ using var max_byte_scalar = torch.max_int_value(ScalarType.Byte).ToScalar();
+ tensor = (tensor * scale_factor).clip(min_byte_scalar, max_byte_scalar).to_type(ScalarType.Byte);
Tensorboard.Summary.Types.Image video = make_video(tensor, fps);
var summary = new Tensorboard.Summary();
summary.Value.Add(new Tensorboard.Summary.Types.Value() { Tag = tag, Image = video });
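The clip bounds are now explicit, disposable Scalars; `torch.max_int_value(ScalarType.Byte)`, used above, supplies the 255 upper bound, while the 0 lower bound stays hard-coded pending the `min_int_value` question in the FIXME. The same pattern in isolation:

    using var img = torch.rand(3, 16, 16);   // float image in [0, 1]
    using var lo = 0.ToScalar();
    using var hi = torch.max_int_value(ScalarType.Byte).ToScalar();   // 255
    using var bytes = (img * 255.0f).clip(lo, hi).to_type(ScalarType.Byte);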
diff --git a/src/TorchVision/AdjustGamma.cs b/src/TorchVision/AdjustGamma.cs
index 89cd0b5ae..8a08554b7 100644
--- a/src/TorchVision/AdjustGamma.cs
+++ b/src/TorchVision/AdjustGamma.cs
@@ -22,7 +22,10 @@ public Tensor call(Tensor img)
if (!torch.is_floating_point(img))
img = transforms.ConvertImageDtype(torch.float32).call(img);
- img = (gain * img.pow(gamma)).clamp(0, 1);
+ using var gamma_scalar = gamma.ToScalar();
+ using var zero_scalar = 0.ToScalar();
+ using var one_scalar = 1.ToScalar();
+ img = (gain * img.pow(gamma_scalar)).clamp(zero_scalar, one_scalar);
return transforms.ConvertImageDtype(dtype).call(img); ;
}
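Gamma adjustment reduced to its formula, mirroring the transform above (gain fixed at 1.0; input assumed to be a float image in [0, 1]): out = clamp(gain * img^gamma, 0, 1).

    using var img = torch.rand(3, 32, 32);
    using var g = 2.2.ToScalar();
    using var zero = 0.ToScalar();
    using var one = 1.ToScalar();
    using var adjusted = img.pow(g).clamp(zero, one);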
diff --git a/src/TorchVision/Functional.cs b/src/TorchVision/Functional.cs
index b26d4715f..22b175e1b 100644
--- a/src/TorchVision/Functional.cs
+++ b/src/TorchVision/Functional.cs
@@ -110,9 +110,12 @@ public static Tensor adjust_gamma(Tensor img, double gamma, double gain = 1.0)
img = img.alias();
}
- using var t0 = img.pow(gamma);
+ using var gamma_scalar = gamma.ToScalar();
+ using var t0 = img.pow(gamma_scalar);
using var t1 = gain * t0;
- using var t2 = t1.clamp(0, 1);
+ using var zero_scalar = 0.ToScalar();
+ using var one_scalar = 1.ToScalar();
+ using var t2 = t1.clamp(zero_scalar, one_scalar);
return convert_image_dtype(t2, dtype);
}
@@ -276,8 +279,10 @@ public static Tensor autocontrast(Tensor input)
var t3 = t2.nonzero_as_list();
var eq_idxs = t3[0];
- using var t4 = minimum.index_put_(0, eq_idxs);
- using var t5 = maximum.index_put_(bound, eq_idxs);
+ using var zero_scalar = 0.ToScalar();
+ using var t4 = minimum.index_put_(zero_scalar, eq_idxs);
+ using var bound_scalar = bound.ToScalar();
+ using var t5 = maximum.index_put_(bound_scalar, eq_idxs);
using var t6 = (maximum - minimum);
using var t7 = torch.tensor(bound, float32);
@@ -286,7 +291,7 @@ public static Tensor autocontrast(Tensor input)
using var t8 = (input - minimum);
using var t9 = t8 * scale;
- using var t10 = t9.clamp(0, bound);
+ using var t10 = t9.clamp(zero_scalar, bound_scalar);
return t10.to(input.dtype);
}
@@ -344,7 +349,9 @@ public static Tensor convert_image_dtype(Tensor image, ScalarType dtype = Scalar
}
var eps = 1e-3;
- using var result = image.mul(output_max + 1.0 - eps);
+ var factor = output_max + 1.0 - eps;
+ using var factor_scalar = factor.ToScalar();
+ using var result = image.mul(factor_scalar);
return result.to_type(dtype);
} else {
@@ -359,7 +366,8 @@ public static Tensor convert_image_dtype(Tensor image, ScalarType dtype = Scalar
if (input_max > output_max) {
var factor = (input_max + 1) / (output_max + 1);
- using var t0 = torch.div(image, factor);
+ using var factor_scalar = factor.ToScalar();
+ using var t0 = torch.div(image, factor_scalar);
return t0.to_type(dtype);
} else {
var factor = (output_max + 1) / (input_max + 1);
@@ -898,7 +906,9 @@ private static Tensor Blend(Tensor img1, Tensor img2, double ratio)
using var t0 = img1 * ratio;
using var t2 = img2 * (1.0 - ratio);
using var t3 = (t0 + t2);
- using var t4 = t3.clamp(0, bound);
+ using var zero_scalar = 0.ToScalar();
+ using var bound_scalar = bound.ToScalar();
+ using var t4 = t3.clamp(zero_scalar, bound_scalar);
return t4.to(img1.dtype);
}
@@ -927,7 +937,7 @@ private static Tensor GetGaussianKernel1d(long size, float sigma)
using var x = torch.linspace(-ksize_half, ksize_half, size);
using var t0 = x / sigma;
using var t1 = -t0;
- using var t2 = t1.pow(2);
+ using var t2 = t1.square();
using var pdf = t2 * 0.5f;
using var sum = pdf.sum();
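For reference, the Blend helper rewritten above computes clamp(img1 * ratio + img2 * (1 - ratio), 0, bound), with every intermediate in a `using`; a self-contained sketch of the same arithmetic for float images (bound = 1.0):

    using var a = torch.rand(3, 8, 8);
    using var b = torch.rand(3, 8, 8);
    var ratio = 0.25;
    using var zero = 0.ToScalar();
    using var bound = 1.0.ToScalar();
    using var blended = (a * ratio + b * (1.0 - ratio)).clamp(zero, bound);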
diff --git a/src/TorchVision/Ops.cs b/src/TorchVision/Ops.cs
index 987f022f7..8e8c34780 100644
--- a/src/TorchVision/Ops.cs
+++ b/src/TorchVision/Ops.cs
@@ -18,7 +18,8 @@ public static Tensor sigmoid_focal_loss(Tensor inputs, Tensor targets, float alp
var ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction: reduction);
var p_t = p * targets + (1 - p) * (1 - targets);
- var loss = ce_loss * (1 - p_t).pow(gamma);
+ using var gamma_scalar = gamma.ToScalar();
+ var loss = ce_loss * (1 - p_t).pow(gamma_scalar);
if (alpha >= 0) {
var alpha_t = alpha * targets + (1 - alpha) * (1 - targets);
@@ -52,6 +53,9 @@ public static Tensor nms(Tensor boxes, Tensor scores, double iou_threshold = 0.5
var areas = (x2 - x1) * (y2 - y1);
var (_, order) = scores.sort(0, descending: true);
+ using var zero_scalar = 0.ToScalar();
+ using var one_scalar = 1.ToScalar();
+ using var iou_threshold_scalar = iou_threshold.ToScalar();
var keep = new List<long>();
while (order.numel() > 0) {
long i;
@@ -64,16 +68,17 @@ public static Tensor nms(Tensor boxes, Tensor scores, double iou_threshold = 0.5
keep.Add(i);
}
- var indices = torch.arange(1, order.shape[0], dtype: ScalarType.Int64);
+ using var stop_scalar = order.shape[0].ToScalar();
+ var indices = torch.arange(one_scalar, stop_scalar, dtype: ScalarType.Int64);
order = order[indices];
var xx1 = x1[order].clamp(min: x1[i]);
var yy1 = y1[order].clamp(min: y1[i]);
var xx2 = x2[order].clamp(max: x2[i]);
var yy2 = y2[order].clamp(max: y2[i]);
- var inter = (xx2 - xx1).clamp(min: 0) * (yy2 - yy1).clamp(min: 0);
+ var inter = (xx2 - xx1).clamp(min: zero_scalar) * (yy2 - yy1).clamp(min: zero_scalar);
var iou = inter / (areas[i] + areas[order] - inter);
- var idx = (iou <= iou_threshold).nonzero().squeeze();
+ var idx = (iou <= iou_threshold_scalar).nonzero().squeeze();
if (idx.numel() == 0) {
break;
}
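A typical call into the rewritten loop, for orientation (assumes the usual `torchvision.ops` entry point and the 2-D array overload of `torch.tensor`; boxes are x1, y1, x2, y2):

    using var boxes = torch.tensor(new float[,] {
        {  0f,  0f, 10f, 10f },
        {  1f,  1f,  9f,  9f },   // heavy overlap with the first box
        { 50f, 50f, 60f, 60f },
    });
    using var scores = torch.tensor(new float[] { 0.9f, 0.8f, 0.7f });
    using var keep = torchvision.ops.nms(boxes, scores, iou_threshold: 0.5);
    // keep -> { 0, 2 }: the second box is suppressed by the first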
diff --git a/src/TorchVision/Ops/Boxes.cs b/src/TorchVision/Ops/Boxes.cs
index 134e89a63..f71048d0c 100644
--- a/src/TorchVision/Ops/Boxes.cs
+++ b/src/TorchVision/Ops/Boxes.cs
@@ -91,7 +91,8 @@ public static Tensor generalized_box_iou(Tensor boxes1, Tensor boxes2)
var lti = torch.min(boxes1[colon, None, (null, 2)], boxes2[colon, (null, 2)]);
var rbi = torch.max(boxes1[colon, None, (2, null)], boxes2[colon, (2, null)]);
- var whi = _upcast(rbi - lti).clamp(min: 0); // [N,M,2]
+ using var zero_scalar = 0.ToScalar();
+ var whi = _upcast(rbi - lti).clamp(min: zero_scalar); // [N,M,2]
var areai = whi[colon, colon, 0] * whi[colon, colon, 1];
return (iou - (areai - union) / areai).MoveToOuterDisposeScope();
@@ -179,7 +180,8 @@ private static Tensor _box_inter_union(Tensor boxes1, Tensor boxes2, out Tensor
var lt = torch.max(boxes1[colon, None, (null, 2)], boxes2[colon, (null, 2)]); // [N,M,2];
var rb = torch.min(boxes1[colon, None, (2, null)], boxes2[colon, (2, null)]); // [N,M,2];
- var wh = _upcast(rb - lt).clamp(min: 0); // [N,M,2];
+ using var zero_scalar = 0.ToScalar();
+ var wh = _upcast(rb - lt).clamp(min: zero_scalar); // [N,M,2];
var inter = wh[colon, colon, 0] * wh[colon, colon, 1]; // [N,M];
union = area1[colon, None] + area2 - inter;
@@ -191,15 +193,16 @@ private static Tensor _box_diou_iou(Tensor boxes1, Tensor boxes2, out Tensor iou
iou = box_iou(boxes1, boxes2);
var lti = torch.min(boxes1[colon, None, (null, 2)], boxes2[colon, (null, 2)]);
var rbi = torch.max(boxes1[colon, None, (2, null)], boxes2[colon, (2, null)]);
- var whi = _upcast(rbi - lti).clamp(min: 0); // [N,M,2];
- var diagonal_distance_squared = whi[colon, colon, 0].pow(2) + whi[colon, colon, 1].pow(2) + eps;
+ using var zero_scalar = 0.ToScalar();
+ var whi = _upcast(rbi - lti).clamp(min: zero_scalar); // [N,M,2];
+ var diagonal_distance_squared = whi[colon, colon, 0].square() + whi[colon, colon, 1].square() + eps;
// centers of boxes
var x_p = (boxes1[colon, 0] + boxes1[colon, 2]) / 2;
var y_p = (boxes1[colon, 1] + boxes1[colon, 3]) / 2;
var x_g = (boxes2[colon, 0] + boxes2[colon, 2]) / 2;
var y_g = (boxes2[colon, 1] + boxes2[colon, 3]) / 2;
// The distance between boxes' centers squared.
- var centers_distance_squared = _upcast((x_p[colon, None] - x_g[None, colon])).pow(2) + _upcast((y_p[colon, None] - y_g[None, colon])).pow(2);
+ var centers_distance_squared = _upcast((x_p[colon, None] - x_g[None, colon])).square() + _upcast((y_p[colon, None] - y_g[None, colon])).square();
// The distance IoU is the IoU penalized by a normalized
// distance between boxes' centers squared.
return iou - (centers_distance_squared / diagonal_distance_squared);
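Note: the quantity assembled in `_box_diou_iou` is the distance-IoU, DIoU = IoU - d^2 / c^2, where d is the distance between the boxes' centers and c the diagonal of the smallest enclosing box. A hand-checkable example, assuming a public `distance_box_iou` wrapper mirroring Python torchvision sits above this private helper (the wrapper name is an assumption):

    using TorchSharp;
    using static TorchSharp.torch;

    // Two touching but non-overlapping 2x2 boxes, side by side.
    var a = torch.tensor(new float[,] { { 0f, 0f, 2f, 2f } });
    var b = torch.tensor(new float[,] { { 2f, 0f, 4f, 2f } });
    // Enclosing box [0, 0, 4, 2]: c^2 = 4^2 + 2^2 = 20.
    // Centers (1, 1) and (3, 1):  d^2 = 2^2     = 4.
    // IoU = 0, so DIoU = 0 - 4/20 = -0.2.
    var diou = torchvision.ops.distance_box_iou(a, b); // assumed wrapper name
    // Expected single entry: approximately -0.2f.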
diff --git a/src/TorchVision/Ops/Losses.cs b/src/TorchVision/Ops/Losses.cs
index a01824120..4481f8906 100644
--- a/src/TorchVision/Ops/Losses.cs
+++ b/src/TorchVision/Ops/Losses.cs
@@ -36,7 +36,8 @@ public static Tensor sigmoid_focal_loss(
var p = torch.sigmoid(inputs);
var ce_loss = binary_cross_entropy_with_logits(inputs, targets, reduction: nn.Reduction.None);
var p_t = p * targets + (1 - p) * (1 - targets);
- var loss = ce_loss * (1 - p_t).pow(gamma);
+ using var gamma_scalar = gamma.ToScalar();
+ var loss = ce_loss * (1 - p_t).pow(gamma_scalar);
if (alpha >= 0) {
var alpha_t = alpha * targets + (1 - alpha) * (1 - targets);
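Note: both this hunk and its duplicate in src/TorchVision/Ops.cs implement the focal loss of Lin et al., FL = alpha_t * (1 - p_t)^gamma * CE, so gamma = 0 degenerates to plain binary cross entropy. A quick sanity check grounded in the `if (alpha >= 0)` branch above (alpha < 0 disables the alpha weighting); parameter defaults and named-argument availability are assumptions:

    using TorchSharp;
    using static TorchSharp.torch;

    var inputs  = torch.tensor(new float[] { 2.0f, -1.0f }); // raw logits
    var targets = torch.tensor(new float[] { 1.0f,  0.0f });

    // gamma = 0 and alpha < 0: (1 - p_t)^0 == 1, so this equals plain BCE.
    var bce_like = torchvision.ops.sigmoid_focal_loss(inputs, targets, alpha: -1f, gamma: 0f);
    // gamma = 2 down-weights the already-well-classified examples.
    var focal    = torchvision.ops.sigmoid_focal_loss(inputs, targets, alpha: -1f, gamma: 2f);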
diff --git a/src/TorchVision/Ops/StochasticDepth.cs b/src/TorchVision/Ops/StochasticDepth.cs
index cb615bc2b..64c450232 100644
--- a/src/TorchVision/Ops/StochasticDepth.cs
+++ b/src/TorchVision/Ops/StochasticDepth.cs
@@ -53,7 +53,8 @@ public static Tensor stochastic_depth(Tensor input, double p, StochasticDepth.Mo
noise.bernoulli_(survival_rate);
if (survival_rate > 0) {
- noise.div_(survival_rate);
+ using var survival_rate_scalar = survival_rate.ToScalar();
+ noise.div_(survival_rate_scalar);
}
return input * noise;
}
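Note: dividing the Bernoulli noise by survival_rate is the usual inverted scaling: each noise element is 1 with probability survival_rate and 0 otherwise, so after the division E[noise] = 1 and `input * noise` stays unbiased, meaning inference needs no rescaling. A tiny check of that expectation:

    using TorchSharp;
    using static TorchSharp.torch;

    var survival_rate = 0.8;
    // Draw many Bernoulli(0.8) samples, then apply the same inverted scaling.
    var noise = torch.empty(100_000).bernoulli_(survival_rate) / survival_rate;
    // noise.mean() comes out ~1.0: the surviving paths compensate for the dropped ones.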
diff --git a/src/TorchVision/Ops/Utils.cs b/src/TorchVision/Ops/Utils.cs
index cbc74acf4..5227ede57 100644
--- a/src/TorchVision/Ops/Utils.cs
+++ b/src/TorchVision/Ops/Utils.cs
@@ -76,7 +76,7 @@ internal static (Tensor, Tensor) _diou_iou_loss(Tensor boxes1, Tensor boxes2, do
var xc2 = max(x2, x2g);
var yc2 = max(y2, y2g);
- var diagonal_distance_squared = (xc2 - xc1).pow(2) + (yc2 - yc1).pow(2) + eps;
+ var diagonal_distance_squared = (xc2 - xc1).square() + (yc2 - yc1).square() + eps;
// centers of boxes
var x_p = (x2 + x1) / 2;
var y_p = (y2 + y1) / 2;
@@ -84,7 +84,7 @@ internal static (Tensor, Tensor) _diou_iou_loss(Tensor boxes1, Tensor boxes2, do
var y_g = (y1g + y2g) / 2;
// The distance between boxes' centers squared.
- var centers_distance_squared = (x_p - x_g).pow(2) + (y_p - y_g).pow(2);
+ var centers_distance_squared = (x_p - x_g).square() + (y_p - y_g).square();
// The distance IoU is the IoU penalized by a normalized
// distance between boxes' centers squared.
var loss = 1 - iou + (centers_distance_squared / diagonal_distance_squared);
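Note: the resulting loss is 1 - IoU + d^2 / c^2, so it bottoms out at 0 for identical boxes (IoU = 1, zero center distance) and, for the touching 2x2 boxes worked through after the Boxes.cs hunk above, comes to 1 - 0 + 4/20 = 1.2.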
diff --git a/src/TorchVision/Utils.cs b/src/TorchVision/Utils.cs
index 623f332b3..4d543a326 100644
--- a/src/TorchVision/Utils.cs
+++ b/src/TorchVision/Utils.cs
@@ -87,8 +87,11 @@ public static Tensor make_grid(
tensor = tensor.clone(); // avoid modifying tensor in-place
void norm_ip(Tensor img, double low, double high)
{
- img.clamp_(min: low, max: high);
- img.sub_(low).div_(Math.Max(high - low, 1e-5));
+ using var low_scalar = low.ToScalar();
+ using var high_scalar = high.ToScalar();
+ using var denom_scalar = Math.Max(high - low, 1e-5).ToScalar();
+ img.clamp_(min: low_scalar, max: high_scalar);
+ img.sub_(low_scalar).div_(denom_scalar);
}
void norm_range(Tensor t, (double low, double high)? range)
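Note: `norm_ip` is a plain min-max rescale into [0, 1], with the denominator floored at 1e-5 so a constant image does not divide by ~0. A standalone sketch of the same arithmetic under hypothetical names (out-of-place here, where the library version works in place):

    using System;
    using TorchSharp;
    using static TorchSharp.torch;

    static Tensor MinMaxNormalize(Tensor img, double low, double high)
    {
        var clamped = img.clamp(low, high);                   // restrict to [low, high]
        return (clamped - low) / Math.Max(high - low, 1e-5);  // rescale to [0, 1]
    }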
@@ -129,7 +132,8 @@ void norm_range(Tensor t, (double low, double high)? range)
var height = tensor.size(2) + padding;
var num_channels = tensor.size(1);
- var grid = tensor.new_full(new[] { num_channels, height * ymaps + padding, width * xmaps + padding }, pad_value);
+ using var pad_value_scalar = pad_value.ToScalar();
+ var grid = tensor.new_full(new[] { num_channels, height * ymaps + padding, width * xmaps + padding }, pad_value_scalar);
var k = 0L;
for (long y = 0; y < ymaps; ++y)
{
@@ -217,7 +221,11 @@ public static void save_image(
using var _ = torch.NewDisposeScope();
var grid = make_grid(tensor, nrow, padding, normalize, value_range, scale_each, pad_value);
// Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
- var narr = grid.mul(255).add_(0.5).clamp_(0, 255).to(uint8, CPU);
+ // FIXME: Why not torch.Tensor.round_?
+ using var uint8_min_scalar = 0.ToScalar(); // FIXME: No torch.min_int_value?
+ using var uint8_max_scalar = torch.max_int_value(uint8).ToScalar();
+ using var round_scalar = 0.5.ToScalar(); // +0.5 turns the truncating byte cast into round-to-nearest
+ var narr = grid.mul(uint8_max_scalar).add_(round_scalar).clamp_(uint8_min_scalar, uint8_max_scalar).to(uint8, CPU);
(imager ?? DefaultImager).EncodeImage(narr, format, filestream);
}
}
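Note: the uint8 conversion scales [0, 1] floats to [0, 255], adds 0.5, clamps, and casts; since the cast truncates and every value is non-negative after the clamp, the +0.5 turns truncation into round-to-nearest (which is what the round_ FIXME above is getting at). A minimal sketch of the same mapping:

    using TorchSharp;
    using static TorchSharp.torch;

    var grid = torch.rand(3, 32, 32);   // a float image in [0, 1]
    var bytes = grid.mul(255).add_(0.5).clamp_(0, 255).to_type(torch.uint8);
    // e.g. 173.4f -> 173.9f -> (byte)173, while 173.6f -> 174.1f -> (byte)174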
diff --git a/test/TorchSharpTest/TestAutogradFunction.cs b/test/TorchSharpTest/TestAutogradFunction.cs
index 075306d3d..f2043ddca 100644
--- a/test/TorchSharpTest/TestAutogradFunction.cs
+++ b/test/TorchSharpTest/TestAutogradFunction.cs
@@ -76,7 +76,7 @@ private float TrainXOR(Device device)
var input = torch.tensor(new float[] { i, j }, device: device).unsqueeze(0);
var output = LinearFunction.apply(input, weight1);
output = LinearFunction.apply(torch.nn.functional.tanh(input), weight2);
- var loss = (output - (i ^ j)).pow(2);
+ var loss = (output - (i ^ j)).square();
loss.backward();
optim.step();
lastLoss = loss.item<float>();
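Note: `square()` differentiates exactly like `pow(2)` (d/dt of t^2 is 2t), so the swap cannot change what this training loop learns. A minimal autograd check:

    using TorchSharp;
    using static TorchSharp.torch;

    var t = torch.tensor(new float[] { 3f }, requires_grad: true);
    var y = t.square();
    y.backward();
    // The gradient accumulated on t is 2 * t = 6, identical to what t.pow(2) yields.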
diff --git a/test/TorchSharpTest/TestTorchTensor.cs b/test/TorchSharpTest/TestTorchTensor.cs
index 69ba732f3..4181dd7ac 100644
--- a/test/TorchSharpTest/TestTorchTensor.cs
+++ b/test/TorchSharpTest/TestTorchTensor.cs
@@ -3980,9 +3980,9 @@ public void SquareEuclideanDistance()
var ones = torch.ones(new long[] { 1, 9 });
var centroids = torch.cat(new Tensor[] { zeros, ones }, 0);
- var distanceFromZero = input.reshape(new long[] { -1, 1, 9 }).sub(zeros).pow(2.ToScalar()).sum(new long[] { 2 });
- var distanceFromOne = input.reshape(new long[] { -1, 1, 9 }).sub(ones).pow(2.ToScalar()).sum(new long[] { 2 });
- var distanceFromCentroids = input.reshape(new long[] { -1, 1, 9 }).sub(centroids).pow(2.ToScalar()).sum(new long[] { 2 });
+ var distanceFromZero = input.reshape(new long[] { -1, 1, 9 }).sub(zeros).square().sum(new long[] { 2 });
+ var distanceFromOne = input.reshape(new long[] { -1, 1, 9 }).sub(ones).square().sum(new long[] { 2 });
+ var distanceFromCentroids = input.reshape(new long[] { -1, 1, 9 }).sub(centroids).square().sum(new long[] { 2 });
Assert.True(true);
}
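Note: the test leans on broadcasting: reshaping the input to [-1, 1, 9] against centroids of shape [2, 9] produces a [N, 2, 9] difference, and square().sum over dimension 2 collapses that to the [N, 2] matrix of squared Euclidean distances. A self-contained sketch of the same pattern:

    using TorchSharp;
    using static TorchSharp.torch;

    var points    = torch.rand(5, 9); // 5 points in R^9
    var centroids = torch.cat(new Tensor[] { torch.zeros(1, 9), torch.ones(1, 9) }, 0);
    // [5, 1, 9] - [2, 9] broadcasts to [5, 2, 9]; summing dim 2 leaves [5, 2].
    var d2 = points.reshape(-1, 1, 9).sub(centroids).square().sum(new long[] { 2 });
    var nearest = d2.argmin(1); // index of the nearer centroid per point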