Commit ffc691e: Fix Clippy warnings

danieldk committed Oct 14, 2023 (1 parent: a3d67ca)

Showing 13 changed files with 45 additions and 45 deletions.
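Every hunk in this commit makes the same mechanical change: shape, dimension, and index arguments to tch tensor calls are passed as plain arrays (`[...]`) instead of borrowed slices (`&[...]`). A minimal sketch of the pattern, assuming a tch release whose shape and dimension parameters accept arrays as well as slices (as the compiled code in this diff implies); the function name `scalar_and_matrix` is illustrative, and the exact warning is presumably one of Clippy's needless-borrow lints:

use tch::{Device, Kind, Tensor};

fn scalar_and_matrix(device: Device) -> (Tensor, Tensor) {
    // Before this commit the shape was passed as a borrowed slice:
    //     let scalar = Tensor::zeros(&[], (Kind::Float, device));
    // Clippy flags the `&` because the shape parameter is generic over
    // slice-like types, so borrowing a temporary array is needless.
    let scalar = Tensor::zeros([], (Kind::Float, device));

    // The same pattern applies to non-empty shapes and to dimension lists,
    // e.g. the f_permute([0, 2, 1]) calls elsewhere in this diff.
    let matrix = Tensor::ones([2, 3], (Kind::Float, device));

    (scalar, matrix)
}

Calls that take a slice of tensors rather than an integer list, such as `Tensor::f_cat(&[u, &ones], -1)` in syntaxdot-transformers/src/layers.rs, are untouched by the commit and keep their borrows.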
18 changes: 9 additions & 9 deletions syntaxdot-cli/src/subcommands/distill.rs
@@ -405,7 +405,7 @@ impl DistillApp {
let token_mask = token_mask.with_root()?;

let mse_loss = MSELoss::new(MSELossNormalization::SquaredL2Norm);
- let mut loss = Tensor::zeros(&[], (Kind::Float, self.device));
+ let mut loss = Tensor::zeros([], (Kind::Float, self.device));

let (batch_size, _) = token_mask
.size2()
@@ -427,8 +427,8 @@ impl DistillApp {
.f_matmul(&mapping.mapping)?
.f_masked_select(&token_mask.f_unsqueeze(-1)?)?;

- let teacher_hidden = teacher_hidden.f_reshape(&[batch_size, -1])?;
- let student_hidden = student_hidden.f_reshape(&[batch_size, -1])?;
+ let teacher_hidden = teacher_hidden.f_reshape([batch_size, -1])?;
+ let student_hidden = student_hidden.f_reshape([batch_size, -1])?;

let _ = loss.f_add_(&mse_loss.forward(&student_hidden, &teacher_hidden)?);
}
@@ -472,18 +472,18 @@ impl DistillApp {
student_encoder_logits: HashMap<String, Tensor>,
token_mask: &TokenMask,
) -> Result<Tensor, SyntaxDotError> {
- let mut loss = Tensor::zeros(&[], (Kind::Float, token_mask.device()));
+ let mut loss = Tensor::zeros([], (Kind::Float, token_mask.device()));

for (encoder_name, teacher_logits) in teacher_encoder_logits {
let n_labels = teacher_logits.size()[2];

// Select the outputs for the relevant time steps.
let student_logits = student_encoder_logits[&encoder_name]
.masked_select(&token_mask.unsqueeze(-1))
- .reshape(&[-1, n_labels]);
+ .reshape([-1, n_labels]);
let teacher_logits = teacher_logits
.masked_select(&token_mask.unsqueeze(-1))
- .reshape(&[-1, n_labels]);
+ .reshape([-1, n_labels]);

// Compute the soft loss.
let teacher_probs = teacher_logits.f_softmax(-1, Kind::Float)?;
@@ -551,7 +551,7 @@ impl DistillApp {
},
)?;

- let mut soft_loss = Tensor::zeros(&[], (Kind::Float, self.device));
+ let mut soft_loss = Tensor::zeros([], (Kind::Float, self.device));

// Compute biaffine encoder/decoder loss.
match (
@@ -582,7 +582,7 @@ impl DistillApp {
let attention_loss = if self.attention_loss {
self.attention_loss(&teacher_layer_outputs, &student_layer_outputs)?
} else {
- Tensor::zeros(&[], (Kind::Float, self.device))
+ Tensor::zeros([], (Kind::Float, self.device))
};

let hidden_loss = match auxiliary_params.hidden_mappings {
@@ -592,7 +592,7 @@ impl DistillApp {
&teacher_layer_outputs,
&student_layer_outputs,
)?,
- None => Tensor::zeros(&[], (Kind::Float, self.device)),
+ None => Tensor::zeros([], (Kind::Float, self.device)),
};

Ok(DistillLoss {
10 changes: 5 additions & 5 deletions syntaxdot-transformers/src/layers.rs
@@ -59,9 +59,9 @@ impl FallibleModule for Conv1D {
xs,
&self.ws,
self.bs.as_ref(),
- &[self.config.stride],
- &[self.config.padding],
- &[self.config.dilation],
+ [self.config.stride],
+ [self.config.padding],
+ [self.config.dilation],
self.config.groups,
)?)
}
@@ -286,7 +286,7 @@ impl PairwiseBilinear {

let (batch_size, seq_len, _) = u.size3()?;

- let ones = Tensor::ones(&[batch_size, seq_len, 1], (u.kind(), u.device()));
+ let ones = Tensor::ones([batch_size, seq_len, 1], (u.kind(), u.device()));

let u = if self.bias_u {
Tensor::f_cat(&[u, &ones], -1)?
@@ -346,7 +346,7 @@ impl FallibleModuleT for VariationalDropout {
}

let (batch_size, _, repr_size) = xs.size3()?;
- let dropout_mask = Tensor::f_ones(&[batch_size, 1, repr_size], (xs.kind(), xs.device()))?
+ let dropout_mask = Tensor::f_ones([batch_size, 1, repr_size], (xs.kind(), xs.device()))?
.f_dropout_(self.p, true)?;
Ok(xs.f_mul(&dropout_mask)?)
}
2 changes: 1 addition & 1 deletion syntaxdot-transformers/src/loss.rs
@@ -155,7 +155,7 @@ impl MSELoss {
match self.normalization {
MSELossNormalization::Mean => loss,
MSELossNormalization::SquaredL2Norm => {
- let norm = target.f_frobenius_norm(&[1], true)?.f_square()?;
+ let norm = target.f_frobenius_norm([1], true)?.f_square()?;
let (batch_size, _) = target.size2()?;
loss?
.f_div(&norm)?
4 changes: 2 additions & 2 deletions syntaxdot-transformers/src/models/bert/layer.rs
@@ -257,7 +257,7 @@ impl BertSelfAttention {

let context_layer = attention_probs.f_matmul(&value_layer)?;

- let context_layer = context_layer.f_permute(&[0, 2, 1, 3])?.f_contiguous()?;
+ let context_layer = context_layer.f_permute([0, 2, 1, 3])?.f_contiguous()?;
let mut new_context_layer_shape = context_layer.size();
new_context_layer_shape.splice(
new_context_layer_shape.len() - 2..,
@@ -273,7 +273,7 @@ impl BertSelfAttention {
new_x_shape.pop();
new_x_shape.extend(&[self.num_attention_heads, self.attention_head_size]);

- Ok(x.f_view_(&new_x_shape)?.f_permute(&[0, 2, 1, 3])?)
+ Ok(x.f_view_(&new_x_shape)?.f_permute([0, 2, 1, 3])?)
}
}

4 changes: 2 additions & 2 deletions syntaxdot-transformers/src/models/squeeze_albert/mod.rs
@@ -232,7 +232,7 @@ impl Encoder for SqueezeAlbertEncoder {
) -> Result<Vec<LayerOutput>, TransformerError> {
let hidden_states = self.projection.forward(input);

- let input = hidden_states.f_permute(&[0, 2, 1])?;
+ let input = hidden_states.f_permute([0, 2, 1])?;

let mut all_layer_outputs = Vec::with_capacity(self.n_layers as usize + 1);
all_layer_outputs.push(LayerOutput::Embedding(hidden_states.shallow_clone()));
@@ -256,7 +256,7 @@ impl Encoder for SqueezeAlbertEncoder {

// Convert hidden states to [batch_size, seq_len, hidden_size].
for layer_output in &mut all_layer_outputs {
- *layer_output.output_mut() = layer_output.output().f_permute(&[0, 2, 1])?;
+ *layer_output.output_mut() = layer_output.output().f_permute([0, 2, 1])?;
}

Ok(all_layer_outputs)
4 changes: 2 additions & 2 deletions syntaxdot-transformers/src/models/squeeze_bert/encoder.rs
@@ -59,7 +59,7 @@ impl Encoder for SqueezeBertEncoder {
let attention_mask = attention_mask.map(LogitsMask::from_bool_mask).transpose()?;

// [batch_size, seq_len, hidden_size] -> [batch_size, hidden_size, seq_len]
- let mut hidden_states = input.f_permute(&[0, 2, 1])?;
+ let mut hidden_states = input.f_permute([0, 2, 1])?;

let mut all_layer_outputs = Vec::with_capacity(self.layers.len() + 1);
all_layer_outputs.push(LayerOutput::Embedding(hidden_states.shallow_clone()));
@@ -73,7 +73,7 @@

// Convert hidden states to [batch_size, seq_len, hidden_size].
for layer_output in &mut all_layer_outputs {
- *layer_output.output_mut() = layer_output.output().f_permute(&[0, 2, 1])?;
+ *layer_output.output_mut() = layer_output.output().f_permute([0, 2, 1])?;
}

Ok(all_layer_outputs)
8 changes: 4 additions & 4 deletions syntaxdot-transformers/src/models/squeeze_bert/layer.rs
@@ -49,9 +49,9 @@ impl FallibleModule for SqueezeBertLayerNorm {
type Error = TransformerError;

fn forward(&self, xs: &Tensor) -> Result<Tensor, Self::Error> {
- let xs_perm = xs.f_permute(&[0, 2, 1])?;
+ let xs_perm = xs.f_permute([0, 2, 1])?;
let xs_perm_norm = self.layer_norm.forward(&xs_perm)?;
- Ok(xs_perm_norm.f_permute(&[0, 2, 1])?)
+ Ok(xs_perm_norm.f_permute([0, 2, 1])?)
}
}

@@ -235,7 +235,7 @@ impl SqueezeBertSelfAttention {
*x_size.last().unwrap(),
];

- Ok(x.f_view_(new_x_shape)?.f_permute(&[0, 1, 3, 2])?)
+ Ok(x.f_view_(new_x_shape)?.f_permute([0, 1, 3, 2])?)
}

fn transpose_key_for_scores(&self, x: &Tensor) -> Result<Tensor, TransformerError> {
@@ -251,7 +251,7 @@
}

fn transpose_output(&self, x: &Tensor) -> Result<Tensor, TransformerError> {
- let x = x.f_permute(&[0, 1, 3, 2])?.f_contiguous()?;
+ let x = x.f_permute([0, 1, 3, 2])?.f_contiguous()?;
let x_size = x.size();
let new_x_shape = &[x_size[0], self.all_head_size, x_size[3]];
Ok(x.f_view_(new_x_shape)?)
4 changes: 2 additions & 2 deletions syntaxdot-transformers/src/util.rs
@@ -123,7 +123,7 @@ impl SinusoidalPositions for Tensor {

if let Some(p) = p_norm {
// Compute the p-norm.
- let norm = self.f_norm_scalaropt_dim(p, &[-1], true)?;
+ let norm = self.f_norm_scalaropt_dim(p, [-1], true)?;

// Normalize embeddings.
let _ = self.f_div_(&norm)?;
@@ -145,7 +145,7 @@ impl SinusoidalPositions for Tensor {
dims
);

- let mut positions = Tensor::f_empty(&[n_positions, dims], options)?;
+ let mut positions = Tensor::f_empty([n_positions, dims], options)?;
positions.sinusoidal_positions_(p_norm)?;

Ok(positions)
16 changes: 8 additions & 8 deletions syntaxdot/src/model/biaffine_dependency_layer.rs
@@ -268,7 +268,7 @@ impl BiaffineDependencyLayer {
1,
&heads
.f_unsqueeze(-1)?
- .f_expand(&[batch_size, n_tokens, label_hidden_size], true)?,
+ .f_expand([batch_size, n_tokens, label_hidden_size], true)?,
false,
)?;

@@ -370,8 +370,8 @@ impl BiaffineDependencyLayer {
let head_logits = biaffine_logits
.head_score_logits
// Last dimension is ROOT + all tokens as head candidates.
- .f_reshape(&[-1, seq_len + 1])?;
- let head_targets = &targets.heads.f_view_(&[-1])?;
+ .f_reshape([-1, seq_len + 1])?;
+ let head_targets = &targets.heads.f_view_([-1])?;
let head_loss = CrossEntropyLoss::new(-1, label_smoothing, Reduction::Mean).forward(
&head_logits,
head_targets,
Expand All @@ -380,18 +380,18 @@ impl BiaffineDependencyLayer {
// [batch_size, seq_len + 1] -> [batch_size, 1, seq_len + 1]
.f_unsqueeze(1)?
// [batch_size, 1, seq_len + 1] -> [batch_size, seq_len, seq_len + 1].
- .f_expand(&[-1, seq_len, -1], true)?
+ .f_expand([-1, seq_len, -1], true)?
// [batch_size, seq_len, seq_len + 1] -> [batch_size * seq_len, seq_len + 1]
- .f_reshape(&[-1, seq_len + 1])?,
+ .f_reshape([-1, seq_len + 1])?,
),
)?;

// Get the logits for the correct heads.
let label_score_logits = biaffine_logits
.relation_score_logits
- .f_reshape(&[-1, self.n_relations])?;
+ .f_reshape([-1, self.n_relations])?;

- let relation_targets = targets.relations.f_view_(&[-1])?;
+ let relation_targets = targets.relations.f_view_([-1])?;
let relation_loss = CrossEntropyLoss::new(-1, label_smoothing, Reduction::Mean).forward(
&label_score_logits,
&relation_targets,
@@ -423,7 +423,7 @@ impl BiaffineDependencyLayer {
.f_argmax(-1, false)?;
let relations_correct = relations_predicted
.f_eq_tensor(&targets.relations)?
- .f_view_(&[batch_size, seq_len])?;
+ .f_view_([batch_size, seq_len])?;

let head_and_relations_correct = head_correct.f_logical_and(&relations_correct)?;

2 changes: 1 addition & 1 deletion syntaxdot/src/model/pooling.rs
@@ -141,7 +141,7 @@ impl EmbeddingsPerToken for TokenSpansWithRoot {
1,
&piece_indices
.f_view([batch_size, -1, 1])?
- .f_expand(&[-1, -1, embed_size], true)?,
+ .f_expand([-1, -1, embed_size], true)?,
false,
)?
.f_view([batch_size, tokens_len, max_token_len, embed_size])?
2 changes: 1 addition & 1 deletion syntaxdot/src/model/seq_classifiers.rs
@@ -136,7 +136,7 @@ impl SequenceClassifiers {
}

let summed_loss = encoder_losses.values().try_fold(
- Tensor::f_zeros(&[], (Kind::Float, layers_without_root[0].output().device()))?,
+ Tensor::f_zeros([], (Kind::Float, layers_without_root[0].output().device()))?,
|summed_loss, loss| summed_loss.f_add(loss),
)?;

6 changes: 3 additions & 3 deletions syntaxdot/src/optimizers/grad_scale.rs
@@ -59,9 +59,9 @@ where

optimizer,

- found_inf: Tensor::full(&[1], 0.0, (Kind::Float, device)),
- growth_tracker: Tensor::full(&[1], 0, (Kind::Int, device)),
- scale: Tensor::full(&[1], init_scale, (Kind::Float, device)),
+ found_inf: Tensor::full([1], 0.0, (Kind::Float, device)),
+ growth_tracker: Tensor::full([1], 0, (Kind::Int, device)),
+ scale: Tensor::full([1], init_scale, (Kind::Float, device)),
})
}

10 changes: 5 additions & 5 deletions syntaxdot/src/tensor.rs
@@ -333,8 +333,8 @@ impl SequenceLengths {
Ok(Tensor::f_arange(max_len, (Kind::Int, self.inner.device()))?
// Construct a matrix [batch_size, max_len] where each row
// is 0..(max_len - 1).
- .f_repeat(&[batch_size])?
- .f_view_(&[batch_size, max_len])?
+ .f_repeat([batch_size])?
+ .f_view_([batch_size, max_len])?
// Time steps less than the length in the sequence lengths are active.
.f_lt_tensor(&self.inner.unsqueeze(1))?
// For some reason the kind is Int?
@@ -403,13 +403,13 @@ impl TokenSpans {

let root_offset = Tensor::from(0)
.f_view([1, 1])?
- .f_expand(&[batch_size, 1], true)?
+ .f_expand([batch_size, 1], true)?
.to_device(self.offsets.device());
let offsets = Tensor::f_cat(&[&root_offset, &self.offsets], 1)?;

let root_len = Tensor::from(1)
.f_view([1, 1])?
- .f_expand(&[batch_size, 1], true)?
+ .f_expand([batch_size, 1], true)?
.to_device(self.lens.device());
let lens = Tensor::f_cat(&[&root_len, &self.lens], 1)?;

@@ -460,7 +460,7 @@ impl TokenMask {
let (batch_size, _seq_len) = self.inner.size2()?;

let root_mask = Tensor::from(true)
- .f_expand(&[batch_size, 1], true)?
+ .f_expand([batch_size, 1], true)?
.to_device(self.inner.device());

let token_mask_with_root = Tensor::f_cat(&[&root_mask, &self.inner], -1)?;
