Improve file limit handling (#4242)
* Improve file limit handling

* Oops

* Update PostgreSQL migration

* Review comments

---------

Co-authored-by: BlackDex <[email protected]>
dani-garcia and BlackDex committed Jan 27, 2024
1 parent 8b66e34 commit edf7484
Showing 26 changed files with 281 additions and 98 deletions.
4 changes: 4 additions & 0 deletions .env.template
@@ -308,6 +308,10 @@
## Max kilobytes of attachment storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further attachments.
# USER_ATTACHMENT_LIMIT=
## Per-user send storage limit (KB)
## Max kilobytes of send storage allowed per user.
## When this limit is reached, the user will not be allowed to upload further sends.
# USER_SEND_LIMIT=

## Number of days to wait before auto-deleting a trashed item.
## If unset (the default), trashed items are not auto-deleted.
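The new USER_SEND_LIMIT option presumably follows the same convention as USER_ATTACHMENT_LIMIT above: unset means unlimited, 0 disables uploads, and any other value is a per-user budget in kilobytes. As a rough illustration only (not part of this commit; the Send-handling code is not rendered in this excerpt), such a setting is typically turned into a byte budget with overflow-checked arithmetic, mirroring the save_attachment changes further down:

// Hypothetical helper: None = no limit, Some(0) = uploads disabled,
// Some(n) = budget of n KiB, computed without risking integer overflow.
fn remaining_bytes(limit_kb: Option<i64>, already_used: i64) -> Result<Option<i64>, &'static str> {
    match limit_kb {
        None => Ok(None),
        Some(0) => Err("uploads are disabled"),
        Some(kb) => kb
            .checked_mul(1024) // KiB -> bytes
            .and_then(|budget| budget.checked_sub(already_used))
            .map(Some)
            .ok_or("size overflow"),
    }
}

A caller would then reject the upload when the remaining budget is zero or negative, much as save_attachment does below.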
28 changes: 26 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion Cargo.toml
@@ -53,6 +53,7 @@ once_cell = "1.19.0"
# Numerical libraries
num-traits = "0.2.17"
num-derive = "0.4.1"
bigdecimal = "0.4.2"

# Web framework
rocket = { version = "0.5.0", features = ["tls", "json"], default-features = false }
@@ -74,7 +75,7 @@ serde = { version = "1.0.195", features = ["derive"] }
serde_json = "1.0.111"

# A safe, extensible ORM and Query builder
diesel = { version = "2.1.4", features = ["chrono", "r2d2"] }
diesel = { version = "2.1.4", features = ["chrono", "r2d2", "numeric"] }
diesel_migrations = "2.1.0"
diesel_logger = { version = "0.3.0", optional = true }

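The new bigdecimal dependency and diesel's "numeric" feature go together: once file_size is a 64-bit column, SUM() aggregates are reported by Diesel as Nullable<Numeric>, which the "numeric" feature maps to Option<BigDecimal> on the Rust side. A minimal, hypothetical conversion back to i64 (the updated attachment model itself is not rendered in this excerpt):

use bigdecimal::BigDecimal;
use num_traits::ToPrimitive;

// Treat a missing aggregate (no rows) as 0 and clamp anything that somehow
// exceeds the i64 range instead of panicking.
fn sum_to_i64(total: Option<BigDecimal>) -> i64 {
    match total {
        None => 0,
        Some(t) => t.to_i64().unwrap_or(i64::MAX),
    }
}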
Empty file.
@@ -0,0 +1 @@
ALTER TABLE attachments MODIFY file_size BIGINT NOT NULL;
Empty file.
@@ -0,0 +1,3 @@
ALTER TABLE attachments
ALTER COLUMN file_size TYPE BIGINT,
ALTER COLUMN file_size SET NOT NULL;
Empty file.
@@ -0,0 +1 @@
-- Integer size in SQLite is already i64, so we don't need to do anything
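Taken together, the three migrations above (MySQL, PostgreSQL and SQLite; their file paths are not rendered in this excerpt) widen attachments.file_size from a 32-bit to a 64-bit integer. The matching Diesel schema entry would presumably look like the following sketch (column set assumed, not taken from this commit):

// Hypothetical schema entry; the real schema.rs changes are not shown here.
diesel::table! {
    attachments (id) {
        id -> Text,
        cipher_uuid -> Text,
        file_name -> Text,
        file_size -> BigInt, // previously Integer (i32)
        akey -> Nullable<Text>,
    }
}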
7 changes: 4 additions & 3 deletions src/api/admin.rs
@@ -15,7 +15,7 @@ use rocket::{
use crate::{
api::{
core::{log_event, two_factor},
unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString,
unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify,
},
auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
config::ConfigBuilder,
@@ -24,6 +24,7 @@ use crate::{
mail,
util::{
docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
NumberOrString,
},
CONFIG, VERSION,
};
@@ -345,7 +346,7 @@ async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<
let mut usr = u.to_json(&mut conn).await;
usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await));
usr["user_enabled"] = json!(u.enabled);
usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
usr["last_active"] = match u.last_active(&mut conn).await {
@@ -549,7 +550,7 @@ async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResu
org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await));
organizations_json.push(org);
}

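Note that the as i32 casts around Attachment::size_by_user and Attachment::size_by_org are gone, which implies that util::get_display_size (util.rs is among the changed files but is not rendered in this excerpt) now takes the byte count as an i64. A hypothetical stand-in with that signature:

// Hypothetical formatter, not the actual util.rs code: formats a byte count
// taken as i64 so large attachment totals no longer need a narrowing cast.
fn get_display_size(size: i64) -> String {
    const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];
    let mut value = size as f64;
    let mut unit = 0;
    while value.abs() >= 1024.0 && unit < UNITS.len() - 1 {
        value /= 1024.0;
        unit += 1;
    }
    format!("{:.2} {}", value, UNITS[unit])
}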
6 changes: 4 additions & 2 deletions src/api/core/accounts.rs
@@ -6,12 +6,14 @@ use serde_json::Value;
use crate::{
api::{
core::log_user_event, register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult,
JsonUpcase, Notify, NumberOrString, PasswordOrOtpData, UpdateType,
JsonUpcase, Notify, PasswordOrOtpData, UpdateType,
},
auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
crypto,
db::{models::*, DbConn},
mail, CONFIG,
mail,
util::NumberOrString,
CONFIG,
};

use rocket::{
78 changes: 58 additions & 20 deletions src/api/core/ciphers.rs
@@ -1,6 +1,7 @@
use std::collections::{HashMap, HashSet};

use chrono::{NaiveDateTime, Utc};
use num_traits::ToPrimitive;
use rocket::fs::TempFile;
use rocket::serde::json::Json;
use rocket::{
@@ -956,7 +957,7 @@ async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut c
struct AttachmentRequestData {
Key: String,
FileName: String,
FileSize: i32,
FileSize: i64,
AdminRequest: Option<bool>, // true when attaching from an org vault view
}

@@ -985,8 +986,11 @@ async fn post_attachment_v2(
err!("Cipher is not write accessible")
}

let attachment_id = crypto::generate_attachment_id();
let data: AttachmentRequestData = data.into_inner().data;
if data.FileSize < 0 {
err!("Attachment size can't be negative")
}
let attachment_id = crypto::generate_attachment_id();
let attachment =
Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key));
attachment.save(&mut conn).await.expect("Error saving attachment");
@@ -1028,6 +1032,15 @@ async fn save_attachment(
mut conn: DbConn,
nt: Notify<'_>,
) -> Result<(Cipher, DbConn), crate::error::Error> {
let mut data = data.into_inner();

let Some(size) = data.data.len().to_i64() else {
err!("Attachment data size overflow");
};
if size < 0 {
err!("Attachment size can't be negative")
}

let cipher = match Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
Some(cipher) => cipher,
None => err!("Cipher doesn't exist"),
@@ -1040,42 +1053,60 @@
// In the v2 API, the attachment record has already been created,
// so the size limit needs to be adjusted to account for that.
let size_adjust = match &attachment {
None => 0, // Legacy API
Some(a) => i64::from(a.file_size), // v2 API
None => 0, // Legacy API
Some(a) => a.file_size, // v2 API
};

let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
match CONFIG.user_attachment_limit() {
Some(0) => err!("Attachments are disabled"),
Some(limit_kb) => {
let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &mut conn).await + size_adjust;
let already_used = Attachment::size_by_user(user_uuid, &mut conn).await;
let left = limit_kb
.checked_mul(1024)
.and_then(|l| l.checked_sub(already_used))
.and_then(|l| l.checked_add(size_adjust));

let Some(left) = left else {
err!("Attachment size overflow");
};

if left <= 0 {
err!("Attachment storage limit reached! Delete some attachments to free up space")
}
Some(left as u64)

Some(left)
}
None => None,
}
} else if let Some(ref org_uuid) = cipher.organization_uuid {
match CONFIG.org_attachment_limit() {
Some(0) => err!("Attachments are disabled"),
Some(limit_kb) => {
let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &mut conn).await + size_adjust;
let already_used = Attachment::size_by_org(org_uuid, &mut conn).await;
let left = limit_kb
.checked_mul(1024)
.and_then(|l| l.checked_sub(already_used))
.and_then(|l| l.checked_add(size_adjust));

let Some(left) = left else {
err!("Attachment size overflow");
};

if left <= 0 {
err!("Attachment storage limit reached! Delete some attachments to free up space")
}
Some(left as u64)

Some(left)
}
None => None,
}
} else {
err!("Cipher is neither owned by a user nor an organization");
};

let mut data = data.into_inner();

if let Some(size_limit) = size_limit {
if data.data.len() > size_limit {
if size > size_limit {
err!("Attachment storage limit exceeded with this file");
}
}
@@ -1085,20 +1116,19 @@
None => crypto::generate_attachment_id(), // Legacy API
};

let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid);
let file_path = folder_path.join(&file_id);
tokio::fs::create_dir_all(&folder_path).await?;

let size = data.data.len() as i32;
if let Some(attachment) = &mut attachment {
// v2 API

// Check the actual size against the size initially provided by
// the client. Upstream allows +/- 1 MiB deviation from this
// size, but it's not clear when or why this is needed.
const LEEWAY: i32 = 1024 * 1024; // 1 MiB
let min_size = attachment.file_size - LEEWAY;
let max_size = attachment.file_size + LEEWAY;
const LEEWAY: i64 = 1024 * 1024; // 1 MiB
let Some(min_size) = attachment.file_size.checked_sub(LEEWAY) else {
err!("Invalid attachment size min")
};
let Some(max_size) = attachment.file_size.checked_add(LEEWAY) else {
err!("Invalid attachment size max")
};

if min_size <= size && size <= max_size {
if size != attachment.file_size {
@@ -1113,6 +1143,10 @@
}
} else {
// Legacy API

// SAFETY: This value is only stored in the database and is not used to access the file system.
// As a result, the conditions specified by Rocket [0] are met and this is safe to use.
// [0]: https://docs.rs/rocket/latest/rocket/fs/struct.FileName.html#-danger-
let encrypted_filename = data.data.raw_name().map(|s| s.dangerous_unsafe_unsanitized_raw().to_string());

if encrypted_filename.is_none() {
@@ -1122,10 +1156,14 @@
err!("No attachment key provided")
}
let attachment =
Attachment::new(file_id, String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key);
Attachment::new(file_id.clone(), String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key);
attachment.save(&mut conn).await.expect("Error saving attachment");
}

let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid);
let file_path = folder_path.join(&file_id);
tokio::fs::create_dir_all(&folder_path).await?;

if let Err(_err) = data.data.persist_to(&file_path).await {
data.data.move_copy_to(file_path).await?
}
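Summing up the new validation in ciphers.rs: the client-declared size must be non-negative, the uploaded payload length must fit in an i64, and in the v2 flow the actual size may deviate from the declared size by at most 1 MiB. A condensed, hypothetical version of those checks (not the commit's code, which is spread across post_attachment_v2 and save_attachment above):

use num_traits::ToPrimitive;

const LEEWAY: i64 = 1024 * 1024; // 1 MiB, the upstream deviation allowance

// Returns the validated byte count, or a reason to reject the upload.
fn validate_upload(declared: i64, uploaded_len: usize) -> Result<i64, &'static str> {
    if declared < 0 {
        return Err("attachment size can't be negative");
    }
    let actual = uploaded_len.to_i64().ok_or("attachment data size overflow")?;
    let min = declared.checked_sub(LEEWAY).ok_or("invalid attachment size min")?;
    let max = declared.checked_add(LEEWAY).ok_or("invalid attachment size max")?;
    if actual < min || actual > max {
        return Err("attachment size does not match the declared size");
    }
    Ok(actual)
}

For example, validate_upload(10_000, 10_500) is accepted because the 500-byte difference is well within the leeway, while a payload several megabytes larger than declared is rejected.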
6 changes: 4 additions & 2 deletions src/api/core/emergency_access.rs
@@ -5,11 +5,13 @@ use serde_json::Value;
use crate::{
api::{
core::{CipherSyncData, CipherSyncType},
EmptyResult, JsonResult, JsonUpcase, NumberOrString,
EmptyResult, JsonResult, JsonUpcase,
},
auth::{decode_emergency_access_invite, Headers},
db::{models::*, DbConn, DbPool},
mail, CONFIG,
mail,
util::NumberOrString,
CONFIG,
};

pub fn routes() -> Vec<Route> {
5 changes: 2 additions & 3 deletions src/api/core/organizations.rs
@@ -6,14 +6,13 @@ use serde_json::Value;
use crate::{
api::{
core::{log_event, two_factor, CipherSyncData, CipherSyncType},
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordOrOtpData,
UpdateType,
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, PasswordOrOtpData, UpdateType,
},
auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
db::{models::*, DbConn},
error::Error,
mail,
util::convert_json_key_lcase_first,
util::{convert_json_key_lcase_first, NumberOrString},
CONFIG,
};
