// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use std::sync::Arc;

use http::StatusCode;

use super::core::DbfsCore;
use super::error::parse_error;
use crate::raw::*;
use crate::*;
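
/// A writer for DBFS (Databricks File System) backed by `DbfsCore`.
///
/// DBFS content is uploaded here with a single create-file call, so the
/// whole buffer is sent at once (see the `oio::OneShotWrite` impl below).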
pub struct DbfsWriter {
    core: Arc<DbfsCore>,
    path: String,
}

impl DbfsWriter {
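    /// DBFS returns MAX_BLOCK_SIZE_EXCEEDED for contents larger than 1 MB,
    /// so this is the upper bound for a single one-shot upload.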
    const MAX_SIMPLE_SIZE: usize = 1024 * 1024;
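
    /// Create a `DbfsWriter` for the file at `path`.
    ///
    /// The `OpWrite` options are accepted for interface parity but are
    /// currently unused.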
    pub fn new(core: Arc<DbfsCore>, _op: OpWrite, path: String) -> Self {
        DbfsWriter { core, path }
    }
}
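
// DBFS files are written with one create-file request; append-style uploads
// have not been implemented, so only `oio::OneShotWrite` is provided.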
impl oio::OneShotWrite for DbfsWriter {
    async fn write_once(&self, bs: Buffer) -> Result<Metadata> {
        let size = bs.len();

        // DBFS throws MAX_BLOCK_SIZE_EXCEEDED if this limit (1 MB) is exceeded,
        // and append-style uploads are not implemented, so reject large buffers early.
        if size >= Self::MAX_SIMPLE_SIZE {
            return Err(Error::new(
                ErrorKind::Unsupported,
                "AppendWrite has not been implemented for Dbfs",
            ));
        }

        // Send the whole buffer as the body of a single create-file request.
        let req = self
            .core
            .dbfs_create_file_request(&self.path, bs.to_bytes())?;

        let resp = self.core.client.send(req).await?;

        let status = resp.status();
        match status {
            // DBFS may answer a successful create with either 201 or 200.
            StatusCode::CREATED | StatusCode::OK => Ok(Metadata::default()),
            _ => Err(parse_error(resp)),
        }
    }
}
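
// A minimal usage sketch, kept as a comment because it is not part of this
// module: end users reach this writer through the public `Operator` API.
// The builder methods shown (`root`, `endpoint`, `token`), the chained builder
// style, and the placeholder endpoint are assumptions, not guarantees.
//
//     use opendal::services::Dbfs;
//     use opendal::Operator;
//
//     async fn example() -> opendal::Result<()> {
//         let builder = Dbfs::default()
//             .root("/opendal")
//             .endpoint("https://<databricks-instance-url>")
//             .token("<databricks-personal-access-token>");
//
//         let op = Operator::new(builder)?.finish();
//
//         // The payload must stay under MAX_SIMPLE_SIZE (1 MB) for this
//         // one-shot writer to accept it.
//         op.write("hello.txt", "Hello, DBFS!").await?;
//         Ok(())
//     }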