-
Notifications
You must be signed in to change notification settings - Fork 40
Expand file tree
/
Copy pathin_memory_cluster.rs
More file actions
122 lines (104 loc) · 3.92 KB
/
in_memory_cluster.rs
File metadata and controls
122 lines (104 loc) · 3.92 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
use arrow::util::pretty::pretty_format_batches;
use async_trait::async_trait;
use datafusion::common::DataFusionError;
use datafusion::execution::SessionStateBuilder;
use datafusion::prelude::{ParquetReadOptions, SessionContext};
use datafusion_distributed::{
BoxCloneSyncChannel, ChannelResolver, DistributedExt, SessionStateBuilderExt, Worker,
WorkerQueryContext, WorkerResolver, WorkerServiceClient, create_worker_client,
display_plan_ascii,
};
use futures::TryStreamExt;
use hyper_util::rt::TokioIo;
use std::error::Error;
use structopt::StructOpt;
use tonic::transport::{Endpoint, Server};
// Command-line arguments for this example binary.
// NOTE: field names and structopt attributes define the CLI surface
// (`run <QUERY> [--show-distributed-plan]`); do not rename them.
#[derive(StructOpt)]
#[structopt(
    name = "run",
    about = "Run a query in an in-memory Distributed DataFusion cluster"
)]
struct Args {
    /// The SQL query to run.
    #[structopt()]
    query: String,
    /// Whether the distributed plan should be rendered instead of executing the query.
    #[structopt(long)]
    show_distributed_plan: bool,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let args = Args::from_args();
let state = SessionStateBuilder::new()
.with_default_features()
.with_distributed_worker_resolver(InMemoryWorkerResolver)
.with_distributed_channel_resolver(InMemoryChannelResolver::new())
.with_distributed_planner()
.with_distributed_files_per_task(1)?
.build();
let ctx = SessionContext::from(state);
ctx.register_parquet("weather", "testdata/weather", ParquetReadOptions::default())
.await?;
let df = ctx.sql(&args.query).await?;
if args.show_distributed_plan {
let plan = df.create_physical_plan().await?;
println!("{}", display_plan_ascii(plan.as_ref(), false));
} else {
let stream = df.execute_stream().await?;
let batches = stream.try_collect::<Vec<_>>().await?;
let formatted = pretty_format_batches(&batches)?;
println!("{formatted}");
}
Ok(())
}
// Placeholder address: endpoints connect lazily through the in-memory duplex,
// so nothing ever actually listens on this port.
const DUMMY_URL: &str = "http://localhost:50051";

/// [ChannelResolver] implementation that returns gRPC clients backed by an in-memory
/// tokio duplex rather than a TCP connection.
#[derive(Clone)]
struct InMemoryChannelResolver {
    // Pre-built client over the duplex-backed channel; cloned on every resolution.
    channel: WorkerServiceClient<BoxCloneSyncChannel>,
}
impl InMemoryChannelResolver {
    /// Builds the resolver: creates an in-memory duplex pipe, points a lazily
    /// connecting gRPC client at one end, and spawns a worker server on the other.
    fn new() -> Self {
        // 1 MiB in-memory pipe standing in for a TCP connection.
        let (client, server) = tokio::io::duplex(1024 * 1024);

        // The connector closure below may run at most once: it moves the
        // client half of the duplex out of this Option.
        let mut client = Some(client);
        let channel = Endpoint::try_from(DUMMY_URL)
            .expect("Invalid dummy URL for building an endpoint. This should never happen")
            // Connection is deferred until the first request; at that point
            // the duplex client half is handed to the transport.
            .connect_with_connector_lazy(tower::service_fn(move |_| {
                let client = client
                    .take()
                    .expect("Client taken twice. This should never happen");
                async move { Ok::<_, std::io::Error>(TokioIo::new(client)) }
            }));

        let this = Self {
            channel: create_worker_client(BoxCloneSyncChannel::new(channel)),
        };
        // `this` is moved into the session-builder closure below, so keep a
        // clone around to return to the caller.
        let this_clone = this.clone();

        // Each worker query session is configured with this same resolver, so
        // any further worker-to-worker calls also stay in-process.
        let endpoint = Worker::from_session_builder(move |ctx: WorkerQueryContext| {
            let this = this.clone();
            async move { Ok(ctx.builder.with_distributed_channel_resolver(this).build()) }
        });

        // Serve the worker over the server half of the duplex. The incoming
        // "listener" yields exactly one connection: this in-memory pipe.
        tokio::spawn(async move {
            Server::builder()
                .add_service(endpoint.into_worker_server())
                .serve_with_incoming(tokio_stream::once(Ok::<_, std::io::Error>(server)))
                .await
        });
        this_clone
    }
}
#[async_trait]
impl ChannelResolver for InMemoryChannelResolver {
    /// Returns the single duplex-backed client regardless of the requested
    /// URL: every "worker" address resolves to the same in-memory channel.
    async fn get_worker_client_for_url(
        &self,
        _: &url::Url,
    ) -> Result<WorkerServiceClient<BoxCloneSyncChannel>, DataFusionError> {
        Ok(self.channel.clone())
    }
}
/// [WorkerResolver] that advertises a fixed pool of workers, all sharing the
/// same dummy URL (they are all served by the single in-memory endpoint).
struct InMemoryWorkerResolver;

impl WorkerResolver for InMemoryWorkerResolver {
    fn get_urls(&self) -> Result<Vec<url::Url>, DataFusionError> {
        // Simulate a 16-node cluster: 16 copies of the same in-memory URL.
        let worker_url = url::Url::parse(DUMMY_URL).unwrap();
        Ok((0..16).map(|_| worker_url.clone()).collect())
    }
}