import{s as ae,n as se,o as le}from"../chunks/scheduler.d6170356.js";import{S as ne,i as ie,g as i,s as l,r as p,A as oe,h as o,f as a,c as n,j as te,u as r,x as d,k as ee,y as de,a as s,v as m,d as u,t as h,w as f}from"../chunks/index.fcd4cc08.js";import{C as c}from"../chunks/CodeBlock.7b16bdef.js";import{H as Ft,E as pe}from"../chunks/EditOnGithub.da2b595c.js";function re(It){let y,K,L,O,M,tt,w,vt='<a href="https://github.com/dask/dask" rel="nofollow">Dask</a> is a parallel and distributed computing library that scales the existing Python and PyData ecosystem.',et,k,xt="In particular, we can use Dask DataFrame to scale up pandas workflows. Dask DataFrame parallelizes pandas to handle large tabular data. It closely mirrors the pandas API, making it simple to transition from testing on a single dataset to processing the full dataset. Dask is particularly effective with Parquet, the default format on Hugging Face Datasets, as it supports rich data types, efficient columnar filtering, and compression.",at,g,Ht='A good practical use case for Dask is running data processing or model inference on a dataset in a distributed manner. See, for example, Coiled’s excellent blog post on <a href="https://huggingface.co/blog/dask-scaling" rel="nofollow">Scaling AI-Based Data Processing with Hugging Face + Dask</a>.',st,J,lt,T,Vt='Since Dask uses <a href="https://filesystem-spec.readthedocs.io" rel="nofollow">fsspec</a> to read and write remote data, you can use the Hugging Face paths (<a href="/docs/huggingface_hub/guides/hf_file_system#integrations"><code>hf://</code></a>) to read and write data on the Hub;',nt,b,qt='First you need to <a href="/docs/huggingface_hub/quick-start#login">Login with your Hugging Face account</a>, for example using:',it,G,ot,$,Yt='Then you can <a href="/docs/huggingface_hub/quick-start#create-a-repository">Create a dataset repository</a>, for example using:',dt,Z,pt,U,Wt=`Finally, you can use <a href="/docs/huggingface_hub/guides/hf_file_system#integrations">Hugging Face paths</a> in Dask.
Dask DataFrame supports distributed writing to Parquet on Hugging Face, which uses commits to track dataset changes:`,rt,j,mt,_,zt="Since this creates one commit per file, it is recommended to squash the history after the upload:",ut,B,ht,X,Pt=`This creates a dataset repository <code>username/my_dataset</code> containing your Dask dataset in Parquet format.
You can reload it later:`,ft,R,ct,C,Qt='For more information on the Hugging Face paths and how they are implemented, please refer to the <a href="/docs/huggingface_hub/guides/hf_file_system">the client library’s documentation on the HfFileSystem</a>.',yt,F,Mt,I,Nt="To process a dataset in parallel using Dask, you can first define your data processing function for a pandas DataFrame or Series, and then use the Dask <code>map_partitions</code> function to apply this function to all the partitions of a dataset in parallel:",wt,v,kt,x,St="In pandas you can use this function on a text column:",gt,H,Jt,V,Dt="And in Dask you can run this function on every partition:",Tt,q,bt,Y,Et=`Note that you also need to provide <code>meta</code> which is the type of the pandas Series or DataFrame in the output of your function.
This is needed because Dask DataFrame uses a lazy API. Since Dask will only run the data processing once <code>.compute()</code> is called, it needs
the <code>meta</code> argument to know the type of the new column in the meantime.`,Gt,W,$t,z,Lt="When reading Parquet data from Hugging Face, Dask automatically leverages the metadata in Parquet files to skip entire files or row groups if they are not needed. For example if you apply a filter (predicate) on a Hugging Face Dataset in Parquet format or if you select a subset of the columns (projection), Dask will read the metadata of the Paquet files to discard the parts that are not needed without downloading them.",Zt,P,At='This is possible thanks to the <code>dask-expr</code> package which is generally installed by default with Dask. You can read more about <code>dask-expr</code> in its <a href="https://blog.dask.org/2023/08/25/dask-expr-introduction" rel="nofollow">introduction blog post</a> and in this more recent <a href="https://blog.dask.org/2024/05/30/dask-is-fast#optimizer" rel="nofollow">blog post on dask optimizations</a>',Ut,Q,Kt="For example this subset of FineWeb-Edu contains many Parquet files. If you can filter the dataset to keep the text from recent CC dumps, Dask will skip most of the files and only download the data that match the filter:",jt,N,_t,S,Ot=`Dask will also read only the required columns for your computation and skip the rest.
For example if you drop a column late in your code, it will not bother to load it early on in the pipeline if it’s not needed.
This is useful when you want to manipulate a subset of the columns or for analytics:`,Bt,D,Xt,E,Rt,A,Ct;return M=new Ft({props:{title:"Dask",local:"dask",headingTag:"h1"}}),J=new Ft({props:{title:"Read and Write",local:"read-and-write",headingTag:"h2"}}),G=new c({props:{code:"aHVnZ2luZ2ZhY2UtY2xpJTIwbG9naW4=",highlighted:'huggingface-<span class="hljs-keyword">cli</span> login',wrap:!1}}),Z=new c({props:{code:"ZnJvbSUyMGh1Z2dpbmdmYWNlX2h1YiUyMGltcG9ydCUyMEhmQXBpJTBBJTBBSGZBcGkoKS5jcmVhdGVfcmVwbyhyZXBvX2lkJTNEJTIydXNlcm5hbWUlMkZteV9kYXRhc2V0JTIyJTJDJTIwcmVwb190eXBlJTNEJTIyZGF0YXNldCUyMik=",highlighted:`<span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> HfApi

HfApi().create_repo(repo_id=<span class="hljs-string">&quot;username/my_dataset&quot;</span>, repo_type=<span class="hljs-string">&quot;dataset&quot;</span>)`,wrap:!1}}),j=new c({props:{code:"aW1wb3J0JTIwZGFzay5kYXRhZnJhbWUlMjBhcyUyMGRkJTBBJTBBZGYudG9fcGFycXVldCglMjJoZiUzQSUyRiUyRmRhdGFzZXRzJTJGdXNlcm5hbWUlMkZteV9kYXRhc2V0JTIyKSUwQSUwQSUyMyUyMG9yJTIwd3JpdGUlMjBpbiUyMHNlcGFyYXRlJTIwZGlyZWN0b3JpZXMlMjBpZiUyMHRoZSUyMGRhdGFzZXQlMjBoYXMlMjB0cmFpbiUyRnZhbGlkYXRpb24lMkZ0ZXN0JTIwc3BsaXRzJTBBZGZfdHJhaW4udG9fcGFycXVldCglMjJoZiUzQSUyRiUyRmRhdGFzZXRzJTJGdXNlcm5hbWUlMkZteV9kYXRhc2V0JTJGdHJhaW4lMjIpJTBBZGZfdmFsaWQudG9fcGFycXVldCglMjJoZiUzQSUyRiUyRmRhdGFzZXRzJTJGdXNlcm5hbWUlMkZteV9kYXRhc2V0JTJGdmFsaWRhdGlvbiUyMiklMEFkZl90ZXN0JTIwLnRvX3BhcnF1ZXQoJTIyaGYlM0ElMkYlMkZkYXRhc2V0cyUyRnVzZXJuYW1lJTJGbXlfZGF0YXNldCUyRnRlc3QlMjIp",highlighted:`<span class="hljs-keyword">import</span> dask.dataframe <span class="hljs-keyword">as</span> dd

df.to_parquet(<span class="hljs-string">&quot;hf://datasets/username/my_dataset&quot;</span>)

<span class="hljs-comment"># or write in separate directories if the dataset has train/validation/test splits</span>
df_train.to_parquet(<span class="hljs-string">&quot;hf://datasets/username/my_dataset/train&quot;</span>)
df_valid.to_parquet(<span class="hljs-string">&quot;hf://datasets/username/my_dataset/validation&quot;</span>)
df_test .to_parquet(<span class="hljs-string">&quot;hf://datasets/username/my_dataset/test&quot;</span>)`,wrap:!1}}),B=new c({props:{code:"ZnJvbSUyMGh1Z2dpbmdmYWNlX2h1YiUyMGltcG9ydCUyMEhmQXBpJTBBJTBBSGZBcGkoKS5zdXBlcl9zcXVhc2hfaGlzdG9yeShyZXBvX2lkJTNEcmVwb19pZCUyQyUyMHJlcG9fdHlwZSUzRCUyMmRhdGFzZXQlMjIp",highlighted:`<span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> HfApi

HfApi().super_squash_history(repo_id=repo_id, repo_type=<span class="hljs-string">&quot;dataset&quot;</span>)`,wrap:!1}}),R=new c({props:{code:"aW1wb3J0JTIwZGFzay5kYXRhZnJhbWUlMjBhcyUyMGRkJTBBJTBBZGYlMjAlM0QlMjBkZC5yZWFkX3BhcnF1ZXQoJTIyaGYlM0ElMkYlMkZkYXRhc2V0cyUyRnVzZXJuYW1lJTJGbXlfZGF0YXNldCUyMiklMEElMEElMjMlMjBvciUyMHJlYWQlMjBmcm9tJTIwc2VwYXJhdGUlMjBkaXJlY3RvcmllcyUyMGlmJTIwdGhlJTIwZGF0YXNldCUyMGhhcyUyMHRyYWluJTJGdmFsaWRhdGlvbiUyRnRlc3QlMjBzcGxpdHMlMEFkZl90cmFpbiUyMCUzRCUyMGRkLnJlYWRfcGFycXVldCglMjJoZiUzQSUyRiUyRmRhdGFzZXRzJTJGdXNlcm5hbWUlMkZteV9kYXRhc2V0JTJGdHJhaW4lMjIpJTBBZGZfdmFsaWQlMjAlM0QlMjBkZC5yZWFkX3BhcnF1ZXQoJTIyaGYlM0ElMkYlMkZkYXRhc2V0cyUyRnVzZXJuYW1lJTJGbXlfZGF0YXNldCUyRnZhbGlkYXRpb24lMjIpJTBBZGZfdGVzdCUyMCUyMCUzRCUyMGRkLnJlYWRfcGFycXVldCglMjJoZiUzQSUyRiUyRmRhdGFzZXRzJTJGdXNlcm5hbWUlMkZteV9kYXRhc2V0JTJGdGVzdCUyMik=",highlighted:`<span class="hljs-keyword">import</span> dask.dataframe <span class="hljs-keyword">as</span> dd

df = dd.read_parquet(<span class="hljs-string">&quot;hf://datasets/username/my_dataset&quot;</span>)

<span class="hljs-comment"># or read from separate directories if the dataset has train/validation/test splits</span>
df_train = dd.read_parquet(<span class="hljs-string">&quot;hf://datasets/username/my_dataset/train&quot;</span>)
df_valid = dd.read_parquet(<span class="hljs-string">&quot;hf://datasets/username/my_dataset/validation&quot;</span>)
df_test  = dd.read_parquet(<span class="hljs-string">&quot;hf://datasets/username/my_dataset/test&quot;</span>)`,wrap:!1}}),F=new Ft({props:{title:"Process data",local:"process-data",headingTag:"h2"}}),v=new c({props:{code:"ZGVmJTIwZHVtbXlfY291bnRfd29yZHModGV4dHMpJTNBJTBBJTIwJTIwJTIwJTIwcmV0dXJuJTIwcGQuU2VyaWVzKCU1Qmxlbih0ZXh0LnNwbGl0KCUyMiUyMCUyMikpJTIwZm9yJTIwdGV4dCUyMGluJTIwdGV4dHMlNUQp",highlighted:`<span class="hljs-keyword">def</span> <span class="hljs-title function_">dummy_count_words</span>(<span class="hljs-params">texts</span>):
    <span class="hljs-keyword">return</span> pd.Series([<span class="hljs-built_in">len</span>(text.split(<span class="hljs-string">&quot; &quot;</span>)) <span class="hljs-keyword">for</span> text <span class="hljs-keyword">in</span> texts])`,wrap:!1}}),H=new c({props:{code:"JTIzJTIwcGFuZGFzJTIwQVBJJTBBZGYlNUIlMjJudW1fd29yZHMlMjIlNUQlMjAlM0QlMjBkdW1teV9jb3VudF93b3JkcyhkZi50ZXh0KQ==",highlighted:`<span class="hljs-comment"># pandas API</span>
df[<span class="hljs-string">&quot;num_words&quot;</span>] = dummy_count_words(df.text)`,wrap:!1}}),q=new c({props:{code:"JTIzJTIwRGFzayUyMEFQSSUzQSUyMHJ1biUyMHRoZSUyMGZ1bmN0aW9uJTIwb24lMjBldmVyeSUyMHBhcnRpdGlvbiUwQWRmJTVCJTIybnVtX3dvcmRzJTIyJTVEJTIwJTNEJTIwZGYudGV4dC5tYXBfcGFydGl0aW9ucyhkdW1teV9jb3VudF93b3JkcyUyQyUyMG1ldGElM0RpbnQp",highlighted:`<span class="hljs-comment"># Dask API: run the function on every partition</span>
df[<span class="hljs-string">&quot;num_words&quot;</span>] = df.text.map_partitions(dummy_count_words, meta=<span class="hljs-built_in">int</span>)`,wrap:!1}}),W=new Ft({props:{title:"Predicate and Projection Pushdown",local:"predicate-and-projection-pushdown",headingTag:"h2"}}),N=new c({props:{code:"aW1wb3J0JTIwZGFzay5kYXRhZnJhbWUlMjBhcyUyMGRkJTBBJTBBZGYlMjAlM0QlMjBkZC5yZWFkX3BhcnF1ZXQoJTIyaGYlM0ElMkYlMkZkYXRhc2V0cyUyRkh1Z2dpbmdGYWNlRlclMkZmaW5ld2ViLWVkdSUyRnNhbXBsZSUyRjEwQlQlMkYqLnBhcnF1ZXQlMjIpJTBBJTBBJTIzJTIwRGFzayUyMHdpbGwlMjBza2lwJTIwdGhlJTIwZmlsZXMlMjBvciUyMHJvdyUyMGdyb3VwcyUyMHRoYXQlMjBkb24ndCUwQSUyMyUyMG1hdGNoJTIwdGhlJTIwcXVlcnklMjB3aXRob3V0JTIwZG93bmxvYWRpbmclMjB0aGVtLiUwQWRmJTIwJTNEJTIwZGYlNUJkZi5kdW1wJTIwJTNFJTNEJTIwJTIyQ0MtTUFJTi0yMDIzJTIyJTVE",highlighted:`<span class="hljs-keyword">import</span> dask.dataframe <span class="hljs-keyword">as</span> dd

df = dd.read_parquet(<span class="hljs-string">&quot;hf://datasets/HuggingFaceFW/fineweb-edu/sample/10BT/*.parquet&quot;</span>)

<span class="hljs-comment"># Dask will skip the files or row groups that don&#x27;t</span>
<span class="hljs-comment"># match the query without downloading them.</span>
df = df[df.dump &gt;= <span class="hljs-string">&quot;CC-MAIN-2023&quot;</span>]`,wrap:!1}}),D=new c({props:{code:"JTIzJTIwRGFzayUyMHdpbGwlMjBkb3dubG9hZCUyMHRoZSUyMCdkdW1wJyUyMGFuZCUyMCd0b2tlbl9jb3VudCclMjBuZWVkZWQlMEElMjMlMjBmb3IlMjB0aGUlMjBmaWx0ZXJpbmclMjBhbmQlMjBjb21wdXRhdGlvbiUyMGFuZCUyMHNraXAlMjB0aGUlMjBvdGhlciUyMGNvbHVtbnMuJTBBZGYudG9rZW5fY291bnQubWVhbigpLmNvbXB1dGUoKQ==",highlighted:`<span class="hljs-comment"># Dask will download the &#x27;dump&#x27; and &#x27;token_count&#x27; needed</span>
<span class="hljs-comment"># for the filtering and computation and skip the other columns.</span>
df.token_count.mean().compute()`,wrap:!1}}),E=new pe({props:{source:"https://github.com/huggingface/hub-docs/blob/main/docs/hub/datasets-dask.md"}}),{c(){y=i("meta"),K=l(),L=i("p"),O=l(),p(M.$$.fragment),tt=l(),w=i("p"),w.innerHTML=vt,et=l(),k=i("p"),k.textContent=xt,at=l(),g=i("p"),g.innerHTML=Ht,st=l(),p(J.$$.fragment),lt=l(),T=i("p"),T.innerHTML=Vt,nt=l(),b=i("p"),b.innerHTML=qt,it=l(),p(G.$$.fragment),ot=l(),$=i("p"),$.innerHTML=Yt,dt=l(),p(Z.$$.fragment),pt=l(),U=i("p"),U.innerHTML=Wt,rt=l(),p(j.$$.fragment),mt=l(),_=i("p"),_.textContent=zt,ut=l(),p(B.$$.fragment),ht=l(),X=i("p"),X.innerHTML=Pt,ft=l(),p(R.$$.fragment),ct=l(),C=i("p"),C.innerHTML=Qt,yt=l(),p(F.$$.fragment),Mt=l(),I=i("p"),I.innerHTML=Nt,wt=l(),p(v.$$.fragment),kt=l(),x=i("p"),x.textContent=St,gt=l(),p(H.$$.fragment),Jt=l(),V=i("p"),V.textContent=Dt,Tt=l(),p(q.$$.fragment),bt=l(),Y=i("p"),Y.innerHTML=Et,Gt=l(),p(W.$$.fragment),$t=l(),z=i("p"),z.textContent=Lt,Zt=l(),P=i("p"),P.innerHTML=At,Ut=l(),Q=i("p"),Q.textContent=Kt,jt=l(),p(N.$$.fragment),_t=l(),S=i("p"),S.textContent=Ot,Bt=l(),p(D.$$.fragment),Xt=l(),p(E.$$.fragment),Rt=l(),A=i("p"),this.h()},l(t){const e=oe("svelte-u9bgzb",document.head);y=o(e,"META",{name:!0,content:!0}),e.forEach(a),K=n(t),L=o(t,"P",{}),te(L).forEach(a),O=n(t),r(M.$$.fragment,t),tt=n(t),w=o(t,"P",{"data-svelte-h":!0}),d(w)!=="svelte-95bzmo"&&(w.innerHTML=vt),et=n(t),k=o(t,"P",{"data-svelte-h":!0}),d(k)!=="svelte-1msg9ee"&&(k.textContent=xt),at=n(t),g=o(t,"P",{"data-svelte-h":!0}),d(g)!=="svelte-lxhxnp"&&(g.innerHTML=Ht),st=n(t),r(J.$$.fragment,t),lt=n(t),T=o(t,"P",{"data-svelte-h":!0}),d(T)!=="svelte-187c3vf"&&(T.innerHTML=Vt),nt=n(t),b=o(t,"P",{"data-svelte-h":!0}),d(b)!=="svelte-1jdzygp"&&(b.innerHTML=qt),it=n(t),r(G.$$.fragment,t),ot=n(t),$=o(t,"P",{"data-svelte-h":!0}),d($)!=="svelte-wfb25d"&&($.innerHTML=Yt),dt=n(t),r(Z.$$.fragment,t),pt=n(t),U=o(t,"P",{"data-svelte-h":!0}),d(U)!=="svelte-1qjbxya"&&(U.innerHTML=Wt),rt=n(t),r(j.$$.fragment,t),mt=n(t),_=o(t,"P",{"data-svelte-h":!0}),d(_)!=="svelte-1g4eau0"&&(_.textContent=zt),ut=n(t),r(B.$$.fragment,t),ht=n(t),X=o(t,"P",{"data-svelte-h":!0}),d(X)!=="svelte-14rczfs"&&(X.innerHTML=Pt),ft=n(t),r(R.$$.fragment,t),ct=n(t),C=o(t,"P",{"data-svelte-h":!0}),d(C)!=="svelte-smuvdf"&&(C.innerHTML=Qt),yt=n(t),r(F.$$.fragment,t),Mt=n(t),I=o(t,"P",{"data-svelte-h":!0}),d(I)!=="svelte-jyx2ul"&&(I.innerHTML=Nt),wt=n(t),r(v.$$.fragment,t),kt=n(t),x=o(t,"P",{"data-svelte-h":!0}),d(x)!=="svelte-1qxyr3j"&&(x.textContent=St),gt=n(t),r(H.$$.fragment,t),Jt=n(t),V=o(t,"P",{"data-svelte-h":!0}),d(V)!=="svelte-1387ba7"&&(V.textContent=Dt),Tt=n(t),r(q.$$.fragment,t),bt=n(t),Y=o(t,"P",{"data-svelte-h":!0}),d(Y)!=="svelte-1php7k6"&&(Y.innerHTML=Et),Gt=n(t),r(W.$$.fragment,t),$t=n(t),z=o(t,"P",{"data-svelte-h":!0}),d(z)!=="svelte-zxghfy"&&(z.textContent=Lt),Zt=n(t),P=o(t,"P",{"data-svelte-h":!0}),d(P)!=="svelte-dqvw6a"&&(P.innerHTML=At),Ut=n(t),Q=o(t,"P",{"data-svelte-h":!0}),d(Q)!=="svelte-nn7d20"&&(Q.textContent=Kt),jt=n(t),r(N.$$.fragment,t),_t=n(t),S=o(t,"P",{"data-svelte-h":!0}),d(S)!=="svelte-13j15e1"&&(S.textContent=Ot),Bt=n(t),r(D.$$.fragment,t),Xt=n(t),r(E.$$.fragment,t),Rt=n(t),A=o(t,"P",{}),te(A).forEach(a),this.h()},h(){ee(y,"name","hf:doc:metadata"),ee(y,"content",me)},m(t,e){de(document.head,y),s(t,K,e),s(t,L,e),s(t,O,e),m(M,t,e),s(t,tt,e),s(t,w,e),s(t,et,e),s(t,k,e),s(t,at,e),s(t,g,e),s(t,st,e),m(J,t,e),s(t,lt,e),s(t,T,e),s(t,nt,e),s(t,b,e),s(t,it,e),m(G,t,e),s(t,ot,e),s(t,$,e),s(t,dt,e),m(Z,t,e),s(t,pt,e),s(t,U,e),s(t,rt,e),m(j,t,e),s(t,mt,e),s(t,_,e),s(t,ut,e),m(B,t,e),s(t,ht,e),s(t,X,e),s(t,ft,e),m(R,t,e),s(t,ct,e),s(t,C,e),s(t,yt,e),m(F,t,e),s(t,Mt,e),s(t,I,e),s(t,wt,e),m(v,t,e),s(t,kt,e),s(t,x,e),s(t,gt,e),m(H,t,e),s(t,Jt,e),s(t,V,e),s(t,Tt,e),m(q,t,e),s(t,bt,e),s(t,Y,e),s(t,Gt,e),m(W,t,e),s(t,$t,e),s(t,z,e),s(t,Zt,e),s(t,P,e),s(t,Ut,e),s(t,Q,e),s(t,jt,e),m(N,t,e),s(t,_t,e),s(t,S,e),s(t,Bt,e),m(D,t,e),s(t,Xt,e),m(E,t,e),s(t,Rt,e),s(t,A,e),Ct=!0},p:se,i(t){Ct||(u(M.$$.fragment,t),u(J.$$.fragment,t),u(G.$$.fragment,t),u(Z.$$.fragment,t),u(j.$$.fragment,t),u(B.$$.fragment,t),u(R.$$.fragment,t),u(F.$$.fragment,t),u(v.$$.fragment,t),u(H.$$.fragment,t),u(q.$$.fragment,t),u(W.$$.fragment,t),u(N.$$.fragment,t),u(D.$$.fragment,t),u(E.$$.fragment,t),Ct=!0)},o(t){h(M.$$.fragment,t),h(J.$$.fragment,t),h(G.$$.fragment,t),h(Z.$$.fragment,t),h(j.$$.fragment,t),h(B.$$.fragment,t),h(R.$$.fragment,t),h(F.$$.fragment,t),h(v.$$.fragment,t),h(H.$$.fragment,t),h(q.$$.fragment,t),h(W.$$.fragment,t),h(N.$$.fragment,t),h(D.$$.fragment,t),h(E.$$.fragment,t),Ct=!1},d(t){t&&(a(K),a(L),a(O),a(tt),a(w),a(et),a(k),a(at),a(g),a(st),a(lt),a(T),a(nt),a(b),a(it),a(ot),a($),a(dt),a(pt),a(U),a(rt),a(mt),a(_),a(ut),a(ht),a(X),a(ft),a(ct),a(C),a(yt),a(Mt),a(I),a(wt),a(kt),a(x),a(gt),a(Jt),a(V),a(Tt),a(bt),a(Y),a(Gt),a($t),a(z),a(Zt),a(P),a(Ut),a(Q),a(jt),a(_t),a(S),a(Bt),a(Xt),a(Rt),a(A)),a(y),f(M,t),f(J,t),f(G,t),f(Z,t),f(j,t),f(B,t),f(R,t),f(F,t),f(v,t),f(H,t),f(q,t),f(W,t),f(N,t),f(D,t),f(E,t)}}}const me='{"title":"Dask","local":"dask","sections":[{"title":"Read and Write","local":"read-and-write","sections":[],"depth":2},{"title":"Process data","local":"process-data","sections":[],"depth":2},{"title":"Predicate and Projection Pushdown","local":"predicate-and-projection-pushdown","sections":[],"depth":2}],"depth":1}';function ue(It){return le(()=>{new URLSearchParams(window.location.search).get("fw")}),[]}class Me extends ne{constructor(y){super(),ie(this,y,ue,re,ae,{})}}export{Me as component};
