Cheerio Scraper
No credit card required
Actor Metrics
1 monthly user
-
2 bookmarks
>99% runs succeeded
Created in Apr 2019
Modified 6 years ago
You can access the Cheerio Scraper programmatically from your own applications by using the Apify API. You can choose the language preference from below. To use the Apify API, you’ll need an Apify account and your API token, found in Integrations settings in Apify Console.
{
  "openapi": "3.0.1",
  "info": {
    "title": "Cheerio Scraper",
    "version": "0.0",
    "x-build-id": "jwg4pAhw2i5uMPmKy"
  },
  "servers": [
    {
      "url": "https://api.apify.com/v2"
    }
  ],
  "paths": {
    "/acts/apify~cheerio-scraper/run-sync-get-dataset-items": {
      "post": {
        "operationId": "run-sync-get-dataset-items-apify-cheerio-scraper",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor, waits for its completion, and returns Actor's dataset items in response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK"
          }
        }
      }
    },
    "/acts/apify~cheerio-scraper/runs": {
      "post": {
        "operationId": "runs-sync-apify-cheerio-scraper",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor and returns information about the initiated run in response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/runsResponseSchema"
                }
              }
            }
          }
        }
      }
    },
    "/acts/apify~cheerio-scraper/run-sync": {
      "post": {
        "operationId": "run-sync-apify-cheerio-scraper",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from Key-value store in response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK"
          }
        }
      }
    }
  },
  "components": {
    "schemas": {
      "inputSchema": {
        "type": "object",
        "required": [
          "startUrls",
          "pageFunction"
        ],
        "properties": {
          "startUrls": {
            "title": "Start URLs",
            "type": "array",
            "description": "URLs to start with",
            "items": {
              "type": "object",
              "required": [
                "url"
              ],
              "properties": {
                "url": {
                  "type": "string",
                  "title": "URL of a web page",
                  "format": "uri"
                }
              }
            }
          },
          "useRequestQueue": {
            "title": "Use request queue",
            "type": "boolean",
            "description": "Request queue enables recursive crawling and the use of Pseudo-URLs and Link selector.",
            "default": true
          },
          "pseudoUrls": {
            "title": "Pseudo-URLs",
            "type": "array",
            "description": "Pseudo-URLs to match links in the page that you want to enqueue. Combine with Link selector to tell the crawler where to find links.",
            "default": [],
            "items": {
              "type": "object",
              "required": [
                "purl"
              ],
              "properties": {
                "purl": {
                  "type": "string",
                  "title": "Pseudo-URL of a web page"
                }
              }
            }
          },
          "linkSelector": {
            "title": "Link selector",
            "type": "string",
            "description": "CSS selector matching elements with 'href' attributes that should be enqueued. To enqueue urls from '<div class=\"my-class\" href=...>' tags, you would enter 'div.my-class'."
          },
          "pageFunction": {
            "title": "Page function",
            "type": "string",
            "description": "Function executed for each request"
          },
          "proxyConfiguration": {
            "title": "Proxy configuration",
            "type": "object",
            "description": "Choose to use no proxy, Apify Proxy, or provide custom proxy URLs.",
            "default": {}
          },
          "debugLog": {
            "title": "Debug log",
            "type": "boolean",
            "description": "Debug messages will be included in the log. Use <code>context.log.debug('message')</code> to log your own debug messages.",
            "default": false
          },
          "ignoreSslErrors": {
            "title": "Ignore SSL errors",
            "type": "boolean",
            "description": "Crawler will ignore SSL certificate errors.",
            "default": false
          },
          "useCookieJar": {
            "title": "(UNSTABLE) Save cookies",
            "type": "boolean",
            "description": "The scraper will use a cookie jar to persist cookies between requests. This is a temporary solution and the feature is UNSTABLE, meaning that it will most likely be removed in the future and replaced with a different API. Use at your own risk.",
            "default": false
          },
          "maxRequestRetries": {
            "title": "Max request retries",
            "minimum": 0,
            "type": "integer",
            "description": "Maximum number of times the request for the page will be retried in case of an error. Setting it to 0 means that the request will be attempted once and will not be retried if it fails.",
            "default": 3
          },
          "maxPagesPerCrawl": {
            "title": "Max pages per crawl",
            "minimum": 0,
            "type": "integer",
            "description": "Maximum number of pages that the crawler will open. 0 means unlimited.",
            "default": 0
          },
          "maxResultsPerCrawl": {
            "title": "Max result records",
            "minimum": 0,
            "type": "integer",
            "description": "Maximum number of results that will be saved to dataset. The crawler will terminate afterwards. 0 means unlimited.",
            "default": 0
          },
          "maxCrawlingDepth": {
            "title": "Max crawling depth",
            "minimum": 0,
            "type": "integer",
            "description": "Defines how many links away from the StartURLs will the crawler descend. 0 means unlimited.",
            "default": 0
          },
          "maxConcurrency": {
            "title": "Max concurrency",
            "minimum": 1,
            "type": "integer",
            "description": "Defines how many pages can be processed by the scraper in parallel. The scraper automatically increases and decreases concurrency based on available system resources. Use this option to set a hard limit.",
            "default": 50
          },
          "pageLoadTimeoutSecs": {
            "title": "Page load timeout",
            "minimum": 1,
            "maximum": 360,
            "type": "integer",
            "description": "Maximum time the crawler will allow a web page to load in seconds.",
            "default": 60
          },
          "pageFunctionTimeoutSecs": {
            "title": "Page function timeout",
            "minimum": 1,
            "maximum": 360,
            "type": "integer",
            "description": "Maximum time the crawler will wait for the page function to execute in seconds.",
            "default": 60
          },
          "customData": {
            "title": "Custom data",
            "type": "object",
            "description": "This object will be available on pageFunction's context as customData.",
            "default": {}
          },
          "initialCookies": {
            "title": "Initial cookies",
            "type": "array",
            "description": "The provided cookies will be pre-set to all pages the scraper opens.",
            "default": []
          }
        }
      },
      "runsResponseSchema": {
        "type": "object",
        "properties": {
          "data": {
            "type": "object",
            "properties": {
              "id": {
                "type": "string"
              },
              "actId": {
                "type": "string"
              },
              "userId": {
                "type": "string"
              },
              "startedAt": {
                "type": "string",
                "format": "date-time",
                "example": "2025-01-08T00:00:00.000Z"
              },
              "finishedAt": {
                "type": "string",
                "format": "date-time",
                "example": "2025-01-08T00:00:00.000Z"
              },
              "status": {
                "type": "string",
                "example": "READY"
              },
              "meta": {
                "type": "object",
                "properties": {
                  "origin": {
                    "type": "string",
                    "example": "API"
                  },
                  "userAgent": {
                    "type": "string"
                  }
                }
              },
              "stats": {
                "type": "object",
                "properties": {
                  "inputBodyLen": {
                    "type": "integer",
                    "example": 2000
                  },
                  "rebootCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "restartCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "resurrectCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "computeUnits": {
                    "type": "integer",
                    "example": 0
                  }
                }
              },
              "options": {
                "type": "object",
                "properties": {
                  "build": {
                    "type": "string",
                    "example": "latest"
                  },
                  "timeoutSecs": {
                    "type": "integer",
                    "example": 300
                  },
                  "memoryMbytes": {
                    "type": "integer",
                    "example": 1024
                  },
                  "diskMbytes": {
                    "type": "integer",
                    "example": 2048
                  }
                }
              },
              "buildId": {
                "type": "string"
              },
              "defaultKeyValueStoreId": {
                "type": "string"
              },
              "defaultDatasetId": {
                "type": "string"
              },
              "defaultRequestQueueId": {
                "type": "string"
              },
              "buildNumber": {
                "type": "string",
                "example": "1.0.0"
              },
              "containerUrl": {
                "type": "string"
              },
              "usage": {
                "type": "object",
                "properties": {
                  "ACTOR_COMPUTE_UNITS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_WRITES": {
                    "type": "integer",
                    "example": 1
                  },
                  "KEY_VALUE_STORE_LISTS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_INTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_EXTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_SERPS": {
                    "type": "integer",
                    "example": 0
                  }
                }
              },
              "usageTotalUsd": {
                "type": "number",
                "example": 0.00005
              },
              "usageUsd": {
                "type": "object",
                "properties": {
                  "ACTOR_COMPUTE_UNITS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_WRITES": {
                    "type": "number",
                    "example": 0.00005
                  },
                  "KEY_VALUE_STORE_LISTS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_INTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_EXTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_SERPS": {
                    "type": "integer",
                    "example": 0
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
Cheerio Scraper OpenAPI definition
OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.
OpenAPI is effective when used with AI agents and GPTs because it standardizes how these systems interact with various APIs, enabling reliable integrations and efficient communication.
By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.
You can download the OpenAPI definitions for Cheerio Scraper from the options below:
If you’d like to learn more about how OpenAPI powers GPTs, read our blog post.
You can also check out our other API clients: