/*
 * block.c --- iterate over all blocks in an inode
 *
 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Public
 * License.
 * %End-Header%
 */

#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "ext2_fs.h"
#include "ext2fs.h"

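/*
 * Private state shared by the iteration helpers below: the filesystem,
 * the caller's callback and private data, the running logical block
 * count, and scratch buffers for the single, double, and triple
 * indirect blocks.
 */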
struct block_context {
	ext2_filsys	fs;
	int (*func)(ext2_filsys fs,
		    blk_t *blocknr,
		    e2_blkcnt_t bcount,
		    blk_t ref_blk,
		    int ref_offset,
		    void *priv_data);
	e2_blkcnt_t	bcount;
	int		bsize;
	int		flags;
	errcode_t	errcode;
	char		*ind_buf;
	char		*dind_buf;
	char		*tind_buf;
	void		*priv_data;
};

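/*
 * Walk a single indirect block: report the indirect block itself to the
 * callback (before its contents, or after them if
 * BLOCK_FLAG_DEPTH_TRAVERSE is set, and not at all with
 * BLOCK_FLAG_DATA_ONLY), validate the block number, then invoke the
 * callback on each of the blocksize/4 entries it contains.  If any
 * callback returned BLOCK_CHANGED, the modified indirect block is
 * written back to disk.
 */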
static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
			     int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
		ret = (*ctx->func)(ctx->fs, ind_block,
				   BLOCK_COUNT_IND, ref_block,
				   ref_offset, ctx->priv_data);
	if (!*ind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit;
		return ret;
	}
	if (*ind_block >= ctx->fs->super->s_blocks_count ||
	    *ind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
					     ctx->ind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->ind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			changed |= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			if (*block_nr == 0)
				continue;
			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			changed |= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
						      ctx->ind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, ind_block,
				    BLOCK_COUNT_IND, ref_block,
				    ref_offset, ctx->priv_data);
	return ret;
}

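/*
 * Walk a double indirect block: same structure as block_iterate_ind(),
 * except that each entry is itself a single indirect block and is
 * handed off to block_iterate_ind().  Empty entries still advance
 * ctx->bcount by a full indirect block's worth of entries.
 */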
static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY)))
		ret = (*ctx->func)(ctx->fs, dind_block,
				   BLOCK_COUNT_DIND, ref_block,
				   ref_offset, ctx->priv_data);
	if (!*dind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit*limit;
		return ret;
	}
	if (*dind_block >= ctx->fs->super->s_blocks_count ||
	    *dind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
					     ctx->dind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->dind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				ctx->bcount += limit;
				continue;
			}
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
						      ctx->dind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, dind_block,
				    BLOCK_COUNT_DIND, ref_block,
				    ref_offset, ctx->priv_data);
	return ret;
}

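/*
 * Walk the triple indirect block: each entry is a double indirect
 * block, handed off to block_iterate_dind().  Empty entries advance
 * ctx->bcount by limit*limit, the number of data blocks a double
 * indirect block can address.
 */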
static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY)))
		ret = (*ctx->func)(ctx->fs, tind_block,
				   BLOCK_COUNT_TIND, ref_block,
				   ref_offset, ctx->priv_data);
	if (!*tind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit*limit*limit;
		return ret;
	}
	if (*tind_block >= ctx->fs->super->s_blocks_count ||
	    *tind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
					     ctx->tind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->tind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				ctx->bcount += limit*limit;
				continue;
			}
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
						      ctx->tind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, tind_block,
				    BLOCK_COUNT_TIND, ref_block,
				    ref_offset, ctx->priv_data);

	return ret;
}

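/*
 * Iterate over every block of an inode, invoking the caller's callback
 * on the direct blocks, the indirect block trees, and (on Hurd
 * filesystems) the translator block.  The callback returns a bitmask:
 * BLOCK_CHANGED causes the containing indirect block, or the inode's
 * i_block array, to be written back; BLOCK_ABORT stops the iteration.
 * With BLOCK_FLAG_APPEND the callback also sees empty (zero) entries;
 * with BLOCK_FLAG_DATA_ONLY it is not called on metadata blocks; with
 * BLOCK_FLAG_DEPTH_TRAVERSE metadata blocks are reported after their
 * contents instead of before.  If BLOCK_ERROR ends up set, the function
 * returns ctx.errcode instead of 0.
 */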
errcode_t ext2fs_block_iterate2(ext2_filsys fs,
				ext2_ino_t ino,
				int flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk_t *blocknr,
					    e2_blkcnt_t blockcnt,
					    blk_t ref_blk,
					    int ref_offset,
					    void *priv_data),
				void *priv_data)
{
	int	i;
	int	got_inode = 0;
	int	ret = 0;
	blk_t	blocks[EXT2_N_BLOCKS];	/* directory data blocks */
	struct ext2_inode	inode;
	errcode_t		retval;
	struct block_context	ctx;
	int	limit;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	/*
	 * Check to see if we need to limit large files
	 */
	if (flags & BLOCK_FLAG_NO_LARGE) {
		ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
		if (ctx.errcode)
			return ctx.errcode;
		got_inode = 1;
		if (!LINUX_S_ISDIR(inode.i_mode) &&
		    (inode.i_size_high != 0))
			return EXT2_ET_FILE_TOO_BIG;
	}

	retval = ext2fs_get_blocks(fs, ino, blocks);
	if (retval)
		return retval;

	limit = fs->blocksize >> 2;

	ctx.fs = fs;
	ctx.func = func;
	ctx.priv_data = priv_data;
	ctx.flags = flags;
	ctx.bcount = 0;
	if (block_buf) {
		ctx.ind_buf = block_buf;
	} else {
		retval = ext2fs_get_mem(fs->blocksize * 3, &ctx.ind_buf);
		if (retval)
			return retval;
	}
	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
	ctx.tind_buf = ctx.dind_buf + fs->blocksize;

	/*
	 * Iterate over the HURD translator block (if present)
	 */
	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
		ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
		if (ctx.errcode)
			goto abort_exit;
		got_inode = 1;
		if (inode.osd1.hurd1.h_i_translator) {
			ret |= (*ctx.func)(fs,
					   &inode.osd1.hurd1.h_i_translator,
					   BLOCK_COUNT_TRANSLATOR,
					   0, 0, priv_data);
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}

	/*
	 * Iterate over normal data blocks
	 */
	for (i = 0; i < EXT2_NDIR_BLOCKS; i++, ctx.bcount++) {
		if (blocks[i] || (flags & BLOCK_FLAG_APPEND)) {
			ret |= (*ctx.func)(fs, &blocks[i],
					   ctx.bcount, 0, i, priv_data);
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}
	if (*(blocks + EXT2_IND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_ind(blocks + EXT2_IND_BLOCK,
					 0, EXT2_IND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit;
	if (*(blocks + EXT2_DIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_dind(blocks + EXT2_DIND_BLOCK,
					  0, EXT2_DIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit * limit;
	if (*(blocks + EXT2_TIND_BLOCK) || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_tind(blocks + EXT2_TIND_BLOCK,
					  0, EXT2_TIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	}

abort_exit:
	if (ret & BLOCK_CHANGED) {
		if (!got_inode) {
			retval = ext2fs_read_inode(fs, ino, &inode);
			if (retval)
				return retval;
		}
		for (i = 0; i < EXT2_N_BLOCKS; i++)
			inode.i_block[i] = blocks[i];
		retval = ext2fs_write_inode(fs, ino, &inode);
		if (retval)
			return retval;
	}

	if (!block_buf)
		ext2fs_free_mem(&ctx.ind_buf);

	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
}

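/*
 * Illustrative sketch (not part of the library): one way a caller might
 * use ext2fs_block_iterate2() to count the blocks referenced by an
 * inode.  Since BLOCK_FLAG_DATA_ONLY is not passed, indirect blocks are
 * counted as well.  The guard macro EXT2FS_BLOCK_ITERATE_EXAMPLE is
 * hypothetical and exists only to keep this sketch out of normal
 * builds.
 */
#ifdef EXT2FS_BLOCK_ITERATE_EXAMPLE
static int example_count_block(ext2_filsys fs EXT2FS_ATTR((unused)),
			       blk_t *blocknr,
			       e2_blkcnt_t blockcnt EXT2FS_ATTR((unused)),
			       blk_t ref_blk EXT2FS_ATTR((unused)),
			       int ref_offset EXT2FS_ATTR((unused)),
			       void *priv_data)
{
	blk_t *count = (blk_t *) priv_data;

	if (*blocknr)
		(*count)++;
	return 0;	/* nothing changed; keep iterating */
}

static errcode_t example_count_inode_blocks(ext2_filsys fs, ext2_ino_t ino,
					    blk_t *count)
{
	*count = 0;
	return ext2fs_block_iterate2(fs, ino, 0, NULL,
				     example_count_block, count);
}
#endif /* EXT2FS_BLOCK_ITERATE_EXAMPLE */
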
/*
 * Emulate the old ext2fs_block_iterate function!
 */

struct xlate {
	int (*func)(ext2_filsys fs,
		    blk_t *blocknr,
		    int bcount,
		    void *priv_data);
	void *real_private;
};

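/*
 * Adapter passed to ext2fs_block_iterate2(): it drops the ref_block and
 * ref_offset arguments and narrows the block count to int before
 * calling the old-style callback stored in struct xlate.
 */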
#ifdef __TURBOC__
#pragma argsused
#endif
static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
		      blk_t ref_block EXT2FS_ATTR((unused)),
		      int ref_offset EXT2FS_ATTR((unused)),
		      void *priv_data)
{
	struct xlate *xl = (struct xlate *) priv_data;

	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
}

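/*
 * The old-style callback takes the block count as an int, so the
 * compatibility wrapper adds BLOCK_FLAG_NO_LARGE, which makes
 * ext2fs_block_iterate2() refuse non-directory files that use
 * i_size_high (EXT2_ET_FILE_TOO_BIG).
 */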
errcode_t ext2fs_block_iterate(ext2_filsys fs,
			       ext2_ino_t ino,
			       int flags,
			       char *block_buf,
			       int (*func)(ext2_filsys fs,
					   blk_t *blocknr,
					   int blockcnt,
					   void *priv_data),
			       void *priv_data)
{
	struct xlate xl;

	xl.real_private = priv_data;
	xl.func = func;

	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
				     block_buf, xlate_func, &xl);
}