Line | Data |
---|---|
1 | /* |
2 | * ispstat.c | |
3 | * | |
4 | * TI OMAP3 ISP - Statistics core | |
5 | * | |
6 | * Copyright (C) 2010 Nokia Corporation | |
7 | * Copyright (C) 2009 Texas Instruments, Inc | |
8 | * | |
9 | * Contacts: David Cohen <dacohen@gmail.com> | |
10 | * Laurent Pinchart <laurent.pinchart@ideasonboard.com> | |
11 | * Sakari Ailus <sakari.ailus@iki.fi> | |
12 | * | |
13 | * This program is free software; you can redistribute it and/or modify | |
14 | * it under the terms of the GNU General Public License version 2 as | |
15 | * published by the Free Software Foundation. | |
16 | * | |
17 | * This program is distributed in the hope that it will be useful, but | |
18 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
20 | * General Public License for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License | |
23 | * along with this program; if not, write to the Free Software | |
24 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | |
25 | * 02110-1301 USA | |
26 | */ | |
27 | ||
28 | #include <linux/dma-mapping.h> | |
29 | #include <linux/slab.h> | |
30 | #include <linux/uaccess.h> | |
31 | ||
32 | #include "isp.h" | |
33 | ||
34 | #define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0) | |
35 | ||
36 | /* | |
37 | * MAGIC_SIZE must always be the greatest common divisor of | |
38 | * AEWB_PACKET_SIZE and AF_PAXEL_SIZE. | |
39 | */ | |
40 | #define MAGIC_SIZE 16 | |
41 | #define MAGIC_NUM 0x55 | |
42 | ||
43 | /* HACK: The AF module seems to write one more paxel of data than it should. */ |
44 | #define AF_EXTRA_DATA OMAP3ISP_AF_PAXEL_SIZE | |
45 | ||
46 | /* | |
47 | * HACK: H3A modules enter an invalid state after an SBL overflow. It makes |
48 | * the next buffer start to be written at the point where the overflow |
49 | * occurred instead of at the configured address. The only known way to bring |
50 | * the module back to a valid state is to let it process a valid buffer. Of |
51 | * course that requires at least a doubled buffer size to avoid accesses to an |
52 | * invalid memory region, but it does not fix everything: more than one |
53 | * consecutive SBL overflow may happen. In that case it is unpredictable how |
54 | * many buffers the allocated memory should fit. For that case a recovery |
55 | * configuration was created. It produces the minimum buffer size for each H3A |
56 | * module and decreases the chance of more SBL overflows. This recovery state |
57 | * is enabled every time an SBL overflow occurs. As the recovery buffer size |
58 | * isn't big, it's possible to allocate extra space able to fit many recovery |
59 | * buffers, making it extremely unlikely to have an access to an invalid |
60 | * memory region. |
61 | */ | |
62 | #define NUM_H3A_RECOVER_BUFS 10 | |
63 | ||
64 | /* | |
65 | * HACK: Because of HW issues the generic layer sometimes needs to behave |
66 | * differently for different statistic modules. |
67 | */ | |
68 | #define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af) | |
69 | #define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb) | |
70 | #define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat)) | |
71 | ||
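| /* |
| * Sync the initial and final magic word regions of a non-coherent buffer |
| * with the given dma_sync callback (for CPU or device access). |
| */ |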
72 | static void __isp_stat_buf_sync_magic(struct ispstat *stat, | |
73 | struct ispstat_buffer *buf, | |
74 | u32 buf_size, enum dma_data_direction dir, | |
75 | void (*dma_sync)(struct device *, | |
76 | dma_addr_t, unsigned long, size_t, | |
77 | enum dma_data_direction)) | |
78 | { | |
79 | struct device *dev = stat->isp->dev; | |
80 | struct page *pg; | |
81 | dma_addr_t dma_addr; | |
82 | u32 offset; | |
83 | ||
84 | /* Initial magic words */ | |
85 | pg = vmalloc_to_page(buf->virt_addr); | |
86 | dma_addr = pfn_to_dma(dev, page_to_pfn(pg)); | |
87 | dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir); | |
88 | ||
89 | /* Final magic words */ | |
90 | pg = vmalloc_to_page(buf->virt_addr + buf_size); | |
91 | dma_addr = pfn_to_dma(dev, page_to_pfn(pg)); | |
92 | offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK; | |
93 | dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir); | |
94 | } | |
95 | ||
96 | static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, | |
97 | struct ispstat_buffer *buf, | |
98 | u32 buf_size, | |
99 | enum dma_data_direction dir) | |
100 | { | |
101 | if (IS_COHERENT_BUF(stat)) | |
102 | return; | |
103 | ||
104 | __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, | |
105 | dma_sync_single_range_for_device); | |
106 | } | |
107 | ||
108 | static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat, | |
109 | struct ispstat_buffer *buf, | |
110 | u32 buf_size, | |
111 | enum dma_data_direction dir) | |
112 | { | |
113 | if (IS_COHERENT_BUF(stat)) | |
114 | return; | |
115 | ||
116 | __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, | |
117 | dma_sync_single_range_for_cpu); | |
118 | } | |
119 | ||
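| /* |
| * Check buffer integrity: the initial magic words must have been overwritten |
| * by the hardware, and the magic words after the payload must be untouched. |
| */ |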
120 | static int isp_stat_buf_check_magic(struct ispstat *stat, | |
121 | struct ispstat_buffer *buf) | |
122 | { | |
123 | const u32 buf_size = IS_H3A_AF(stat) ? | |
124 | buf->buf_size + AF_EXTRA_DATA : buf->buf_size; | |
125 | u8 *w; | |
126 | u8 *end; | |
127 | int ret = -EINVAL; | |
128 | ||
129 | isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); | |
130 | ||
131 | /* Checking initial magic numbers. They shouldn't be here anymore. */ | |
132 | for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++) | |
133 | if (likely(*w != MAGIC_NUM)) | |
134 | ret = 0; | |
135 | ||
136 | if (ret) { | |
137 | dev_dbg(stat->isp->dev, "%s: beginning magic check does not " | |
138 | "match.\n", stat->subdev.name); | |
139 | return ret; | |
140 | } | |
141 | ||
142 | /* Checking magic numbers at the end. They must be still here. */ | |
143 | for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE; | |
144 | w < end; w++) { | |
145 | if (unlikely(*w != MAGIC_NUM)) { | |
146 | dev_dbg(stat->isp->dev, "%s: ending magic check does " |
147 | "not match.\n", stat->subdev.name); | |
148 | return -EINVAL; | |
149 | } | |
150 | } | |
151 | ||
152 | isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, | |
153 | DMA_FROM_DEVICE); | |
154 | ||
155 | return 0; | |
156 | } | |
157 | ||
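| /* Write MAGIC_NUM patterns at the start and right after the end of the buffer. */ |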
158 | static void isp_stat_buf_insert_magic(struct ispstat *stat, | |
159 | struct ispstat_buffer *buf) | |
160 | { | |
161 | const u32 buf_size = IS_H3A_AF(stat) ? | |
162 | stat->buf_size + AF_EXTRA_DATA : stat->buf_size; | |
163 | ||
164 | isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); | |
165 | ||
166 | /* | |
167 | * Inserting MAGIC_NUM at the beginning and end of the buffer. | |
168 | * buf->buf_size is set only after the buffer is queued. For now the | |
169 | * right buf_size for the current configuration is given by |
170 | * stat->buf_size. | |
171 | */ | |
172 | memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE); | |
173 | memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE); | |
174 | ||
175 | isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, | |
176 | DMA_BIDIRECTIONAL); | |
177 | } | |
178 | ||
179 | static void isp_stat_buf_sync_for_device(struct ispstat *stat, | |
180 | struct ispstat_buffer *buf) | |
181 | { | |
182 | if (IS_COHERENT_BUF(stat)) | |
183 | return; | |
184 | ||
185 | dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl, | |
186 | buf->iovm->sgt->nents, DMA_FROM_DEVICE); | |
187 | } | |
188 | ||
189 | static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, | |
190 | struct ispstat_buffer *buf) | |
191 | { | |
192 | if (IS_COHERENT_BUF(stat)) | |
193 | return; | |
194 | ||
195 | dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl, | |
196 | buf->iovm->sgt->nents, DMA_FROM_DEVICE); | |
197 | } | |
198 | ||
199 | static void isp_stat_buf_clear(struct ispstat *stat) | |
200 | { | |
201 | int i; | |
202 | ||
203 | for (i = 0; i < STAT_MAX_BUFS; i++) | |
204 | stat->buf[i].empty = 1; | |
205 | } | |
206 | ||
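| /* |
| * Find a buffer which is neither locked for userspace nor active: prefer an |
| * empty one when look_empty is set, otherwise return the oldest filled one. |
| */ |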
207 | static struct ispstat_buffer * | |
208 | __isp_stat_buf_find(struct ispstat *stat, int look_empty) | |
209 | { | |
210 | struct ispstat_buffer *found = NULL; | |
211 | int i; | |
212 | ||
213 | for (i = 0; i < STAT_MAX_BUFS; i++) { | |
214 | struct ispstat_buffer *curr = &stat->buf[i]; | |
215 | ||
216 | /* | |
217 | * Don't select the buffer which is being copied to | |
218 | * userspace or used by the module. | |
219 | */ | |
220 | if (curr == stat->locked_buf || curr == stat->active_buf) | |
221 | continue; | |
222 | ||
223 | /* Don't select uninitialised buffers if it's not required */ | |
224 | if (!look_empty && curr->empty) | |
225 | continue; | |
226 | ||
227 | /* Pick uninitialised buffer over anything else if look_empty */ | |
228 | if (curr->empty) { | |
229 | found = curr; | |
230 | break; | |
231 | } | |
232 | ||
233 | /* Choose the oldest buffer */ | |
234 | if (!found || | |
235 | (s32)curr->frame_number - (s32)found->frame_number < 0) | |
236 | found = curr; | |
237 | } | |
238 | ||
239 | return found; | |
240 | } | |
241 | ||
242 | static inline struct ispstat_buffer * | |
243 | isp_stat_buf_find_oldest(struct ispstat *stat) | |
244 | { | |
245 | return __isp_stat_buf_find(stat, 0); | |
246 | } | |
247 | ||
248 | static inline struct ispstat_buffer * | |
249 | isp_stat_buf_find_oldest_or_empty(struct ispstat *stat) | |
250 | { | |
251 | return __isp_stat_buf_find(stat, 1); | |
252 | } | |
253 | ||
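| /* |
| * Mark the active buffer as filled: timestamp it, check its magic words and |
| * record the current frame number and config counter. |
| */ |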
254 | static int isp_stat_buf_queue(struct ispstat *stat) | |
255 | { | |
256 | if (!stat->active_buf) | |
257 | return STAT_NO_BUF; | |
258 | ||
259 | do_gettimeofday(&stat->active_buf->ts); | |
260 | ||
261 | stat->active_buf->buf_size = stat->buf_size; | |
262 | if (isp_stat_buf_check_magic(stat, stat->active_buf)) { | |
263 | dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n", | |
264 | stat->subdev.name); | |
265 | return STAT_NO_BUF; | |
266 | } | |
267 | stat->active_buf->config_counter = stat->config_counter; | |
268 | stat->active_buf->frame_number = stat->frame_number; | |
269 | stat->active_buf->empty = 0; | |
270 | stat->active_buf = NULL; | |
271 | ||
272 | return STAT_BUF_DONE; | |
273 | } | |
274 | ||
275 | /* Get next free buffer to write the statistics to and mark it active. */ | |
276 | static void isp_stat_buf_next(struct ispstat *stat) | |
277 | { | |
278 | if (unlikely(stat->active_buf)) | |
279 | /* Overwriting unused active buffer */ | |
280 | dev_dbg(stat->isp->dev, "%s: new buffer requested without " | |
281 | "queuing active one.\n", | |
282 | stat->subdev.name); | |
283 | else | |
284 | stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat); | |
285 | } | |
286 | ||
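| /* Give the buffer locked for userspace back to the device. */ |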
287 | static void isp_stat_buf_release(struct ispstat *stat) | |
288 | { | |
289 | unsigned long flags; | |
290 | ||
291 | isp_stat_buf_sync_for_device(stat, stat->locked_buf); | |
292 | spin_lock_irqsave(&stat->isp->stat_lock, flags); | |
293 | stat->locked_buf = NULL; | |
294 | spin_unlock_irqrestore(&stat->isp->stat_lock, flags); | |
295 | } | |
296 | ||
297 | /* Get a buffer to be copied to userspace. */ |
298 | static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat, | |
299 | struct omap3isp_stat_data *data) | |
300 | { | |
301 | int rval = 0; | |
302 | unsigned long flags; | |
303 | struct ispstat_buffer *buf; | |
304 | ||
305 | spin_lock_irqsave(&stat->isp->stat_lock, flags); | |
306 | ||
307 | while (1) { | |
308 | buf = isp_stat_buf_find_oldest(stat); | |
309 | if (!buf) { | |
310 | spin_unlock_irqrestore(&stat->isp->stat_lock, flags); | |
311 | dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n", | |
312 | stat->subdev.name); | |
313 | return ERR_PTR(-EBUSY); | |
314 | } | |
315 | if (isp_stat_buf_check_magic(stat, buf)) { | |
316 | dev_dbg(stat->isp->dev, "%s: current buffer has " | |
317 | "corrupted data\n.", stat->subdev.name); | |
318 | /* Mark empty because it doesn't have valid data. */ | |
319 | buf->empty = 1; | |
320 | } else { | |
321 | /* Buffer isn't corrupted. */ | |
322 | break; | |
323 | } | |
324 | } | |
325 | ||
326 | stat->locked_buf = buf; | |
327 | ||
328 | spin_unlock_irqrestore(&stat->isp->stat_lock, flags); | |
329 | ||
330 | if (buf->buf_size > data->buf_size) { | |
331 | dev_warn(stat->isp->dev, "%s: userspace's buffer size is " | |
332 | "not enough.\n", stat->subdev.name); | |
333 | isp_stat_buf_release(stat); | |
334 | return ERR_PTR(-EINVAL); | |
335 | } | |
336 | ||
337 | isp_stat_buf_sync_for_cpu(stat, buf); | |
338 | ||
339 | rval = copy_to_user(data->buf, | |
340 | buf->virt_addr, | |
341 | buf->buf_size); | |
342 | ||
343 | if (rval) { | |
344 | dev_info(stat->isp->dev, | |
345 | "%s: failed copying %d bytes of stat data\n", | |
346 | stat->subdev.name, rval); | |
347 | buf = ERR_PTR(-EFAULT); | |
348 | isp_stat_buf_release(stat); | |
349 | } | |
350 | ||
351 | return buf; | |
352 | } | |
353 | ||
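| /* |
| * Free all statistics buffers, through the IOMMU or the coherent DMA API |
| * depending on how they were allocated. |
| */ |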
354 | static void isp_stat_bufs_free(struct ispstat *stat) | |
355 | { | |
356 | struct isp_device *isp = stat->isp; | |
357 | int i; | |
358 | ||
359 | for (i = 0; i < STAT_MAX_BUFS; i++) { | |
360 | struct ispstat_buffer *buf = &stat->buf[i]; | |
361 | ||
362 | if (!IS_COHERENT_BUF(stat)) { | |
363 | if (IS_ERR_OR_NULL((void *)buf->iommu_addr)) | |
364 | continue; | |
365 | if (buf->iovm) | |
366 | dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, | |
367 | buf->iovm->sgt->nents, | |
368 | DMA_FROM_DEVICE); | |
369 | iommu_vfree(isp->domain, isp->iommu, buf->iommu_addr); |
370 | } else { |
371 | if (!buf->virt_addr) | |
372 | continue; | |
373 | dma_free_coherent(stat->isp->dev, stat->buf_alloc_size, | |
374 | buf->virt_addr, buf->dma_addr); | |
375 | } | |
376 | buf->iommu_addr = 0; | |
377 | buf->iovm = NULL; | |
378 | buf->dma_addr = 0; | |
379 | buf->virt_addr = NULL; | |
380 | buf->empty = 1; | |
381 | } | |
382 | ||
383 | dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n", | |
384 | stat->subdev.name); | |
385 | ||
386 | stat->buf_alloc_size = 0; | |
387 | stat->active_buf = NULL; | |
388 | } | |
389 | ||
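| /* |
| * Allocate the statistics buffers through the ISP IOMMU and map them for DMA; |
| * used for the non-coherent case. |
| */ |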
390 | static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) | |
391 | { | |
392 | struct isp_device *isp = stat->isp; | |
393 | int i; | |
394 | ||
395 | stat->buf_alloc_size = size; | |
396 | ||
397 | for (i = 0; i < STAT_MAX_BUFS; i++) { | |
398 | struct ispstat_buffer *buf = &stat->buf[i]; | |
399 | struct iovm_struct *iovm; | |
400 | ||
401 | WARN_ON(buf->dma_addr); | |
402 | buf->iommu_addr = iommu_vmalloc(isp->domain, isp->iommu, 0, |
403 | size, IOMMU_FLAG); |
404 | if (IS_ERR((void *)buf->iommu_addr)) { |
405 | dev_err(stat->isp->dev, | |
406 | "%s: Can't acquire memory for " | |
407 | "buffer %d\n", stat->subdev.name, i); | |
408 | isp_stat_bufs_free(stat); | |
409 | return -ENOMEM; | |
410 | } | |
411 | ||
412 | iovm = find_iovm_area(isp->iommu, buf->iommu_addr); | |
413 | if (!iovm || | |
414 | !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents, | |
415 | DMA_FROM_DEVICE)) { | |
416 | isp_stat_bufs_free(stat); | |
417 | return -ENOMEM; | |
418 | } | |
419 | buf->iovm = iovm; | |
420 | ||
421 | buf->virt_addr = da_to_va(stat->isp->iommu, | |
422 | (u32)buf->iommu_addr); | |
423 | buf->empty = 1; | |
424 | dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated. " |
425 | "iommu_addr=0x%08lx virt_addr=0x%08lx\n", |
426 | stat->subdev.name, i, buf->iommu_addr, | |
427 | (unsigned long)buf->virt_addr); | |
428 | } | |
429 | ||
430 | return 0; | |
431 | } | |
432 | ||
433 | static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size) | |
434 | { | |
435 | int i; | |
436 | ||
437 | stat->buf_alloc_size = size; | |
438 | ||
439 | for (i = 0; i < STAT_MAX_BUFS; i++) { | |
440 | struct ispstat_buffer *buf = &stat->buf[i]; | |
441 | ||
442 | WARN_ON(buf->iommu_addr); | |
443 | buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size, | |
444 | &buf->dma_addr, GFP_KERNEL | GFP_DMA); | |
445 | ||
446 | if (!buf->virt_addr || !buf->dma_addr) { | |
447 | dev_info(stat->isp->dev, | |
448 | "%s: Can't acquire memory for " | |
449 | "DMA buffer %d\n", stat->subdev.name, i); | |
450 | isp_stat_bufs_free(stat); | |
451 | return -ENOMEM; | |
452 | } | |
453 | buf->empty = 1; | |
454 | ||
455 | dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated. " |
456 | "dma_addr=0x%08lx virt_addr=0x%08lx\n", | |
457 | stat->subdev.name, i, (unsigned long)buf->dma_addr, | |
458 | (unsigned long)buf->virt_addr); | |
459 | } | |
460 | ||
461 | return 0; | |
462 | } | |
463 | ||
464 | static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) | |
465 | { | |
466 | unsigned long flags; | |
467 | ||
468 | spin_lock_irqsave(&stat->isp->stat_lock, flags); | |
469 | ||
470 | BUG_ON(stat->locked_buf != NULL); | |
471 | ||
472 | /* Are the old buffers big enough? */ | |
473 | if (stat->buf_alloc_size >= size) { | |
474 | spin_unlock_irqrestore(&stat->isp->stat_lock, flags); | |
475 | return 0; | |
476 | } | |
477 | ||
478 | if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) { | |
479 | dev_info(stat->isp->dev, | |
480 | "%s: trying to allocate memory when busy\n", | |
481 | stat->subdev.name); | |
482 | spin_unlock_irqrestore(&stat->isp->stat_lock, flags); | |
483 | return -EBUSY; | |
484 | } | |
485 | ||
486 | spin_unlock_irqrestore(&stat->isp->stat_lock, flags); | |
487 | ||
488 | isp_stat_bufs_free(stat); | |
489 | ||
490 | if (IS_COHERENT_BUF(stat)) | |
491 | return isp_stat_bufs_alloc_dma(stat, size); | |
492 | else | |
493 | return isp_stat_bufs_alloc_iommu(stat, size); | |
494 | } | |
495 | ||
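| /* Queue a V4L2 event to notify userspace of a new buffer or a buffer error. */ |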
496 | static void isp_stat_queue_event(struct ispstat *stat, int err) | |
497 | { | |
498 | struct video_device *vdev = &stat->subdev.devnode; | |
499 | struct v4l2_event event; | |
500 | struct omap3isp_stat_event_status *status = (void *)event.u.data; | |
501 | ||
502 | memset(&event, 0, sizeof(event)); | |
503 | if (!err) { | |
504 | status->frame_number = stat->frame_number; | |
505 | status->config_counter = stat->config_counter; | |
506 | } else { | |
507 | status->buf_err = 1; | |
508 | } | |
509 | event.type = stat->event_type; | |
510 | v4l2_event_queue(vdev, &event); | |
511 | } | |
512 | ||
513 | ||
514 | /* | |
515 | * omap3isp_stat_request_statistics - Request statistics. | |
516 | * @data: Pointer to return statistics data. | |
517 | * | |
518 | * Returns 0 if successful. | |
519 | */ | |
520 | int omap3isp_stat_request_statistics(struct ispstat *stat, | |
521 | struct omap3isp_stat_data *data) | |
522 | { | |
523 | struct ispstat_buffer *buf; | |
524 | ||
525 | if (stat->state != ISPSTAT_ENABLED) { | |
526 | dev_dbg(stat->isp->dev, "%s: engine not enabled.\n", | |
527 | stat->subdev.name); | |
528 | return -EINVAL; | |
529 | } | |
530 | ||
531 | mutex_lock(&stat->ioctl_lock); | |
532 | buf = isp_stat_buf_get(stat, data); | |
533 | if (IS_ERR(buf)) { | |
534 | mutex_unlock(&stat->ioctl_lock); | |
535 | return PTR_ERR(buf); | |
536 | } | |
537 | ||
538 | data->ts = buf->ts; | |
539 | data->config_counter = buf->config_counter; | |
540 | data->frame_number = buf->frame_number; | |
541 | data->buf_size = buf->buf_size; | |
542 | ||
543 | buf->empty = 1; | |
544 | isp_stat_buf_release(stat); | |
545 | mutex_unlock(&stat->ioctl_lock); | |
546 | ||
547 | return 0; | |
548 | } | |
549 | ||
550 | /* | |
551 | * omap3isp_stat_config - Receives new statistic engine configuration. | |
552 | * @new_conf: Pointer to config structure. | |
553 | * | |
554 | * Returns 0 if successful, -EINVAL if the new_conf pointer is NULL, -ENOMEM |
555 | * if it was unable to allocate memory for the buffer, or other errors if the |
556 | * parameters are invalid. |
557 | */ | |
558 | int omap3isp_stat_config(struct ispstat *stat, void *new_conf) | |
559 | { | |
560 | int ret; | |
561 | unsigned long irqflags; | |
562 | struct ispstat_generic_config *user_cfg = new_conf; | |
563 | u32 buf_size; |
564 | ||
565 | if (!new_conf) { |
566 | dev_dbg(stat->isp->dev, "%s: configuration is NULL\n", |
567 | stat->subdev.name); |
568 | return -EINVAL; |
569 | } |
570 | buf_size = user_cfg->buf_size; |
571 | mutex_lock(&stat->ioctl_lock); | |
572 | ||
573 | dev_dbg(stat->isp->dev, "%s: configuring module with buffer " | |
574 | "size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size); | |
575 | ||
576 | ret = stat->ops->validate_params(stat, new_conf); | |
577 | if (ret) { | |
578 | mutex_unlock(&stat->ioctl_lock); | |
579 | dev_dbg(stat->isp->dev, "%s: configuration values are " | |
580 | "invalid.\n", stat->subdev.name); | |
581 | return ret; | |
582 | } | |
583 | ||
584 | if (buf_size != user_cfg->buf_size) | |
585 | dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size " | |
586 | "request to 0x%08lx\n", stat->subdev.name, | |
587 | (unsigned long)user_cfg->buf_size); | |
588 | ||
589 | /* | |
590 | * Hack: H3A modules may need a doubled buffer size to avoid access |
591 | * to an invalid memory address after an SBL overflow. |
592 | * The buffer size is always PAGE_ALIGNED. |
593 | * Hack 2: MAGIC_SIZE is added to buf_size so magic words can be |
594 | * inserted at the end for data integrity check purposes. |
595 | * Hack 3: The AF module writes one more paxel of data than it should, |
596 | * so the buffer allocation must account for it to avoid invalid |
597 | * memory accesses. |
598 | * Hack 4: H3A needs to allocate extra space for the recovery state. |
599 | */ | |
600 | if (IS_H3A(stat)) { | |
601 | buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE; | |
602 | if (IS_H3A_AF(stat)) | |
603 | /* | |
604 | * Add one extra paxel data size for each recovery |
605 | * buffer + 2 regular ones. |
606 | */ | |
607 | buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2); | |
608 | if (stat->recover_priv) { | |
609 | struct ispstat_generic_config *recover_cfg = | |
610 | stat->recover_priv; | |
611 | buf_size += recover_cfg->buf_size * | |
612 | NUM_H3A_RECOVER_BUFS; | |
613 | } | |
614 | buf_size = PAGE_ALIGN(buf_size); | |
615 | } else { /* Histogram */ | |
616 | buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE); | |
617 | } | |
618 | ||
619 | ret = isp_stat_bufs_alloc(stat, buf_size); | |
620 | if (ret) { | |
621 | mutex_unlock(&stat->ioctl_lock); | |
622 | return ret; | |
623 | } | |
624 | ||
625 | spin_lock_irqsave(&stat->isp->stat_lock, irqflags); | |
626 | stat->ops->set_params(stat, new_conf); | |
627 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
628 | ||
629 | /* | |
630 | * Returning the right future config_counter for this setup, so | |
631 | * userspace can *know* when it has been applied. | |
632 | */ | |
633 | user_cfg->config_counter = stat->config_counter + stat->inc_config; | |
634 | ||
635 | /* Module has a valid configuration. */ | |
636 | stat->configured = 1; | |
637 | dev_dbg(stat->isp->dev, "%s: module has been successfully " | |
638 | "configured.\n", stat->subdev.name); | |
639 | ||
640 | mutex_unlock(&stat->ioctl_lock); | |
641 | ||
642 | return 0; | |
643 | } | |
644 | ||
645 | /* | |
646 | * isp_stat_buf_process - Process statistic buffers. | |
647 | * @buf_state: indicates whether the buffer is ready to be processed. It's |
648 | * necessary because the histogram needs to copy the data from |
649 | * internal memory before being able to process the buffer. |
650 | */ | |
651 | static int isp_stat_buf_process(struct ispstat *stat, int buf_state) | |
652 | { | |
653 | int ret = STAT_NO_BUF; | |
654 | ||
655 | if (!atomic_add_unless(&stat->buf_err, -1, 0) && | |
656 | buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) { | |
657 | ret = isp_stat_buf_queue(stat); | |
658 | isp_stat_buf_next(stat); | |
659 | } | |
660 | ||
661 | return ret; | |
662 | } | |
663 | ||
664 | int omap3isp_stat_pcr_busy(struct ispstat *stat) | |
665 | { | |
666 | return stat->ops->busy(stat); | |
667 | } | |
668 | ||
669 | int omap3isp_stat_busy(struct ispstat *stat) | |
670 | { | |
671 | return omap3isp_stat_pcr_busy(stat) | stat->buf_processing | | |
672 | (stat->state != ISPSTAT_DISABLED); | |
673 | } | |
674 | ||
675 | /* | |
676 | * isp_stat_pcr_enable - Disables/Enables statistic engines. | |
677 | * @pcr_enable: 0/1 - Disables/Enables the engine. | |
678 | * | |
679 | * Must be called from ISP driver when the module is idle and synchronized | |
680 | * with CCDC. | |
681 | */ | |
682 | static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable) | |
683 | { | |
684 | if ((stat->state != ISPSTAT_ENABLING && | |
685 | stat->state != ISPSTAT_ENABLED) && pcr_enable) | |
686 | /* Userspace has disabled the module. Aborting. */ | |
687 | return; | |
688 | ||
689 | stat->ops->enable(stat, pcr_enable); | |
690 | if (stat->state == ISPSTAT_DISABLING && !pcr_enable) | |
691 | stat->state = ISPSTAT_DISABLED; | |
692 | else if (stat->state == ISPSTAT_ENABLING && pcr_enable) | |
693 | stat->state = ISPSTAT_ENABLED; | |
694 | } | |
695 | ||
696 | void omap3isp_stat_suspend(struct ispstat *stat) | |
697 | { | |
698 | unsigned long flags; | |
699 | ||
700 | spin_lock_irqsave(&stat->isp->stat_lock, flags); | |
701 | ||
702 | if (stat->state != ISPSTAT_DISABLED) | |
703 | stat->ops->enable(stat, 0); | |
704 | if (stat->state == ISPSTAT_ENABLED) | |
705 | stat->state = ISPSTAT_SUSPENDED; | |
706 | ||
707 | spin_unlock_irqrestore(&stat->isp->stat_lock, flags); | |
708 | } | |
709 | ||
710 | void omap3isp_stat_resume(struct ispstat *stat) | |
711 | { | |
712 | /* Module will be re-enabled with its pipeline */ | |
713 | if (stat->state == ISPSTAT_SUSPENDED) | |
714 | stat->state = ISPSTAT_ENABLING; | |
715 | } | |
716 | ||
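| /* |
| * Enable the engine if userspace has requested it and buffers have been |
| * allocated; called at stream start and on frame sync interrupts. |
| */ |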
717 | static void isp_stat_try_enable(struct ispstat *stat) | |
718 | { | |
719 | unsigned long irqflags; | |
720 | ||
721 | if (stat->priv == NULL) | |
722 | /* driver wasn't initialised */ | |
723 | return; | |
724 | ||
725 | spin_lock_irqsave(&stat->isp->stat_lock, irqflags); | |
726 | if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing && | |
727 | stat->buf_alloc_size) { | |
728 | /* | |
729 | * Userspace has requested to enable the engine, but it hasn't been |
730 | * enabled yet. Let's do that now. |
731 | */ | |
732 | stat->update = 1; | |
733 | isp_stat_buf_next(stat); | |
734 | stat->ops->setup_regs(stat, stat->priv); | |
735 | isp_stat_buf_insert_magic(stat, stat->active_buf); | |
736 | ||
737 | /* | |
738 | * The H3A module has some hw issues which force the driver to |
739 | * ignore the next buffers even if it was disabled in the meantime. |
740 | * On the other hand, the Histogram shouldn't ignore buffers anymore |
741 | * if it's being enabled. |
742 | */ | |
743 | if (!IS_H3A(stat)) | |
744 | atomic_set(&stat->buf_err, 0); | |
745 | ||
746 | isp_stat_pcr_enable(stat, 1); | |
747 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
748 | dev_dbg(stat->isp->dev, "%s: module is enabled.\n", | |
749 | stat->subdev.name); | |
750 | } else { | |
751 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
752 | } | |
753 | } | |
754 | ||
755 | void omap3isp_stat_isr_frame_sync(struct ispstat *stat) | |
756 | { | |
757 | isp_stat_try_enable(stat); | |
758 | } | |
759 | ||
760 | void omap3isp_stat_sbl_overflow(struct ispstat *stat) | |
761 | { | |
762 | unsigned long irqflags; | |
763 | ||
764 | spin_lock_irqsave(&stat->isp->stat_lock, irqflags); | |
765 | /* | |
766 | * Due to an H3A hw issue which prevents the next buffer from starting |
767 | * at the correct memory address, 2 buffers must be ignored. |
768 | */ | |
769 | atomic_set(&stat->buf_err, 2); | |
770 | ||
771 | /* | |
772 | * If more than one SBL overflow happens in a row, the H3A module may |
773 | * access an invalid memory region. |
774 | * stat->sbl_ovl_recover is set to tell the driver to temporarily use |
775 | * a soft configuration which helps to avoid consecutive overflows. |
776 | */ | |
777 | if (stat->recover_priv) | |
778 | stat->sbl_ovl_recover = 1; | |
779 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
780 | } | |
781 | ||
782 | /* | |
783 | * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible | |
784 | * @enable: 0/1 - Disables/Enables the engine. | |
785 | * | |
786 | * Client should configure all the module registers before this. | |
787 | * This function can be called from a userspace request. | |
788 | */ | |
789 | int omap3isp_stat_enable(struct ispstat *stat, u8 enable) | |
790 | { | |
791 | unsigned long irqflags; | |
792 | ||
793 | dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n", | |
794 | stat->subdev.name, enable ? "enable" : "disable"); | |
795 | ||
796 | /* Prevent enabling while configuring */ | |
797 | mutex_lock(&stat->ioctl_lock); | |
798 | ||
799 | spin_lock_irqsave(&stat->isp->stat_lock, irqflags); | |
800 | ||
801 | if (!stat->configured && enable) { | |
802 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
803 | mutex_unlock(&stat->ioctl_lock); | |
804 | dev_dbg(stat->isp->dev, "%s: cannot enable module as it's " | |
805 | "never been successfully configured so far.\n", | |
806 | stat->subdev.name); | |
807 | return -EINVAL; | |
808 | } | |
809 | ||
810 | if (enable) { | |
811 | if (stat->state == ISPSTAT_DISABLING) | |
812 | /* Previous disabling request wasn't done yet */ | |
813 | stat->state = ISPSTAT_ENABLED; | |
814 | else if (stat->state == ISPSTAT_DISABLED) | |
815 | /* Module is now being enabled */ | |
816 | stat->state = ISPSTAT_ENABLING; | |
817 | } else { | |
818 | if (stat->state == ISPSTAT_ENABLING) { | |
819 | /* Previous enabling request wasn't done yet */ | |
820 | stat->state = ISPSTAT_DISABLED; | |
821 | } else if (stat->state == ISPSTAT_ENABLED) { | |
822 | /* Module is now being disabled */ | |
823 | stat->state = ISPSTAT_DISABLING; | |
824 | isp_stat_buf_clear(stat); | |
825 | } | |
826 | } | |
827 | ||
828 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
829 | mutex_unlock(&stat->ioctl_lock); | |
830 | ||
831 | return 0; | |
832 | } | |
833 | ||
834 | int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable) | |
835 | { | |
836 | struct ispstat *stat = v4l2_get_subdevdata(subdev); | |
837 | ||
838 | if (enable) { | |
839 | /* | |
840 | * Only set the PCR enable bit if the module was previously |
841 | * enabled through an ioctl. |
842 | */ | |
843 | isp_stat_try_enable(stat); | |
844 | } else { | |
845 | unsigned long flags; | |
846 | /* Disable PCR bit and config enable field */ | |
847 | omap3isp_stat_enable(stat, 0); | |
848 | spin_lock_irqsave(&stat->isp->stat_lock, flags); | |
849 | stat->ops->enable(stat, 0); | |
850 | spin_unlock_irqrestore(&stat->isp->stat_lock, flags); | |
851 | ||
852 | /* | |
853 | * If the module isn't busy, a new interrupt may or may not come to |
854 | * set the state to DISABLED. As the Histogram needs to read its |
855 | * internal memory to clear it, leave the interrupt handler |
856 | * responsible for changing the state to DISABLED. If the last |
857 | * interrupt does come, it's still safe, as the handler will |
858 | * ignore the second call when the state is already DISABLED. |
859 | * It's necessary to synchronize the Histogram with streamoff here, |
860 | * since the module may otherwise be considered idle before the |
861 | * last SDMA transfer starts if we returned here. |
862 | */ | |
863 | if (!omap3isp_stat_pcr_busy(stat)) | |
864 | omap3isp_stat_isr(stat); | |
865 | ||
866 | dev_dbg(stat->isp->dev, "%s: module is being disabled\n", | |
867 | stat->subdev.name); | |
868 | } | |
869 | ||
870 | return 0; | |
871 | } | |
872 | ||
873 | /* | |
874 | * __stat_isr - Interrupt handler for statistic drivers | |
875 | */ | |
876 | static void __stat_isr(struct ispstat *stat, int from_dma) | |
877 | { | |
878 | int ret = STAT_BUF_DONE; | |
879 | int buf_processing; | |
880 | unsigned long irqflags; | |
881 | struct isp_pipeline *pipe; | |
882 | ||
883 | /* | |
884 | * stat->buf_processing must be set before disabling the module. It's |
885 | * necessary to avoid reporting too early that the buffers aren't busy |
886 | * in case SDMA is going to be used. |
887 | */ | |
888 | spin_lock_irqsave(&stat->isp->stat_lock, irqflags); | |
889 | if (stat->state == ISPSTAT_DISABLED) { | |
890 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
891 | return; | |
892 | } | |
893 | buf_processing = stat->buf_processing; | |
894 | stat->buf_processing = 1; | |
895 | stat->ops->enable(stat, 0); | |
896 | ||
897 | if (buf_processing && !from_dma) { | |
898 | if (stat->state == ISPSTAT_ENABLED) { | |
899 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
900 | dev_err(stat->isp->dev, | |
901 | "%s: interrupt occurred when module was still " | |
902 | "processing a buffer.\n", stat->subdev.name); | |
903 | ret = STAT_NO_BUF; | |
904 | goto out; | |
905 | } else { | |
906 | /* | |
907 | * The interrupt handler was called from streamoff when |
908 | * the module wasn't busy anymore, to ensure it is |
909 | * disabled after processing the last buffer. If such |
910 | * buffer processing has already started, there is nothing |
911 | * else to do. |
912 | */ | |
913 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
914 | return; | |
915 | } | |
916 | } | |
917 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
918 | ||
919 | /* If it's busy we can't process this buffer anymore */ | |
920 | if (!omap3isp_stat_pcr_busy(stat)) { | |
921 | if (!from_dma && stat->ops->buf_process) | |
922 | /* Module still needs to copy data to buffer. */ |
923 | ret = stat->ops->buf_process(stat); | |
924 | if (ret == STAT_BUF_WAITING_DMA) | |
925 | /* Buffer is not ready yet */ | |
926 | return; | |
927 | ||
928 | spin_lock_irqsave(&stat->isp->stat_lock, irqflags); | |
929 | ||
930 | /* | |
931 | * The Histogram needs to read its internal memory to clear it |
932 | * before being disabled. For that reason, the common statistics layer |
933 | * can return only after calling the stat's buf_process() operator. |
934 | */ | |
935 | if (stat->state == ISPSTAT_DISABLING) { | |
936 | stat->state = ISPSTAT_DISABLED; | |
937 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
938 | stat->buf_processing = 0; | |
939 | return; | |
940 | } | |
941 | pipe = to_isp_pipeline(&stat->subdev.entity); | |
942 | stat->frame_number = atomic_read(&pipe->frame_number); | |
943 | ||
944 | /* | |
945 | * Before this point, 'ret' stores the buffer's status if it's | |
946 | * ready to be processed. Afterwards, it holds the status if | |
947 | * it was processed successfully. | |
948 | */ | |
949 | ret = isp_stat_buf_process(stat, ret); | |
950 | ||
951 | if (likely(!stat->sbl_ovl_recover)) { | |
952 | stat->ops->setup_regs(stat, stat->priv); | |
953 | } else { | |
954 | /* | |
955 | * Use the recovery config to increase the chance of |
956 | * a good buffer processing run and make the H3A module |
957 | * go back to a valid state. |
958 | */ | |
959 | stat->update = 1; | |
960 | stat->ops->setup_regs(stat, stat->recover_priv); | |
961 | stat->sbl_ovl_recover = 0; | |
962 | ||
963 | /* | |
964 | * Set 'update' in case the module needs to use the |
965 | * regular configuration after the next buffer. |
966 | */ | |
967 | stat->update = 1; | |
968 | } | |
969 | ||
970 | isp_stat_buf_insert_magic(stat, stat->active_buf); | |
971 | ||
972 | /* | |
973 | * Hack: H3A modules may access an invalid memory address or send |
974 | * corrupted data to userspace if more than 1 SBL overflow |
975 | * happens in a row without the buffer's start memory address |
976 | * being re-written in the meantime. Such a situation is avoided if |
977 | * the module is not immediately re-enabled when the ISR misses the |
978 | * timing to process the buffer and to set up the registers. |
979 | * Because of that, pcr_enable(1) was moved inside this 'if' |
980 | * block. But the next interrupt will still happen, as during |
981 | * pcr_enable(0) the module was busy. |
982 | */ | |
983 | isp_stat_pcr_enable(stat, 1); | |
984 | spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); | |
985 | } else { | |
986 | /* | |
987 | * If an SBL overflow occurs and the H3A driver misses the timing |
988 | * to process the buffer, stat->buf_err is set and won't be |
989 | * cleared now, so the next buffer will be correctly ignored. |
990 | * It's necessary due to a hw issue which makes the next H3A |
991 | * buffer start at the memory address where the previous |
992 | * one stopped, instead of where it was configured to. |
993 | * Do not "stat->buf_err = 0" here. |
994 | */ | |
995 | ||
996 | if (stat->ops->buf_process) | |
997 | /* | |
998 | * The driver may need to erase the current data prior to |
999 | * processing a new buffer. If it misses the timing, the |
1000 | * next buffer might be wrong and so should be ignored. |
1001 | * It happens only for the Histogram. |
1002 | */ | |
1003 | atomic_set(&stat->buf_err, 1); | |
1004 | ||
1005 | ret = STAT_NO_BUF; | |
1006 | dev_dbg(stat->isp->dev, "%s: cannot process buffer, " | |
1007 | "device is busy.\n", stat->subdev.name); | |
1008 | } | |
1009 | ||
1010 | out: | |
1011 | stat->buf_processing = 0; | |
1012 | isp_stat_queue_event(stat, ret != STAT_BUF_DONE); | |
1013 | } | |
1014 | ||
1015 | void omap3isp_stat_isr(struct ispstat *stat) | |
1016 | { | |
1017 | __stat_isr(stat, 0); | |
1018 | } | |
1019 | ||
1020 | void omap3isp_stat_dma_isr(struct ispstat *stat) | |
1021 | { | |
1022 | __stat_isr(stat, 1); | |
1023 | } | |
1024 | ||
1025 | static int isp_stat_init_entities(struct ispstat *stat, const char *name, | |
1026 | const struct v4l2_subdev_ops *sd_ops) | |
1027 | { | |
1028 | struct v4l2_subdev *subdev = &stat->subdev; | |
1029 | struct media_entity *me = &subdev->entity; | |
1030 | ||
1031 | v4l2_subdev_init(subdev, sd_ops); | |
1032 | snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name); | |
1033 | subdev->grp_id = 1 << 16; /* group ID for isp subdevs */ | |
1034 | subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; | |
1035 | v4l2_set_subdevdata(subdev, stat); |
1036 | ||
1037 | stat->pad.flags = MEDIA_PAD_FL_SINK; | |
1038 | me->ops = NULL; | |
1039 | ||
1040 | return media_entity_init(me, 1, &stat->pad, 0); | |
1041 | } | |
1042 | ||
1043 | int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev, | |
1044 | struct v4l2_fh *fh, | |
1045 | struct v4l2_event_subscription *sub) | |
1046 | { | |
1047 | struct ispstat *stat = v4l2_get_subdevdata(subdev); | |
1048 | ||
1049 | if (sub->type != stat->event_type) | |
1050 | return -EINVAL; | |
1051 | ||
1052 | return v4l2_event_subscribe(fh, sub, STAT_NEVENTS); |
1053 | } |
1054 | ||
1055 | int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev, | |
1056 | struct v4l2_fh *fh, | |
1057 | struct v4l2_event_subscription *sub) | |
1058 | { | |
1059 | return v4l2_event_unsubscribe(fh, sub); | |
1060 | } | |
1061 | ||
1062 | void omap3isp_stat_unregister_entities(struct ispstat *stat) | |
1063 | { | |
1064 | media_entity_cleanup(&stat->subdev.entity); | |
1065 | v4l2_device_unregister_subdev(&stat->subdev); | |
1066 | } | |
1067 | ||
1068 | int omap3isp_stat_register_entities(struct ispstat *stat, | |
1069 | struct v4l2_device *vdev) | |
1070 | { | |
1071 | return v4l2_device_register_subdev(vdev, &stat->subdev); | |
1072 | } | |
1073 | ||
1074 | int omap3isp_stat_init(struct ispstat *stat, const char *name, | |
1075 | const struct v4l2_subdev_ops *sd_ops) | |
1076 | { | |
1077 | stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL); | |
1078 | if (!stat->buf) | |
1079 | return -ENOMEM; | |
1080 | isp_stat_buf_clear(stat); | |
1081 | mutex_init(&stat->ioctl_lock); | |
1082 | atomic_set(&stat->buf_err, 0); | |
1083 | ||
1084 | return isp_stat_init_entities(stat, name, sd_ops); | |
1085 | } | |
1086 | ||
1087 | void omap3isp_stat_free(struct ispstat *stat) | |
1088 | { | |
1089 | isp_stat_bufs_free(stat); | |
1090 | kfree(stat->buf); | |
1091 | } |