2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/kernel/bpf/helpers.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
  */
 #include <linux/bpf.h>
 #include <linux/rcupdate.h>
@@ -18,6 +10,13 @@
 #include <linux/sched.h>
 #include <linux/uidgid.h>
 #include <linux/filter.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/pid_namespace.h>
+#include <linux/proc_ns.h>
+#include <linux/security.h>
+
+#include "../../lib/kstrtox.h"
 
 /* If kernel subsystem is allowing eBPF programs to call this function,
  * inside its own verifier_ops->get_func_proto() callback it should return
@@ -74,6 +73,47 @@
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
+};
+
+BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
+{
+	return map->ops->map_push_elem(map, value, flags);
+}
+
+const struct bpf_func_proto bpf_map_push_elem_proto = {
+	.func		= bpf_map_push_elem,
+	.gpl_only	= false,
+	.pkt_access	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
+	.arg3_type	= ARG_ANYTHING,
+};
+
+BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
+{
+	return map->ops->map_pop_elem(map, value);
+}
+
+const struct bpf_func_proto bpf_map_pop_elem_proto = {
+	.func		= bpf_map_pop_elem,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
+};
+
+BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
+{
+	return map->ops->map_peek_elem(map, value);
+}
+
+const struct bpf_func_proto bpf_map_peek_elem_proto = {
+	.func		= bpf_map_peek_elem,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
 };
 
 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
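
The three helpers above are thin wrappers around the map_push_elem/map_pop_elem/map_peek_elem ops of BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK maps. A minimal program-side sketch (not part of this patch; the map name events_q, struct event, and the tracepoint are illustrative, assuming libbpf's bpf_helpers.h):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
	__u64 ts;
};

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);	/* no keys, FIFO semantics */
	__uint(max_entries, 1024);
	__uint(value_size, sizeof(struct event));
} events_q SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int enqueue_exec(void *ctx)
{
	struct event e = {
		.pid = bpf_get_current_pid_tgid() >> 32,
		.ts  = bpf_ktime_get_ns(),
	};

	/* BPF_EXIST: on a full queue, drop the oldest element and push */
	bpf_map_push_elem(&events_q, &e, BPF_EXIST);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Userspace would then drain the queue with bpf_map_pop_elem(); bpf_map_peek_elem() reads the head without consuming it.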
@@ -192,12 +232,119 @@
 	.arg2_type	= ARG_CONST_SIZE,
 };
 
+#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
+
+static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
+{
+	arch_spinlock_t *l = (void *)lock;
+	union {
+		__u32 val;
+		arch_spinlock_t lock;
+	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
+
+	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
+	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
+	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
+	arch_spin_lock(l);
+}
+
+static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
+{
+	arch_spinlock_t *l = (void *)lock;
+
+	arch_spin_unlock(l);
+}
+
+#else
+
+static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
+{
+	atomic_t *l = (void *)lock;
+
+	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
+	do {
+		atomic_cond_read_relaxed(l, !VAL);
+	} while (atomic_xchg(l, 1));
+}
+
+static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
+{
+	atomic_t *l = (void *)lock;
+
+	atomic_set_release(l, 0);
+}
+
+#endif
+
+static DEFINE_PER_CPU(unsigned long, irqsave_flags);
+
+notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__bpf_spin_lock(lock);
+	__this_cpu_write(irqsave_flags, flags);
+	return 0;
+}
+
+const struct bpf_func_proto bpf_spin_lock_proto = {
+	.func		= bpf_spin_lock,
+	.gpl_only	= false,
+	.ret_type	= RET_VOID,
+	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
+};
+
+notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+{
+	unsigned long flags;
+
+	flags = __this_cpu_read(irqsave_flags);
+	__bpf_spin_unlock(lock);
+	local_irq_restore(flags);
+	return 0;
+}
+
+const struct bpf_func_proto bpf_spin_unlock_proto = {
+	.func		= bpf_spin_unlock,
+	.gpl_only	= false,
+	.ret_type	= RET_VOID,
+	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
+};
+
+void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
+			   bool lock_src)
+{
+	struct bpf_spin_lock *lock;
+
+	if (lock_src)
+		lock = src + map->spin_lock_off;
+	else
+		lock = dst + map->spin_lock_off;
+	preempt_disable();
+	____bpf_spin_lock(lock);
+	copy_map_value(map, dst, src);
+	____bpf_spin_unlock(lock);
+	preempt_enable();
+}
+
+BPF_CALL_0(bpf_jiffies64)
+{
+	return get_jiffies_64();
+}
+
+const struct bpf_func_proto bpf_jiffies64_proto = {
+	.func		= bpf_jiffies64,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+};
+
 #ifdef CONFIG_CGROUPS
 BPF_CALL_0(bpf_get_current_cgroup_id)
 {
 	struct cgroup *cgrp = task_dfl_cgroup(current);
 
-	return cgrp->kn->id.id;
+	return cgroup_id(cgrp);
 }
 
 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
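
bpf_spin_lock stashes the saved IRQ flags in a per-CPU variable, so the lock must be released on the same CPU before anything else can take one; copy_map_value_locked() calls ____bpf_spin_lock(), the typed inner function that the BPF_CALL_1 macro generates for bpf_spin_lock, so it follows the same IRQ-save discipline. A program-side sketch (not part of this patch; map and field names are illustrative), assuming a BTF-described map value so the verifier can locate the bpf_spin_lock field:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct stats {
	struct bpf_spin_lock lock;
	__u64 packets;
	__u64 last_seen_ns;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct stats);
} stats_map SEC(".maps");

SEC("xdp")
int count_packets(struct xdp_md *ctx)
{
	__u64 now = bpf_ktime_get_ns();	/* helper calls are forbidden while locked */
	__u32 key = 0;
	struct stats *s;

	s = bpf_map_lookup_elem(&stats_map, &key);
	if (!s)
		return XDP_PASS;

	bpf_spin_lock(&s->lock);	/* IRQs off, flags stashed per-CPU */
	s->packets++;			/* both fields update together ... */
	s->last_seen_ns = now;		/* ... consistently w.r.t. other lockers */
	bpf_spin_unlock(&s->lock);	/* per-CPU flags restored */
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";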
@@ -206,16 +353,53 @@
 	.ret_type	= RET_INTEGER,
 };
 
-DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
+{
+	struct cgroup *cgrp = task_dfl_cgroup(current);
+	struct cgroup *ancestor;
+
+	ancestor = cgroup_ancestor(cgrp, ancestor_level);
+	if (!ancestor)
+		return 0;
+	return cgroup_id(ancestor);
+}
+
+const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
+	.func		= bpf_get_current_ancestor_cgroup_id,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_ANYTHING,
+};
+
+#ifdef CONFIG_CGROUP_BPF
+DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
+		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
 
 BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
 {
-	/* map and flags arguments are not used now,
-	 * but provide an ability to extend the API
-	 * for other types of local storages.
-	 * verifier checks that their values are correct.
+	/* flags argument is not used now,
+	 * but provides an ability to extend the API.
+	 * verifier checks that its value is correct.
 	 */
-	return (unsigned long) this_cpu_read(bpf_cgroup_storage);
+	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
+	struct bpf_cgroup_storage *storage = NULL;
+	void *ptr;
+	int i;
+
+	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
+		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
+			continue;
+
+		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
+		break;
+	}
+
+	if (stype == BPF_CGROUP_STORAGE_SHARED)
+		ptr = &READ_ONCE(storage->buf)->data[0];
+	else
+		ptr = this_cpu_ptr(storage->percpu_buf);
+
+	return (unsigned long)ptr;
 }
 
 const struct bpf_func_proto bpf_get_local_storage_proto = {
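
bpf_get_current_ancestor_cgroup_id() walks up from the current task's default-hierarchy cgroup and returns 0 when the requested level is shallower than the root or deeper than the task's cgroup. A program-side sketch (not part of this patch; the rodata constant and tracepoint are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* filled in by the loader before attach; hypothetical */
const volatile __u64 target_cg_id;

SEC("tracepoint/sched/sched_process_exec")
int on_exec(void *ctx)
{
	/* level 0 is the hierarchy root; level 2 selects the
	 * grandchild cgroup that contains the current task
	 */
	if (bpf_get_current_ancestor_cgroup_id(2) != target_cg_id)
		return 0;

	bpf_printk("exec inside the watched cgroup subtree\n");
	return 0;
}

char LICENSE[] SEC("license") = "GPL";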
@@ -226,3 +410,342 @@
 	.arg2_type	= ARG_ANYTHING,
 };
 #endif
+
+#define BPF_STRTOX_BASE_MASK 0x1F
+
+static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
+			  unsigned long long *res, bool *is_negative)
+{
+	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
+	const char *cur_buf = buf;
+	size_t cur_len = buf_len;
+	unsigned int consumed;
+	size_t val_len;
+	char str[64];
+
+	if (!buf || !buf_len || !res || !is_negative)
+		return -EINVAL;
+
+	if (base != 0 && base != 8 && base != 10 && base != 16)
+		return -EINVAL;
+
+	if (flags & ~BPF_STRTOX_BASE_MASK)
+		return -EINVAL;
+
+	while (cur_buf < buf + buf_len && isspace(*cur_buf))
+		++cur_buf;
+
+	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
+	if (*is_negative)
+		++cur_buf;
+
+	consumed = cur_buf - buf;
+	cur_len -= consumed;
+	if (!cur_len)
+		return -EINVAL;
+
+	cur_len = min(cur_len, sizeof(str) - 1);
+	memcpy(str, cur_buf, cur_len);
+	str[cur_len] = '\0';
+	cur_buf = str;
+
+	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
+	val_len = _parse_integer(cur_buf, base, res);
+
+	if (val_len & KSTRTOX_OVERFLOW)
+		return -ERANGE;
+
+	if (val_len == 0)
+		return -EINVAL;
+
+	cur_buf += val_len;
+	consumed += cur_buf - str;
+
+	return consumed;
+}
+
+static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
+			 long long *res)
+{
+	unsigned long long _res;
+	bool is_negative;
+	int err;
+
+	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
+	if (err < 0)
+		return err;
+	if (is_negative) {
+		if ((long long)-_res > 0)
+			return -ERANGE;
+		*res = -_res;
+	} else {
+		if ((long long)_res < 0)
+			return -ERANGE;
+		*res = _res;
+	}
+	return err;
+}
+
+BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
+	   long *, res)
+{
+	long long _res;
+	int err;
+
+	err = __bpf_strtoll(buf, buf_len, flags, &_res);
+	if (err < 0)
+		return err;
+	if (_res != (long)_res)
+		return -ERANGE;
+	*res = _res;
+	return err;
+}
+
+const struct bpf_func_proto bpf_strtol_proto = {
+	.func		= bpf_strtol,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_MEM,
+	.arg2_type	= ARG_CONST_SIZE,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_LONG,
+};
+
+BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
+	   unsigned long *, res)
+{
+	unsigned long long _res;
+	bool is_negative;
+	int err;
+
+	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
+	if (err < 0)
+		return err;
+	if (is_negative)
+		return -EINVAL;
+	if (_res != (unsigned long)_res)
+		return -ERANGE;
+	*res = _res;
+	return err;
+}
+
+const struct bpf_func_proto bpf_strtoul_proto = {
+	.func		= bpf_strtoul,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_MEM,
+	.arg2_type	= ARG_CONST_SIZE,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_LONG,
+};
+#endif
+
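__bpf_strtoull() copies at most 63 bytes into a NUL-terminated scratch buffer and, on success, returns the number of characters consumed, so a program can parse several numbers out of one buffer. A typical consumer is a cgroup sysctl program (sketch, not part of this patch; the 4096 bound is an arbitrary example policy):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int sysctl_bound(struct bpf_sysctl *ctx)
{
	char buf[16] = {};
	long val = 0;

	/* only writes carry a new value; allow everything else */
	if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) < 0)
		return 1;

	/* flags == 0: base auto-detected from a 0x/0 prefix, as in kstrtol */
	if (bpf_strtol(buf, sizeof(buf), 0, &val) < 0)
		return 0;	/* reject unparseable input */

	return val <= 4096;	/* hypothetical policy limit */
}

char LICENSE[] SEC("license") = "GPL";
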
+BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
+	   struct bpf_pidns_info *, nsdata, u32, size)
+{
+	struct task_struct *task = current;
+	struct pid_namespace *pidns;
+	int err = -EINVAL;
+
+	if (unlikely(size != sizeof(struct bpf_pidns_info)))
+		goto clear;
+
+	if (unlikely((u64)(dev_t)dev != dev))
+		goto clear;
+
+	if (unlikely(!task))
+		goto clear;
+
+	pidns = task_active_pid_ns(task);
+	if (unlikely(!pidns)) {
+		err = -ENOENT;
+		goto clear;
+	}
+
+	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
+		goto clear;
+
+	nsdata->pid = task_pid_nr_ns(task, pidns);
+	nsdata->tgid = task_tgid_nr_ns(task, pidns);
+	return 0;
+clear:
+	memset((void *)nsdata, 0, (size_t) size);
+	return err;
+}
+
+const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
+	.func		= bpf_get_ns_current_pid_tgid,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_ANYTHING,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg4_type	= ARG_CONST_SIZE,
+};
+
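bpf_get_ns_current_pid_tgid() reports pid/tgid as seen from a specific pid namespace, identified by the dev/inode pair of its /proc/*/ns/pid file; on any mismatch it zeroes nsdata and returns an error. A program-side sketch (not part of this patch), assuming the loader stats /proc/self/ns/pid and fills the two rodata constants:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* st_dev and st_ino of /proc/self/ns/pid, set by the loader; hypothetical */
const volatile __u64 pidns_dev;
const volatile __u64 pidns_ino;

SEC("tracepoint/syscalls/sys_enter_write")
int trace_write(void *ctx)
{
	struct bpf_pidns_info ns = {};

	/* non-zero: bad size/dev, or current is not in that pid namespace */
	if (bpf_get_ns_current_pid_tgid(pidns_dev, pidns_ino,
					&ns, sizeof(ns)))
		return 0;

	bpf_printk("pid as seen in the target ns: %u\n", ns.pid);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
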
+static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
+	.func		= bpf_get_raw_cpu_id,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+};
+
+BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
+	   u64, flags, void *, data, u64, size)
+{
+	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+		return -EINVAL;
+
+	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
+}
+
+const struct bpf_func_proto bpf_event_output_data_proto = {
+	.func		= bpf_event_output_data,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
+};
+
+BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
+	   const void __user *, user_ptr)
+{
+	int ret = copy_from_user(dst, user_ptr, size);
+
+	if (unlikely(ret)) {
+		memset(dst, 0, size);
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+const struct bpf_func_proto bpf_copy_from_user_proto = {
+	.func		= bpf_copy_from_user,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg3_type	= ARG_ANYTHING,
+};
+
+BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
+{
+	if (cpu >= nr_cpu_ids)
+		return (unsigned long)NULL;
+
+	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
+}
+
+const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
+	.func		= bpf_per_cpu_ptr,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
+	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
+	.arg2_type	= ARG_ANYTHING,
+};
+
+BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
+{
+	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
+}
+
+const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
+	.func		= bpf_this_cpu_ptr,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
+	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
+};
+
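bpf_per_cpu_ptr() mirrors the kernel's per_cpu_ptr() and returns NULL for out-of-range CPUs, which the verifier forces the program to check; bpf_this_cpu_ptr() cannot fail, hence the return type without _OR_NULL. A sketch using a typed __ksym extern (not part of this patch; assumes vmlinux BTF and a libbpf recent enough to resolve per-CPU ksyms):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* a real kernel per-CPU variable, resolved against vmlinux BTF */
extern const int bpf_prog_active __ksym;

SEC("raw_tp/sys_enter")
int read_prog_active(void *ctx)
{
	__u32 cpu = bpf_get_smp_processor_id();
	const int *active;

	active = bpf_per_cpu_ptr(&bpf_prog_active, cpu);
	if (!active)	/* NULL when cpu >= nr_cpu_ids */
		return 0;

	bpf_printk("bpf_prog_active[%u] = %d\n", cpu, *active);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
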
+const struct bpf_func_proto bpf_get_current_task_proto __weak;
+const struct bpf_func_proto bpf_probe_read_user_proto __weak;
+const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
+const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
+const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
+
+const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_map_lookup_elem:
+		return &bpf_map_lookup_elem_proto;
+	case BPF_FUNC_map_update_elem:
+		return &bpf_map_update_elem_proto;
+	case BPF_FUNC_map_delete_elem:
+		return &bpf_map_delete_elem_proto;
+	case BPF_FUNC_map_push_elem:
+		return &bpf_map_push_elem_proto;
+	case BPF_FUNC_map_pop_elem:
+		return &bpf_map_pop_elem_proto;
+	case BPF_FUNC_map_peek_elem:
+		return &bpf_map_peek_elem_proto;
+	case BPF_FUNC_get_prandom_u32:
+		return &bpf_get_prandom_u32_proto;
+	case BPF_FUNC_get_smp_processor_id:
+		return &bpf_get_raw_smp_processor_id_proto;
+	case BPF_FUNC_get_numa_node_id:
+		return &bpf_get_numa_node_id_proto;
+	case BPF_FUNC_tail_call:
+		return &bpf_tail_call_proto;
+	case BPF_FUNC_ktime_get_ns:
+		return &bpf_ktime_get_ns_proto;
+	case BPF_FUNC_ktime_get_boot_ns:
+		return &bpf_ktime_get_boot_ns_proto;
+	case BPF_FUNC_ringbuf_output:
+		return &bpf_ringbuf_output_proto;
+	case BPF_FUNC_ringbuf_reserve:
+		return &bpf_ringbuf_reserve_proto;
+	case BPF_FUNC_ringbuf_submit:
+		return &bpf_ringbuf_submit_proto;
+	case BPF_FUNC_ringbuf_discard:
+		return &bpf_ringbuf_discard_proto;
+	case BPF_FUNC_ringbuf_query:
+		return &bpf_ringbuf_query_proto;
+	default:
+		break;
+	}
+
+	if (!bpf_capable())
+		return NULL;
+
+	switch (func_id) {
+	case BPF_FUNC_spin_lock:
+		return &bpf_spin_lock_proto;
+	case BPF_FUNC_spin_unlock:
+		return &bpf_spin_unlock_proto;
+	case BPF_FUNC_jiffies64:
+		return &bpf_jiffies64_proto;
+	case BPF_FUNC_per_cpu_ptr:
+		return &bpf_per_cpu_ptr_proto;
+	case BPF_FUNC_this_cpu_ptr:
+		return &bpf_this_cpu_ptr_proto;
+	default:
+		break;
+	}
+
+	if (!perfmon_capable())
+		return NULL;
+
+	switch (func_id) {
+	case BPF_FUNC_trace_printk:
+		return bpf_get_trace_printk_proto();
+	case BPF_FUNC_get_current_task:
+		return &bpf_get_current_task_proto;
+	case BPF_FUNC_probe_read_user:
+		return &bpf_probe_read_user_proto;
+	case BPF_FUNC_probe_read_kernel:
+		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		       NULL : &bpf_probe_read_kernel_proto;
+	case BPF_FUNC_probe_read_user_str:
+		return &bpf_probe_read_user_str_proto;
+	case BPF_FUNC_probe_read_kernel_str:
+		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+		       NULL : &bpf_probe_read_kernel_str_proto;
+	case BPF_FUNC_snprintf_btf:
+		return &bpf_snprintf_btf_proto;
+	default:
+		return NULL;
+	}
+}
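
bpf_base_func_proto() is the common back end for the per-subsystem verifier_ops->get_func_proto() callbacks mentioned in the comment near the top of the file: the first switch is available to unprivileged programs, bpf_capable() unlocks the second, and perfmon_capable() plus the lockdown check gate the tracing and kernel-reading helpers. A hypothetical subsystem hook would layer on top of it roughly like this (sketch; foo_func_proto is a made-up name):

/* hypothetical subsystem's verifier_ops->get_func_proto() */
static const struct bpf_func_proto *
foo_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		/* a helper this subsystem grants beyond the base set */
		return &bpf_get_current_uid_gid_proto;
	default:
		/* everything else: the capability-gated base set above */
		return bpf_base_func_proto(func_id);
	}
}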