-
Notifications
You must be signed in to change notification settings - Fork 1.4k
/
Copy pathos-interface.c
2616 lines (2223 loc) · 63.2 KB
/
os-interface.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-caps-imex.h"
#include "nv-time.h"
#include <linux/mmzone.h>
#include <linux/numa.h>
#include <linux/cpuset.h>
#include <linux/pid.h>
#if defined(CONFIG_LOCKDEP)
#include <linux/lockdep.h>
#endif // CONFIG_LOCKDEP
// Module parameter defined elsewhere in the driver: directory for temporary files.
extern char *NVreg_TemporaryFilePath;
// Scratch buffer for building log messages; guarded by nv_error_string_lock
// because nv_printf()/os_log_error() may run concurrently (including from IRQ).
#define MAX_ERROR_STRING 528
static char nv_error_string[MAX_ERROR_STRING];
static NV_DEFINE_SPINLOCK(nv_error_string_lock);
extern nv_linux_state_t nv_ctl_device;
extern nv_kthread_q_t nv_kthread_q;
// Page geometry exported to core RM, taken from the kernel's compile-time constants.
NvU32 os_page_size = PAGE_SIZE;
NvU64 os_page_mask = NV_PAGE_MASK;
NvU8 os_page_shift = PAGE_SHIFT;
// Confidential-computing flags; 0 unless set elsewhere in the driver.
NvBool os_cc_enabled = 0;
NvBool os_cc_tdx_enabled = 0;
// dma-buf support depends on the kernel configuration.
#if defined(CONFIG_DMA_SHARED_BUFFER)
NvBool os_dma_buf_enabled = NV_TRUE;
#else
NvBool os_dma_buf_enabled = NV_FALSE;
#endif // CONFIG_DMA_SHARED_BUFFER
NvBool os_imex_channel_is_supported = NV_TRUE;
// Block console output by taking the kernel console lock; paired with
// os_enable_console_access(). console_lock() may sleep.
void NV_API_CALL os_disable_console_access(void)
{
    console_lock();
}
// Release the kernel console lock taken by os_disable_console_access().
void NV_API_CALL os_enable_console_access(void)
{
    console_unlock();
}
typedef struct semaphore os_mutex_t;
//
// os_alloc_mutex - Allocate the RM mutex
//
// ppMutex - filled in with pointer to opaque structure to mutex data type
//
NV_STATUS NV_API_CALL os_alloc_mutex
(
    void **ppMutex
)
{
    // Back the opaque handle with a kernel semaphore used as a mutex.
    NV_STATUS status = os_alloc_mem(ppMutex, sizeof(os_mutex_t));

    if (status == NV_OK)
    {
        NV_INIT_MUTEX((os_mutex_t *)*ppMutex);
        return NV_OK;
    }

    nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate mutex!\n");
    return status;
}
//
// os_free_mutex - Free resources associated with mutex allocated
// via os_alloc_mutex above.
//
// pMutex - Pointer to opaque structure to mutex data type
//
void NV_API_CALL os_free_mutex
(
    void *pMutex
)
{
    // Freeing a NULL mutex is a no-op.
    if (pMutex == NULL)
        return;

    os_free_mem(pMutex);
}
//
// pMutex - Pointer to opaque structure to mutex data type
//
NV_STATUS NV_API_CALL os_acquire_mutex
(
    void *pMutex
)
{
    // down() can sleep, so refuse the call from atomic context.
    if (!NV_MAY_SLEEP())
        return NV_ERR_INVALID_REQUEST;

    down((os_mutex_t *)pMutex);
    return NV_OK;
}
NV_STATUS NV_API_CALL os_cond_acquire_mutex
(
    void * pMutex
)
{
    // down_trylock() itself never sleeps; this check is kept for parity
    // with the kernel's 'struct mutex' usage rules (see the note in
    // os_cond_acquire_semaphore()).
    if (!NV_MAY_SLEEP())
        return NV_ERR_INVALID_REQUEST;

    // down_trylock() returns nonzero when the mutex is contended.
    return down_trylock((os_mutex_t *)pMutex) ? NV_ERR_TIMEOUT_RETRY : NV_OK;
}
// Release a mutex previously acquired with os_acquire_mutex() or
// os_cond_acquire_mutex().
void NV_API_CALL os_release_mutex
(
    void *pMutex
)
{
    up((os_mutex_t *)pMutex);
}
typedef struct semaphore os_semaphore_t;
// Allocate a counting semaphore with the given initial count.
// Returns an opaque handle, or NULL on allocation failure.
void* NV_API_CALL os_alloc_semaphore
(
    NvU32 initialValue
)
{
    os_semaphore_t *sem = NULL;

    if (os_alloc_mem((void *)&sem, sizeof(*sem)) != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate semaphore!\n");
        return NULL;
    }

    sema_init(sem, initialValue);
    return sem;
}
// Free a semaphore allocated by os_alloc_semaphore().
void NV_API_CALL os_free_semaphore
(
    void *pSema
)
{
    os_free_mem(pSema);
}
NV_STATUS NV_API_CALL os_acquire_semaphore
(
    void *pSema
)
{
    // down() can sleep, so refuse the call from atomic context.
    if (!NV_MAY_SLEEP())
        return NV_ERR_INVALID_REQUEST;

    down((os_semaphore_t *)pSema);
    return NV_OK;
}
NV_STATUS NV_API_CALL os_cond_acquire_semaphore
(
    void * pSema
)
{
    //
    // Safe from IRQ context: down_trylock() never sleeps, so no
    // NV_MAY_SLEEP() check is needed. os_cond_acquire_mutex() keeps one
    // only to stay in line with the kernel's 'struct mutex' API rules,
    // even though it also uses down_trylock().
    //
    return down_trylock((os_semaphore_t *)pSema) ? NV_ERR_TIMEOUT_RETRY
                                                 : NV_OK;
}
// Signal (increment) the semaphore. Always succeeds.
NV_STATUS NV_API_CALL os_release_semaphore
(
    void *pSema
)
{
    up((os_semaphore_t *)pSema);
    return NV_OK;
}
// Reader/writer lock backed by a kernel rw_semaphore.
typedef struct
{
    struct rw_semaphore sem;
#if defined(CONFIG_LOCKDEP)
    /**
     * A key of lock class. It would be registered to Lockdep validator so all
     * instances' usages and dependencies will contribute to constructing correct
     * locking rules and this lock will be tracked by the Lockdep validator.
     *
     */
    struct lock_class_key key;
#endif // CONFIG_LOCKDEP
} os_rwlock_t;
// Allocate and initialize an os_rwlock_t. Returns NULL on failure.
void* NV_API_CALL os_alloc_rwlock(void)
{
    os_rwlock_t *lock = NULL;

    if (os_alloc_mem((void *)&lock, sizeof(*lock)) != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate a struct os_rwlock_t!\n");
        return NULL;
    }

    init_rwsem(&lock->sem);

#if defined(CONFIG_LOCKDEP)
    // Give each dynamically allocated lock its own Lockdep class key.
    lockdep_register_key(&lock->key);
    lockdep_set_class(&lock->sem, &lock->key);
#endif // CONFIG_LOCKDEP

    return lock;
}
// Tear down an os_rwlock_t created by os_alloc_rwlock().
void NV_API_CALL os_free_rwlock(void *pRwLock)
{
    os_rwlock_t *lock = (os_rwlock_t *)pRwLock;

#if defined(CONFIG_LOCKDEP)
    // The key was registered in os_alloc_rwlock(); drop it before freeing.
    lockdep_unregister_key(&lock->key);
#endif // CONFIG_LOCKDEP

    os_free_mem(lock);
}
NV_STATUS NV_API_CALL os_acquire_rwlock_read(void *pRwLock)
{
    // down_read() can sleep, so refuse the call from atomic context.
    if (!NV_MAY_SLEEP())
        return NV_ERR_INVALID_REQUEST;

    down_read(&((os_rwlock_t *)pRwLock)->sem);
    return NV_OK;
}
NV_STATUS NV_API_CALL os_acquire_rwlock_write(void *pRwLock)
{
    // down_write() can sleep, so refuse the call from atomic context.
    if (!NV_MAY_SLEEP())
        return NV_ERR_INVALID_REQUEST;

    down_write(&((os_rwlock_t *)pRwLock)->sem);
    return NV_OK;
}
// Try to take the rwlock for reading without blocking.
// Returns NV_OK with the lock held, or NV_ERR_TIMEOUT_RETRY on contention.
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read(void *pRwLock)
{
    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;

    //
    // BUGFIX: down_read_trylock() returns 1 on success and 0 on failure —
    // the opposite convention of down_trylock(). The previous code
    // reported NV_ERR_TIMEOUT_RETRY on *success* (leaking a held rwsem)
    // and NV_OK when the lock was never taken.
    //
    if (!down_read_trylock(&os_rwlock->sem))
    {
        return NV_ERR_TIMEOUT_RETRY;
    }
    return NV_OK;
}
// Try to take the rwlock for writing without blocking.
// Returns NV_OK with the lock held, or NV_ERR_TIMEOUT_RETRY on contention.
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write(void *pRwLock)
{
    os_rwlock_t *os_rwlock = (os_rwlock_t *)pRwLock;

    //
    // BUGFIX: down_write_trylock() returns 1 on success and 0 on failure —
    // the opposite convention of down_trylock(). The previous code
    // reported NV_ERR_TIMEOUT_RETRY on *success* (leaking a held rwsem)
    // and NV_OK when the lock was never taken.
    //
    if (!down_write_trylock(&os_rwlock->sem))
    {
        return NV_ERR_TIMEOUT_RETRY;
    }
    return NV_OK;
}
// Drop a read hold taken via os_acquire_rwlock_read()/cond variant.
void NV_API_CALL os_release_rwlock_read(void *pRwLock)
{
    up_read(&((os_rwlock_t *)pRwLock)->sem);
}
// Drop a write hold taken via os_acquire_rwlock_write()/cond variant.
void NV_API_CALL os_release_rwlock_write(void *pRwLock)
{
    up_write(&((os_rwlock_t *)pRwLock)->sem);
}
// Report whether a blocking acquire is currently legal (sleepable context).
NvBool NV_API_CALL os_semaphore_may_sleep(void)
{
    return NV_MAY_SLEEP();
}
// Return TRUE when executing in hardware-interrupt context.
NvBool NV_API_CALL os_is_isr(void)
{
    return (in_irq());
}
// return TRUE if the caller is the super-user (per NV_IS_SUSER())
NvBool NV_API_CALL os_is_administrator(void)
{
    return NV_IS_SUSER();
}
// TRUE when the caller holds CAP_SYS_NICE and may raise scheduling priority.
NvBool NV_API_CALL os_allow_priority_override(void)
{
    return capable(CAP_SYS_NICE);
}
// Unbounded strcpy wrapper; dst must have room for src plus the NUL.
char* NV_API_CALL os_string_copy(
    char *dst,
    const char *src
)
{
    return strcpy(dst, src);
}
// strlen wrapper; result is truncated to 32 bits.
NvU32 NV_API_CALL os_string_length(
    const char* str
)
{
    return strlen(str);
}
// Parse an unsigned integer in the given base via simple_strtoul();
// *endp (if non-NULL) is set past the last digit consumed.
NvU32 NV_API_CALL os_strtoul(const char *str, char **endp, NvU32 base)
{
    return (NvU32)simple_strtoul(str, endp, base);
}
// strcmp wrapper: negative/zero/positive ordering result.
NvS32 NV_API_CALL os_string_compare(const char *str1, const char *str2)
{
    return strcmp(str1, str2);
}
//
// os_mem_copy_custom - dword-at-a-time copy fallback used by os_mem_copy().
//
// For copies of at least 128 bytes whose source and destination are both
// 4-byte aligned, the bulk is moved as NvU32 words and the remainder as
// bytes; otherwise the whole range is byte-copied. Regions must not
// overlap. Returns dstPtr, like memcpy().
//
void *os_mem_copy_custom(
    void *dstPtr,
    const void *srcPtr,
    NvU32 length
)
{
    void *ret = dstPtr;
    NvU32 dwords, bytes = length;
    NvU8 *dst = dstPtr;
    const NvU8 *src = srcPtr;

    // Use logical && between the alignment tests. (The previous bitwise &
    // happened to work only because both operands were 0/1 comparison
    // results; && states the intent and short-circuits.)
    if ((length >= 128) &&
        (((NvUPtr)dst & 3) == 0) && (((NvUPtr)src & 3) == 0))
    {
        dwords = (length / sizeof(NvU32));
        bytes = (length % sizeof(NvU32));

        while (dwords != 0)
        {
            *(NvU32 *)dst = *(const NvU32 *)src;
            dst += sizeof(NvU32);
            src += sizeof(NvU32);
            dwords--;
        }
    }

    // Copy any unaligned prefix/suffix (or everything, in the small or
    // misaligned case) one byte at a time.
    while (bytes != 0)
    {
        *dst = *src;
        dst++;
        src++;
        bytes--;
    }

    return ret;
}
// os_mem_copy - memcpy front-end that picks a safe and fast implementation
// for the given addresses. Returns dst, like memcpy(). Regions must not
// overlap.
void *NV_API_CALL os_mem_copy(
    void *dst,
    const void *src,
    NvU32 length
)
{
#if defined(NVCPU_AARCH64)
    /*
     * TODO: Remove once memset/memcpy restructure is complete
     *
     * When performing memcpy for memory mapped as device, memcpy_[to/from]io
     * must be used. WAR to check the source and destination to determine the
     * correct memcpy_io to use.
     *
     * This WAR is limited to just aarch64 for now because the address range used
     * to map ioremap and vmalloc is different on ppc64le, and is_vmalloc_addr()
     * does not correctly handle this. is_ioremap_addr() is needed instead. This
     * will have to be addressed when reorganizing RM to use the new memset model.
     */
    if (is_vmalloc_addr(dst) && !is_vmalloc_addr(src))
    {
        memcpy_toio(dst, src, length);
        return dst;
    }
    else if (!is_vmalloc_addr(dst) && is_vmalloc_addr(src))
    {
        memcpy_fromio(dst, src, length);
        return dst;
    }
    else if (is_vmalloc_addr(dst) && is_vmalloc_addr(src))
    {
        // Both sides potentially device memory: use the word-wise copy.
        return os_mem_copy_custom(dst, src, length);
    }
    else
#endif
    {
#if defined(CONFIG_CC_OPTIMIZE_FOR_SIZE)
        /*
         * When the kernel is configured with CC_OPTIMIZE_FOR_SIZE=y, Kbuild uses
         * -Os universally. With -Os, GCC will aggressively inline builtins, even
         * if -fno-builtin is specified, including memcpy with a tiny byte-copy
         * loop on x86 (rep movsb). This is horrible for performance - a strict
         * dword copy is much faster - so when we detect this case, just provide
         * our own implementation.
         */
        return os_mem_copy_custom(dst, src, length);
#else
        /*
         * Generally speaking, the kernel-provided memcpy will be the fastest,
         * (optimized much better for the target architecture than the above
         * loop), so we want to use that whenever we can get to it.
         */
        return memcpy(dst, src, length);
#endif
    }
}
// Copy n bytes from a user-space address; NV_ERR_INVALID_ADDRESS if any
// byte could not be copied.
NV_STATUS NV_API_CALL os_memcpy_from_user(
    void *to,
    const void *from,
    NvU32 n
)
{
    return (NV_COPY_FROM_USER(to, from, n) ? NV_ERR_INVALID_ADDRESS : NV_OK);
}
// Copy n bytes to a user-space address; NV_ERR_INVALID_ADDRESS if any
// byte could not be copied.
NV_STATUS NV_API_CALL os_memcpy_to_user(
    void *to,
    const void *from,
    NvU32 n
)
{
    return (NV_COPY_TO_USER(to, from, n) ? NV_ERR_INVALID_ADDRESS : NV_OK);
}
// memset wrapper; on aarch64 routes vmalloc-range (possibly Device-mapped)
// destinations through memset_io(). Returns dst.
void* NV_API_CALL os_mem_set(
    void *dst,
    NvU8 c,
    NvU32 length
)
{
#if defined(NVCPU_AARCH64)
    /*
     * TODO: Remove once memset/memcpy restructure is complete
     *
     * WAR to check the destination to determine if the memory is of type Device
     * or Normal, and use the correct memset.
     *
     * This WAR is limited to just aarch64 for now because the address range used
     * to map ioremap and vmalloc is different on ppc64le, and is_vmalloc_addr()
     * does not correctly handle this. is_ioremap_addr() is needed instead. This
     * will have to be addressed when reorganizing RM to use the new memset model.
     */
    if (is_vmalloc_addr(dst))
    {
        memset_io(dst, (int)c, length);
        return dst;
    }
    else
#endif
        return memset(dst, (int)c, length);
}
// memcmp wrapper: negative/zero/positive ordering result.
NvS32 NV_API_CALL os_mem_cmp(
    const NvU8 *buf0,
    const NvU8* buf1,
    NvU32 length
)
{
    return memcmp(buf0, buf1, length);
}
/*
* Operating System Memory Functions
*
* There are 2 interesting aspects of resource manager memory allocations
* that need special consideration on Linux:
*
* 1. They are typically very large, (e.g. single allocations of 164KB)
*
* 2. The resource manager assumes that it can safely allocate memory in
* interrupt handlers.
*
* The first requires that we call vmalloc, the second kmalloc. We decide
* which one to use at run time, based on the size of the request and the
* context. Allocations larger than 128KB require vmalloc, in the context
* of an ISR they fail.
*/
#if defined(NV_VGX_HYPER)
/*
* Citrix Hypervisor-8.0 Dom0 sysmem ends up getting fragmented because
* of which high-order kmalloc allocations fail. We try to avoid it by
* requesting allocations not larger than 8K.
*
* KVM will be affected low memory pressure situation a lot,
* particularly if hugetlbfs hugepages are being used. Hence, 8K applies
* here too.
*/
#define KMALLOC_LIMIT 8192
#else
#define KMALLOC_LIMIT 131072
#endif
#define VMALLOC_ALLOCATION_SIZE_FLAG (1 << 0)
// os_alloc_mem - allocate kernel memory for RM.
//
// Uses kmalloc for requests up to KMALLOC_LIMIT and falls back to vmalloc
// otherwise (see strategy comment above). In atomic context only the
// atomic kmalloc path is attempted; large requests simply fail there.
NV_STATUS NV_API_CALL os_alloc_mem(
    void **address,
    NvU64 size
)
{
    NvU64 original_size = size;
    unsigned long alloc_size;
    if (address == NULL)
        return NV_ERR_INVALID_ARGUMENT;
    *address = NULL;
    // Reserve room for the size-tracking header consumed by os_free_mem().
    NV_MEM_TRACKING_PAD_SIZE(size);
    // check for integer overflow on size
    if (size < original_size)
        return NV_ERR_INVALID_ARGUMENT;
    //
    // NV_KMALLOC, nv_vmalloc take an input of 4 bytes in x86. To avoid
    // truncation and wrong allocation, below check is required.
    //
    alloc_size = size;
    if (alloc_size != size)
        return NV_ERR_INVALID_PARAMETER;
    if (!NV_MAY_SLEEP())
    {
        if (alloc_size <= KMALLOC_LIMIT)
            NV_KMALLOC_ATOMIC(*address, alloc_size);
    }
    else
    {
        if (alloc_size <= KMALLOC_LIMIT)
        {
            NV_KMALLOC_NO_OOM(*address, alloc_size);
        }
        if (*address == NULL)
        {
            *address = nv_vmalloc(alloc_size);
            // Tag the recorded size so os_free_mem() knows to use nv_vfree().
            alloc_size |= VMALLOC_ALLOCATION_SIZE_FLAG;
        }
    }
    // Stash alloc_size (with the vmalloc tag bit) for os_free_mem().
    NV_MEM_TRACKING_HIDE_SIZE(address, alloc_size);
    return ((*address != NULL) ? NV_OK : NV_ERR_NO_MEMORY);
}
// os_free_mem - free memory from os_alloc_mem(); dispatches to nv_vfree or
// NV_KFREE based on the tag bit stored alongside the allocation size.
void NV_API_CALL os_free_mem(void *address)
{
    NvU64 size;
    // Recover the size recorded by os_alloc_mem(); bit 0 marks vmalloc.
    NV_MEM_TRACKING_RETRIEVE_SIZE(address, size);
    if (size & VMALLOC_ALLOCATION_SIZE_FLAG)
    {
        size &= ~VMALLOC_ALLOCATION_SIZE_FLAG;
        nv_vfree(address, size);
    }
    else
        NV_KFREE(address, size);
}
/*****************************************************************************
*
* Name: osGetCurrentTime
*
*****************************************************************************/
// Report wall-clock (realtime) time, split into seconds and microseconds.
NV_STATUS NV_API_CALL os_get_current_time(
    NvU32 *seconds,
    NvU32 *useconds
)
{
    struct timespec64 now;

    ktime_get_real_ts64(&now);

    *seconds = now.tv_sec;
    *useconds = now.tv_nsec / NSEC_PER_USEC;

    return NV_OK;
}
//
// Get the High resolution tick count of the system uptime
//
NvU64 NV_API_CALL os_get_current_tick_hr(void)
{
    // Raw monotonic clock, expressed in nanoseconds.
    struct timespec64 now;
    ktime_get_raw_ts64(&now);
    return (NvU64) timespec64_to_ns(&now);
}
#if BITS_PER_LONG >= 64
// 64-bit builds: derive the tick from 'jiffies', which cannot practically
// overflow at this width.
NvU64 NV_API_CALL os_get_current_tick(void)
{
#if defined(NV_JIFFIES_TO_TIMESPEC_PRESENT)
    struct timespec ts;
    jiffies_to_timespec(jiffies, &ts);
    return (NvU64) timespec_to_ns(&ts);
#else
    struct timespec64 ts;
    jiffies_to_timespec64(jiffies, &ts);
    return (NvU64) timespec64_to_ns(&ts);
#endif
}
// Tick granularity in nanoseconds: one jiffy.
NvU64 NV_API_CALL os_get_tick_resolution(void)
{
    return (NvU64)jiffies_to_usecs(1) * NSEC_PER_USEC;
}
#else
NvU64 NV_API_CALL os_get_current_tick(void)
{
    /*
     * 'jiffies' overflows regularly on 32-bit builds (unsigned long is 4 bytes
     * instead of 8 bytes), so it's unwise to build a tick counter on it, since
     * the rest of the Resman assumes the 'tick' returned from this function is
     * monotonically increasing and never overflows.
     *
     * Instead, use the previous implementation that we've lived with since the
     * beginning, which uses system clock time to calculate the tick. This is
     * subject to problems if the system clock time changes dramatically
     * (more than a second or so) while the Resman is actively tracking a
     * timeout.
     */
    NvU32 seconds, useconds;
    (void) os_get_current_time(&seconds, &useconds);
    return ((NvU64)seconds * NSEC_PER_SEC +
            (NvU64)useconds * NSEC_PER_USEC);
}
NvU64 NV_API_CALL os_get_tick_resolution(void)
{
    /*
     * os_get_current_tick() uses os_get_current_time(), which has
     * microsecond resolution.
     */
    return 1000ULL;
}
#endif
//---------------------------------------------------------------------------
//
// Misc services.
//
//---------------------------------------------------------------------------
// Sleep/busy-wait for the given number of microseconds (see nv_sleep_us).
NV_STATUS NV_API_CALL os_delay_us(NvU32 MicroSeconds)
{
    return nv_sleep_us(MicroSeconds);
}
// Sleep for the given number of milliseconds (see nv_sleep_ms).
NV_STATUS NV_API_CALL os_delay(NvU32 MilliSeconds)
{
    return nv_sleep_ms(MilliSeconds);
}
// Estimate the CPU frequency in Hz. Prefers cpufreq when configured; on
// x86-64 without cpufreq, samples the TSC over 250 ms (delta * 4 =
// cycles/second). Returns 0 when neither method is available.
NvU64 NV_API_CALL os_get_cpu_frequency(void)
{
    NvU64 cpu_hz = 0;
#if defined(CONFIG_CPU_FREQ)
    // cpufreq_get() reports kHz for CPU 0; convert to Hz.
    cpu_hz = (cpufreq_get(0) * 1000);
#elif defined(NVCPU_X86_64)
    NvU64 tsc[2];
    tsc[0] = nv_rdtsc();
    mdelay(250);
    tsc[1] = nv_rdtsc();
    cpu_hz = ((tsc[1] - tsc[0]) * 4);
#endif
    return cpu_hz;
}
// Current process identifier, as defined by NV_GET_CURRENT_PROCESS().
NvU32 NV_API_CALL os_get_current_process(void)
{
    return NV_GET_CURRENT_PROCESS();
}
// Copy the current task's comm name into buf, always NUL-terminated.
// The task lock guards against a concurrent comm update.
void NV_API_CALL os_get_current_process_name(char *buf, NvU32 len)
{
    // BUGFIX: with len == 0 the NvU32 'len - 1' wraps and buf[len - 1]
    // writes far out of bounds; a zero-length buffer can hold nothing.
    if (len == 0)
        return;

    task_lock(current);
    strncpy(buf, current->comm, len - 1);
    buf[len - 1] = '\0';
    task_unlock(current);
}
// Report the current thread id; interrupt context has no meaningful task,
// so it is reported as 0.
NV_STATUS NV_API_CALL os_get_current_thread(NvU64 *threadId)
{
    *threadId = in_interrupt() ? 0 : (NvU64) current->pid;
    return NV_OK;
}
/*******************************************************************************/
/* */
/* Debug and logging utilities follow */
/* */
/*******************************************************************************/
// The current debug display level (default to maximum debug level)
NvU32 cur_debuglevel = 0xffffffff;
/*
* The binary core of RM (nv-kernel.o) calls both out_string, and nv_printf.
*/
// Emit a string verbatim to the kernel log ("%s" guards against '%' in str).
inline void NV_API_CALL out_string(const char *str)
{
    printk("%s", str);
}
/*
* nv_printf() prints to the kernel log for the driver.
* Returns the number of characters written.
*/
/*
 * nv_printf() prints to the kernel log for the driver, when the message's
 * debuglevel passes the current verbosity filter.
 * Returns the number of characters written (0 when filtered or dropped).
 */
int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...)
{
    va_list arglist;
    int chars_written = 0;

    if (debuglevel >= ((cur_debuglevel >> 4) & 0x3))
    {
        size_t length;
        unsigned long flags;

        // When printk is called to extend the output of the previous line
        // (i.e. when the previous line did not end in \n), the printk call
        // must contain KERN_CONT.  Older kernels still print the line
        // correctly, but KERN_CONT was technically always required.

        // This means that every call to printk() needs to have a KERN_xxx
        // prefix.  The only way to get this is to rebuild the format string
        // into a new buffer, with a KERN_xxx prefix prepended.

        // Unfortunately, we can't guarantee that two calls to nv_printf()
        // won't be interrupted by a printk from another driver.  So to be
        // safe, we always append KERN_CONT.  It's still technically wrong,
        // but it works.

        // The long-term fix is to modify all NV_PRINTF-ish calls so that the
        // string always contains only one \n (at the end) and NV_PRINTF_EX
        // is deleted.  But that is unlikely to ever happen.

        length = strlen(printf_format);
        if (length < 1)
            return 0;

        // BUGFIX: bound the format length. The KERN_CONT prefix plus the
        // format string plus its NUL must fit in the static bounce buffer,
        // or the memcpy below overflows nv_error_string.
        if (length + sizeof(KERN_CONT) > MAX_ERROR_STRING)
            return 0;

        NV_SPIN_LOCK_IRQSAVE(&nv_error_string_lock, flags);

        // KERN_CONT changed in the 3.6 kernel, so we can't assume its
        // composition or size.
        memcpy(nv_error_string, KERN_CONT, sizeof(KERN_CONT) - 1);
        memcpy(nv_error_string + sizeof(KERN_CONT) - 1, printf_format, length + 1);

        va_start(arglist, printf_format);
        chars_written = vprintk(nv_error_string, arglist);
        va_end(arglist);

        NV_SPIN_UNLOCK_IRQRESTORE(&nv_error_string_lock, flags);
    }

    return chars_written;
}
// snprintf-style formatting into buf; forwards to the kernel's vsnprintf()
// and returns its result.
NvS32 NV_API_CALL os_snprintf(char *buf, NvU32 size, const char *fmt, ...)
{
    int written;
    va_list args;

    va_start(args, fmt);
    written = vsnprintf(buf, size, fmt, args);
    va_end(args);

    return written;
}
// va_list variant of os_snprintf(); returns vsnprintf()'s result.
NvS32 NV_API_CALL os_vsnprintf(char *buf, NvU32 size, const char *fmt, va_list arglist)
{
    return vsnprintf(buf, size, fmt, arglist);
}
// Format an error message into the shared static buffer (under the
// spinlock, since IRQ-context callers may race) and emit it at KERN_ERR.
void NV_API_CALL os_log_error(const char *fmt, va_list ap)
{
    unsigned long flags;
    NV_SPIN_LOCK_IRQSAVE(&nv_error_string_lock, flags);
    vsnprintf(nv_error_string, MAX_ERROR_STRING, fmt, ap);
    // vsnprintf terminates within size, but make truncation explicit.
    nv_error_string[MAX_ERROR_STRING - 1] = 0;
    printk(KERN_ERR "%s", nv_error_string);
    NV_SPIN_UNLOCK_IRQRESTORE(&nv_error_string_lock, flags);
}
// Write one byte to an I/O port.
void NV_API_CALL os_io_write_byte(
    NvU32 address,
    NvU8 value
)
{
    outb(value, address);
}
// Write a 16-bit word to an I/O port.
void NV_API_CALL os_io_write_word(
    NvU32 address,
    NvU16 value
)
{
    outw(value, address);
}
// Write a 32-bit dword to an I/O port.
void NV_API_CALL os_io_write_dword(
    NvU32 address,
    NvU32 value
)
{
    outl(value, address);
}
// Read one byte from an I/O port.
NvU8 NV_API_CALL os_io_read_byte(
    NvU32 address
)
{
    return inb(address);
}
// Read a 16-bit word from an I/O port.
NvU16 NV_API_CALL os_io_read_word(
    NvU32 address
)
{
    return inw(address);
}
// Read a 32-bit dword from an I/O port.
NvU32 NV_API_CALL os_io_read_dword(
    NvU32 address
)
{
    return inl(address);
}
// TRUE only when built with fully-virtualized Xen kernel support and
// os_is_vgx_hyper() reports we are the hypervisor host; NV_FALSE otherwise.
static NvBool NV_API_CALL xen_support_fully_virtualized_kernel(void)
{
#if defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL)
    return (os_is_vgx_hyper());
#endif
    return NV_FALSE;
}
/*
 * os_map_kernel_space - map a physical range into kernel virtual space.
 *
 * mode selects the caching attribute (cached / write-combined / uncached).
 * Must be called from sleepable context. Returns NULL on failure.
 */
void* NV_API_CALL os_map_kernel_space(
    NvU64 start,
    NvU64 size_bytes,
    NvU32 mode
)
{
    void *vaddr;
    // Physical address 0 is special-cased (except under fully-virtualized
    // Xen): it may only be mapped cached, via the kernel's direct mapping.
    if (!xen_support_fully_virtualized_kernel() && start == 0)
    {
        if (mode != NV_MEMORY_CACHED)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: os_map_kernel_space: won't map address 0x%0llx UC!\n", start);
            return NULL;
        }
        else
            return (void *)PAGE_OFFSET;
    }
    // ioremap may sleep, so reject atomic-context callers loudly.
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: os_map_kernel_space: can't map 0x%0llx, invalid context!\n", start);
        os_dbg_breakpoint();
        return NULL;
    }
    switch (mode)
    {
        case NV_MEMORY_CACHED:
            vaddr = nv_ioremap_cache(start, size_bytes);
            break;
        case NV_MEMORY_WRITECOMBINED:
            // Fall back to uncached when WC iomaps are disabled by RM policy.
            vaddr = rm_disable_iomap_wc() ?
                    nv_ioremap_nocache(start, size_bytes) :
                    nv_ioremap_wc(start, size_bytes);
            break;
        case NV_MEMORY_UNCACHED:
        case NV_MEMORY_DEFAULT:
            vaddr = nv_ioremap_nocache(start, size_bytes);
            break;
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: os_map_kernel_space: unsupported mode!\n");
            return NULL;
    }
    return vaddr;
}
void NV_API_CALL os_unmap_kernel_space(