File:    amd64/amd64/mem.c
Warning: line 216, column 13: Copies out a struct with uncleared padding (>= 4 bytes)
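
The flagged copyout() sits at file line 216, in the MEMRANGE_GET case of memioctl() in the listing below. The descriptor array is allocated with malloc(M_WAITOK) and only its named fields are written by mem_range_attr_get(), so any alignment padding inside struct mem_range_desc still holds stale kernel heap bytes when the buffer is copied out to userspace. On amd64 the structure (uint64_t base and length fields plus an int flags word and a short owner-name array, per sys/memrange.h) is 8-byte aligned, and the trailing padding needed to round it up to that alignment is what the checker reports as ">= 4 bytes". A minimal remediation sketch, assuming the goal is simply to clear that padding before the copy, is to request zero-filled memory with M_ZERO; this is illustrative only, not necessarily the change FreeBSD committed:

    /*
     * Sketch only: ask malloc(9) for zeroed memory so the padding bytes
     * inside each struct mem_range_desc are cleared before copyout()
     * exposes the buffer to userspace.
     */
    md = (struct mem_range_desc *)
        malloc(nd * sizeof(struct mem_range_desc),
        M_MEMDESC, M_WAITOK | M_ZERO);
    error = mem_range_attr_get(md, &nd);
    if (!error)
            error = copyout(md, mo->mo_desc,
                nd * sizeof(struct mem_range_desc));
    free(md, M_MEMDESC);

An explicit bzero(md, nd * sizeof(struct mem_range_desc)) right after the allocation would have the same effect; either way the kernel no longer discloses uninitialized heap contents through the structure padding. The annotated source follows.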
  1  /*-
  2   * Copyright (c) 1988 University of Utah.
  3   * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
  4   * All rights reserved.
  5   *
  6   * This code is derived from software contributed to Berkeley by
  7   * the Systems Programming Group of the University of Utah Computer
  8   * Science Department, and code derived from software contributed to
  9   * Berkeley by William Jolitz.
 10   *
 11   * Redistribution and use in source and binary forms, with or without
 12   * modification, are permitted provided that the following conditions
 13   * are met:
 14   * 1. Redistributions of source code must retain the above copyright
 15   *    notice, this list of conditions and the following disclaimer.
 16   * 2. Redistributions in binary form must reproduce the above copyright
 17   *    notice, this list of conditions and the following disclaimer in the
 18   *    documentation and/or other materials provided with the distribution.
 19   * 4. Neither the name of the University nor the names of its contributors
 20   *    may be used to endorse or promote products derived from this software
 21   *    without specific prior written permission.
 22   *
 23   * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 24   * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 25   * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 26   * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 27   * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 28   * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 29   * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 30   * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 31   * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 32   * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 33   * SUCH DAMAGE.
 34   *
 35   *      from: Utah $Hdr: mem.c 1.13 89/10/08$
 36   *      from: @(#)mem.c 7.2 (Berkeley) 5/9/91
 37   */
 38
 39  #include <sys/cdefs.h>
 40  __FBSDID("$FreeBSD: releng/11.0/sys/amd64/amd64/mem.c 277055 2015-01-12 08:58:07Z kib $");
 41
 42  /*
 43   * Memory special file
 44   */
 45
 46  #include <sys/param.h>
 47  #include <sys/conf.h>
 48  #include <sys/fcntl.h>
 49  #include <sys/ioccom.h>
 50  #include <sys/kernel.h>
 51  #include <sys/lock.h>
 52  #include <sys/malloc.h>
 53  #include <sys/memrange.h>
 54  #include <sys/module.h>
 55  #include <sys/mutex.h>
 56  #include <sys/proc.h>
 57  #include <sys/signalvar.h>
 58  #include <sys/systm.h>
 59  #include <sys/uio.h>
 60
 61  #include <machine/md_var.h>
 62  #include <machine/specialreg.h>
 63  #include <machine/vmparam.h>
 64
 65  #include <vm/vm.h>
 66  #include <vm/pmap.h>
 67  #include <vm/vm_extern.h>
 68
 69  #include <machine/memdev.h>
 70
 71  /*
 72   * Used in /dev/mem drivers and elsewhere
 73   */
 74  MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
 75
 76  /* ARGSUSED */
 77  int
 78  memrw(struct cdev *dev, struct uio *uio, int flags)
 79  {
 80          struct iovec *iov;
 81          void *p;
 82          ssize_t orig_resid;
 83          u_long v, vd;
 84          u_int c;
 85          int error;
 86
 87          error = 0;
 88          orig_resid = uio->uio_resid;
 89          while (uio->uio_resid > 0 && error == 0) {
 90                  iov = uio->uio_iov;
 91                  if (iov->iov_len == 0) {
 92                          uio->uio_iov++;
 93                          uio->uio_iovcnt--;
 94                          if (uio->uio_iovcnt < 0)
 95                                  panic("memrw");
 96                          continue;
 97                  }
 98                  v = uio->uio_offset;
 99                  c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));
100
101                  switch (dev2unit(dev)) {
102                  case CDEV_MINOR_KMEM:
103                          /*
104                           * Since c is clamped to be less or equal than
105                           * PAGE_SIZE, the uiomove() call does not
106                           * access past the end of the direct map.
107                           */
108                          if (v >= DMAP_MIN_ADDRESS &&
109                              v < DMAP_MIN_ADDRESS + dmaplimit) {
110                                  error = uiomove((void *)v, c, uio);
111                                  break;
112                          }
113
114                          if (!kernacc((void *)v, c, uio->uio_rw == UIO_READ ?
115                              VM_PROT_READ : VM_PROT_WRITE)) {
116                                  error = EFAULT;
117                                  break;
118                          }
119
120                          /*
121                           * If the extracted address is not accessible
122                           * through the direct map, then we make a
123                           * private (uncached) mapping because we can't
124                           * depend on the existing kernel mapping
125                           * remaining valid until the completion of
126                           * uiomove().
127                           *
128                           * XXX We cannot provide access to the
129                           * physical page 0 mapped into KVA.
130                           */
131                          v = pmap_extract(kernel_pmap, v);
132                          if (v == 0) {
133                                  error = EFAULT;
134                                  break;
135                          }
136                          /* FALLTHROUGH */
137                  case CDEV_MINOR_MEM:
138                          if (v < dmaplimit) {
139                                  vd = PHYS_TO_DMAP(v);
140                                  error = uiomove((void *)vd, c, uio);
141                                  break;
142                          }
143                          if (v >= (1ULL << cpu_maxphyaddr)) {
144                                  error = EFAULT;
145                                  break;
146                          }
147                          p = pmap_mapdev(v, PAGE_SIZE);
148                          error = uiomove(p, c, uio);
149                          pmap_unmapdev((vm_offset_t)p, PAGE_SIZE);
150                          break;
151                  }
152          }
153          /*
154           * Don't return error if any byte was written.  Read and write
155           * can return error only if no i/o was performed.
156           */
157          if (uio->uio_resid != orig_resid)
158                  error = 0;
159          return (error);
160  }
161
162  /*
163   * allow user processes to MMAP some memory sections
164   * instead of going through read/write
165   */
166  /* ARGSUSED */
167  int
168  memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
169      int prot __unused, vm_memattr_t *memattr __unused)
170  {
171          if (dev2unit(dev) == CDEV_MINOR_MEM)
172                  *paddr = offset;
173          else if (dev2unit(dev) == CDEV_MINOR_KMEM)
174                  *paddr = vtophys(offset);
175          /* else panic! */
176          return (0);
177  }
178
179  /*
180   * Operations for changing memory attributes.
181   *
182   * This is basically just an ioctl shim for mem_range_attr_get
183   * and mem_range_attr_set.
184   */
185  /* ARGSUSED */
186  int
187  memioctl(struct cdev *dev __unused, u_long cmd, caddr_t data, int flags,
188      struct thread *td)
189  {
190          int nd, error = 0;
191          struct mem_range_op *mo = (struct mem_range_op *)data;
192          struct mem_range_desc *md;
193
194          /* is this for us? */
195          if ((cmd != MEMRANGE_GET) &&
196              (cmd != MEMRANGE_SET))
197                  return (ENOTTY);
198
199          /* any chance we can handle this? */
200          if (mem_range_softc.mr_op == NULL)
201                  return (EOPNOTSUPP);
202
203          /* do we have any descriptors? */
204          if (mem_range_softc.mr_ndesc == 0)
205                  return (ENXIO);
206
207          switch (cmd) {
208          case MEMRANGE_GET:
209                  nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
210                  if (nd > 0) {
211                          md = (struct mem_range_desc *)
212                              malloc(nd * sizeof(struct mem_range_desc),
213                              M_MEMDESC, M_WAITOK);
214                          error = mem_range_attr_get(md, &nd);
215                          if (!error)
216                                  error = copyout(md, mo->mo_desc,
217                                      nd * sizeof(struct mem_range_desc));
218                          free(md, M_MEMDESC);
219                  }
220                  else
221                          nd = mem_range_softc.mr_ndesc;
222                  mo->mo_arg[0] = nd;
223                  break;
224
225          case MEMRANGE_SET:
226                  md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc),
227                      M_MEMDESC, M_WAITOK);
228                  error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
229                  /* clamp description string */
230                  md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
231                  if (error == 0)
232                          error = mem_range_attr_set(md, &mo->mo_arg[0]);
233                  free(md, M_MEMDESC);
234                  break;
235          }
236          return (error);
237  }