Index: user/ngie/bug-237403/stand/common/load_elf.c =================================================================== --- user/ngie/bug-237403/stand/common/load_elf.c (revision 348028) +++ user/ngie/bug-237403/stand/common/load_elf.c (revision 348029) @@ -1,1224 +1,1224 @@ /*- * Copyright (c) 1998 Michael Smith * Copyright (c) 1998 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <sys/cdefs.h> __FBSDID("$FreeBSD$"); #include <sys/param.h> #include <sys/exec.h> #include <sys/linker.h> #include <sys/module.h> #include <sys/stdint.h> #include <string.h> #include <machine/elf.h> #include <stand.h> #define FREEBSD_ELF #include <sys/link_elf.h> #include "bootstrap.h" #define COPYOUT(s,d,l) archsw.arch_copyout((vm_offset_t)(s), d, l) #if defined(__i386__) && __ELF_WORD_SIZE == 64 #undef ELF_TARG_CLASS #undef ELF_TARG_MACH #define ELF_TARG_CLASS ELFCLASS64 #define ELF_TARG_MACH EM_X86_64 #endif typedef struct elf_file { Elf_Phdr *ph; Elf_Ehdr *ehdr; Elf_Sym *symtab; Elf_Hashelt *hashtab; Elf_Hashelt nbuckets; Elf_Hashelt nchains; Elf_Hashelt *buckets; Elf_Hashelt *chains; Elf_Rel *rel; size_t relsz; Elf_Rela *rela; size_t relasz; char *strtab; size_t strsz; int fd; caddr_t firstpage; size_t firstlen; int kernel; uint64_t off; } *elf_file_t; static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef, uint64_t loadaddr); static int __elfN(lookup_symbol)(struct preloaded_file *mp, elf_file_t ef, const char* name, Elf_Sym* sym); static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef, Elf_Addr p, void *val, size_t len); static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef, Elf_Addr p_start, Elf_Addr p_end); static symaddr_fn __elfN(symaddr); static char *fake_modname(const char *name); const char *__elfN(kerneltype) = "elf kernel"; const char *__elfN(moduletype) = "elf module"; uint64_t __elfN(relocation_offset) = 0; extern void elf_wrong_field_size(void); #define CONVERT_FIELD(b, f, e) \ switch (sizeof((b)->f)) { \ case 2: \ (b)->f = e ## 16toh((b)->f); \ break; \ case 4: \ (b)->f = e ## 32toh((b)->f); \ break; \ case 8: \ (b)->f = e ## 64toh((b)->f); \ break; \ default: \ /* Force a link time error. 
*/ \ elf_wrong_field_size(); break; \ } #define CONVERT_SWITCH(h, d, f) \ switch ((h)->e_ident[EI_DATA]) { \ case ELFDATA2MSB: \ f(d, be); \ break; \ case ELFDATA2LSB: \ f(d, le); \ break; \ default: \ return (EINVAL); \ } static int elf_header_convert(Elf_Ehdr *ehdr) { /* * Fixup ELF header endianness. * * The Xhdr structure was loaded using a block read call to optimize file * accesses. It might happen that the endianness of the system memory * is different from the endianness of the ELF header. Swap fields here to * guarantee that Xhdr always contains valid data regardless of * architecture. */ #define HEADER_FIELDS(b, e) \ CONVERT_FIELD(b, e_type, e); \ CONVERT_FIELD(b, e_machine, e); \ CONVERT_FIELD(b, e_version, e); \ CONVERT_FIELD(b, e_entry, e); \ CONVERT_FIELD(b, e_phoff, e); \ CONVERT_FIELD(b, e_shoff, e); \ CONVERT_FIELD(b, e_flags, e); \ CONVERT_FIELD(b, e_ehsize, e); \ CONVERT_FIELD(b, e_phentsize, e); \ CONVERT_FIELD(b, e_phnum, e); \ CONVERT_FIELD(b, e_shentsize, e); \ CONVERT_FIELD(b, e_shnum, e); \ CONVERT_FIELD(b, e_shstrndx, e) CONVERT_SWITCH(ehdr, ehdr, HEADER_FIELDS); #undef HEADER_FIELDS return (0); } static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr) { #define PROGRAM_HEADER_FIELDS(b, e) \ CONVERT_FIELD(b, p_type, e); \ CONVERT_FIELD(b, p_flags, e); \ CONVERT_FIELD(b, p_offset, e); \ CONVERT_FIELD(b, p_vaddr, e); \ CONVERT_FIELD(b, p_paddr, e); \ CONVERT_FIELD(b, p_filesz, e); \ CONVERT_FIELD(b, p_memsz, e); \ CONVERT_FIELD(b, p_align, e) CONVERT_SWITCH(ehdr, phdr, PROGRAM_HEADER_FIELDS); #undef PROGRAM_HEADER_FIELDS return (0); } static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr) { #define SECTION_HEADER_FIELDS(b, e) \ CONVERT_FIELD(b, sh_name, e); \ CONVERT_FIELD(b, sh_type, e); \ CONVERT_FIELD(b, sh_link, e); \ CONVERT_FIELD(b, sh_info, e); \ CONVERT_FIELD(b, sh_flags, e); \ CONVERT_FIELD(b, sh_addr, e); \ CONVERT_FIELD(b, sh_offset, e); \ CONVERT_FIELD(b, sh_size, e); \ CONVERT_FIELD(b, sh_addralign, e); \ CONVERT_FIELD(b, sh_entsize, e) CONVERT_SWITCH(ehdr, shdr, SECTION_HEADER_FIELDS); #undef SECTION_HEADER_FIELDS return (0); } #undef CONVERT_SWITCH #undef CONVERT_FIELD
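/*
 * A rough sketch of what the conversion machinery above expands to, for
 * illustration only: converting a big-endian image's 16-bit e_machine
 * field via CONVERT_FIELD(ehdr, e_machine, be) becomes approximately
 *
 *	switch (sizeof(ehdr->e_machine)) {
 *	case 2:
 *		ehdr->e_machine = be16toh(ehdr->e_machine);
 *		break;
 *	...
 *	}
 *
 * A field with an unexpected size falls into the default case, and the
 * call to the deliberately undefined elf_wrong_field_size() turns that
 * mistake into a link-time error rather than a silent runtime one.
 */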
static int __elfN(load_elf_header)(char *filename, elf_file_t ef) { ssize_t bytes_read; Elf_Ehdr *ehdr; int err; /* * Open the image, read and validate the ELF header */ if (filename == NULL) /* can't handle nameless */ return (EFTYPE); if ((ef->fd = open(filename, O_RDONLY)) == -1) return (errno); ef->firstpage = malloc(PAGE_SIZE); if (ef->firstpage == NULL) { close(ef->fd); return (ENOMEM); } bytes_read = read(ef->fd, ef->firstpage, PAGE_SIZE); ef->firstlen = (size_t)bytes_read; if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) { err = EFTYPE; /* could be EIO, but may be small file */ goto error; } ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage; /* Is it ELF? */ if (!IS_ELF(*ehdr)) { err = EFTYPE; goto error; } if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */ ehdr->e_ident[EI_DATA] != ELF_TARG_DATA || ehdr->e_ident[EI_VERSION] != EV_CURRENT) /* Version ? */ { err = EFTYPE; goto error; } err = elf_header_convert(ehdr); if (err) goto error; if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) { /* Machine ? */ err = EFTYPE; goto error; } #ifdef LOADER_VERIEXEC if (verify_file(ef->fd, filename, bytes_read, VE_MUST) < 0) { err = EAUTH; goto error; } #endif return (0); error: if (ef->firstpage != NULL) { free(ef->firstpage); ef->firstpage = NULL; } if (ef->fd != -1) { close(ef->fd); ef->fd = -1; } return (err); } /* * Attempt to load the file (file) as an ELF module. It will be stored at * (dest), and a pointer to a module structure describing the loaded object * will be saved in (result). */ int __elfN(loadfile)(char *filename, uint64_t dest, struct preloaded_file **result) { return (__elfN(loadfile_raw)(filename, dest, result, 0)); } int __elfN(loadfile_raw)(char *filename, uint64_t dest, struct preloaded_file **result, int multiboot) { struct preloaded_file *fp, *kfp; struct elf_file ef; Elf_Ehdr *ehdr; int err; fp = NULL; bzero(&ef, sizeof(struct elf_file)); ef.fd = -1; err = __elfN(load_elf_header)(filename, &ef); if (err != 0) return (err); ehdr = ef.ehdr; /* * Check to see what sort of module we are. */ kfp = file_findfile(NULL, __elfN(kerneltype)); #ifdef __powerpc__ /* * Kernels can be ET_DYN, so just assume the first loaded object is the * kernel. This assumption will be checked later. */ if (kfp == NULL) ef.kernel = 1; #endif if (ef.kernel || ehdr->e_type == ET_EXEC) { /* Looks like a kernel */ if (kfp != NULL) { printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: kernel already loaded\n"); err = EPERM; goto oerr; } /* * Calculate destination address based on kernel entrypoint. * * For ARM, the destination address is independent of any values * in the elf header (an ARM kernel can be loaded at any 2MB * boundary), so we leave dest set to the value calculated by * archsw.arch_loadaddr() and passed in to this function. */ #ifndef __arm__ if (ehdr->e_type == ET_EXEC) dest = (ehdr->e_entry & ~PAGE_MASK); #endif if ((ehdr->e_entry & ~PAGE_MASK) == 0) { printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: not a kernel (maybe static binary?)\n"); err = EPERM; goto oerr; } ef.kernel = 1; } else if (ehdr->e_type == ET_DYN) { /* Looks like a kld module */ if (multiboot != 0) { printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module as multiboot\n"); err = EPERM; goto oerr; } if (kfp == NULL) { printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module before kernel\n"); err = EPERM; goto oerr; } if (strcmp(__elfN(kerneltype), kfp->f_type)) { printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module with kernel type '%s'\n", kfp->f_type); err = EPERM; goto oerr; } /* Looks OK, go ahead */ ef.kernel = 0; } else { err = EFTYPE; goto oerr; } if (archsw.arch_loadaddr != NULL) dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest); else dest = roundup(dest, PAGE_SIZE); /* * Ok, we think we should handle this. */ fp = file_alloc(); if (fp == NULL) { printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: cannot allocate module info\n"); err = EPERM; goto out; } if (ef.kernel == 1 && multiboot == 0) setenv("kernelname", filename, 1); fp->f_name = strdup(filename); if (multiboot == 0) fp->f_type = strdup(ef.kernel ? 
__elfN(kerneltype) : __elfN(moduletype)); else fp->f_type = strdup("elf multiboot kernel"); #ifdef ELF_VERBOSE if (ef.kernel) printf("%s entry at 0x%jx\n", filename, (uintmax_t)ehdr->e_entry); #else printf("%s ", filename); #endif fp->f_size = __elfN(loadimage)(fp, &ef, dest); if (fp->f_size == 0 || fp->f_addr == 0) goto ioerr; /* save exec header as metadata */ file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr); /* Load OK, return module pointer */ *result = (struct preloaded_file *)fp; err = 0; goto out; ioerr: err = EIO; oerr: file_discard(fp); out: if (ef.firstpage) free(ef.firstpage); if (ef.fd != -1) close(ef.fd); return (err); } /* * With the file (fd) open on the image, and (ehdr) containing * the Elf header, load the image at (off) */ static int __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, uint64_t off) { int i; u_int j; Elf_Ehdr *ehdr; Elf_Phdr *phdr, *php; Elf_Shdr *shdr; char *shstr; int ret; vm_offset_t firstaddr; vm_offset_t lastaddr; size_t chunk; ssize_t result; Elf_Addr ssym, esym; Elf_Dyn *dp; Elf_Addr adp; Elf_Addr ctors; int ndp; int symstrindex; int symtabindex; Elf_Size size; u_int fpcopy; Elf_Sym sym; Elf_Addr p_start, p_end; dp = NULL; shdr = NULL; ret = 0; firstaddr = lastaddr = 0; ehdr = ef->ehdr; if (ehdr->e_type == ET_EXEC) { #if defined(__i386__) || defined(__amd64__) #if __ELF_WORD_SIZE == 64 /* x86_64 relocates after locore */ off = - (off & 0xffffffffff000000ull); #else /* i386 relocates after locore */ off = - (off & 0xff000000u); #endif #elif defined(__powerpc__) /* * On the purely virtual memory machines like e500, the kernel * is linked against its final VA range, which is most often * not available at the loader stage, but only after kernel * initializes and completes its VM settings. In such cases we * cannot use p_vaddr field directly to load ELF segments, but * put them at some 'load-time' locations. */ if (off & 0xf0000000u) { off = -(off & 0xf0000000u); /* * XXX the physical load address should not be * hardcoded. Note that the Book-E kernel assumes that * it's loaded at a 16MB boundary for now... */ off += 0x01000000; ehdr->e_entry += off; #ifdef ELF_VERBOSE printf("Converted entry 0x%jx\n", (uintmax_t)ehdr->e_entry); #endif } else off = 0; #elif defined(__arm__) && !defined(EFI) /* * The elf headers in arm kernels specify virtual addresses in * all header fields, even the ones that should be physical * addresses. We assume the entry point is in the first page, * and masking the page offset will leave us with the virtual * address the kernel was linked at. We subtract that from the * load offset, making 'off' into the value which, when added * to a virtual address in an elf header, translates it to a * physical address. We do the va->pa conversion on the entry * point address in the header now, so that later we can launch * the kernel by just jumping to that address. * * When booting from UEFI the copyin and copyout functions * handle adjusting the location relative to the first virtual * address. Because of this there is no need to adjust the * offset or entry point address as these will both be handled * by the efi code. 
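 *
 * A worked example with made-up numbers: for a kernel linked at
 * 0xc0000000 with e_entry 0xc0000200 being loaded at physical
 * 0x10000000 (the incoming 'off'), off becomes 0x10000000 -
 * 0xc0000000 = 0x50000000 (modulo 2^32); adding that offset to any
 * link-time virtual address yields its physical address, and
 * e_entry becomes the directly jumpable 0x10000200.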
*/ off -= ehdr->e_entry & ~PAGE_MASK; ehdr->e_entry += off; #ifdef ELF_VERBOSE - printf("ehdr->e_entry 0x%jx", va<->pa off %llx\n", + printf("ehdr->e_entry 0x%jx, va<->pa off %llx\n", (uintmax_t)ehdr->e_entry, off); #endif #else off = 0; /* other archs use direct mapped kernels */ #endif } ef->off = off; if (ef->kernel) __elfN(relocation_offset) = off; if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) { printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: program header not within first page\n"); goto out; } phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff); for (i = 0; i < ehdr->e_phnum; i++) { if (elf_program_header_convert(ehdr, phdr)) continue; /* We want to load PT_LOAD segments only.. */ if (phdr[i].p_type != PT_LOAD) continue; #ifdef ELF_VERBOSE printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx", (long)phdr[i].p_filesz, (long)phdr[i].p_offset, (long)(phdr[i].p_vaddr + off), (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1)); #else if ((phdr[i].p_flags & PF_W) == 0) { printf("text=0x%lx ", (long)phdr[i].p_filesz); } else { printf("data=0x%lx", (long)phdr[i].p_filesz); if (phdr[i].p_filesz < phdr[i].p_memsz) printf("+0x%lx", (long)(phdr[i].p_memsz - phdr[i].p_filesz)); printf(" "); } #endif fpcopy = 0; if (ef->firstlen > phdr[i].p_offset) { fpcopy = ef->firstlen - phdr[i].p_offset; archsw.arch_copyin(ef->firstpage + phdr[i].p_offset, phdr[i].p_vaddr + off, fpcopy); } if (phdr[i].p_filesz > fpcopy) { if (kern_pread(ef->fd, phdr[i].p_vaddr + off + fpcopy, phdr[i].p_filesz - fpcopy, phdr[i].p_offset + fpcopy) != 0) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: read failed\n"); goto out; } } /* clear space from oversized segments; eg: bss */ if (phdr[i].p_filesz < phdr[i].p_memsz) { #ifdef ELF_VERBOSE printf(" (bss: 0x%lx-0x%lx)", (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz), (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz -1)); #endif kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz, phdr[i].p_memsz - phdr[i].p_filesz); } #ifdef ELF_VERBOSE printf("\n"); #endif if (archsw.arch_loadseg != NULL) archsw.arch_loadseg(ehdr, phdr + i, off); if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off)) firstaddr = phdr[i].p_vaddr + off; if (lastaddr == 0 || lastaddr < (phdr[i].p_vaddr + off + phdr[i].p_memsz)) lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz; } lastaddr = roundup(lastaddr, sizeof(long)); /* * Get the section headers. We need this for finding the .ctors * section as well as for loading any symbols. Both may be hard * to do if reading from a .gz file as it involves seeking. I * think the rule is going to have to be that you must strip a * file to remove symbols before gzipping it. */ chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize; if (chunk == 0 || ehdr->e_shoff == 0) goto nosyms; shdr = alloc_pread(ef->fd, ehdr->e_shoff, chunk); if (shdr == NULL) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: failed to read section headers"); goto nosyms; } for (i = 0; i < ehdr->e_shnum; i++) elf_section_header_convert(ehdr, &shdr[i]); file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr); /* * Read the section string table and look for the .ctors section. * We need to tell the kernel where it is so that it can call the * ctors. 
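 * The address and size are recorded below as the MODINFOMD_CTORS_ADDR
 * and MODINFOMD_CTORS_SIZE metadata entries, which the kernel consults
 * when it finalizes the preloaded file.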
*/ chunk = shdr[ehdr->e_shstrndx].sh_size; if (chunk) { shstr = alloc_pread(ef->fd, shdr[ehdr->e_shstrndx].sh_offset, chunk); if (shstr) { for (i = 0; i < ehdr->e_shnum; i++) { if (strcmp(shstr + shdr[i].sh_name, ".ctors") != 0) continue; ctors = shdr[i].sh_addr; file_addmetadata(fp, MODINFOMD_CTORS_ADDR, sizeof(ctors), &ctors); size = shdr[i].sh_size; file_addmetadata(fp, MODINFOMD_CTORS_SIZE, sizeof(size), &size); break; } free(shstr); } } /* * Now load any symbols. */ symtabindex = -1; symstrindex = -1; for (i = 0; i < ehdr->e_shnum; i++) { if (shdr[i].sh_type != SHT_SYMTAB) continue; for (j = 0; j < ehdr->e_phnum; j++) { if (phdr[j].p_type != PT_LOAD) continue; if (shdr[i].sh_offset >= phdr[j].p_offset && (shdr[i].sh_offset + shdr[i].sh_size <= phdr[j].p_offset + phdr[j].p_filesz)) { shdr[i].sh_offset = 0; shdr[i].sh_size = 0; break; } } if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0) continue; /* already loaded in a PT_LOAD above */ /* Save it for loading below */ symtabindex = i; symstrindex = shdr[i].sh_link; } if (symtabindex < 0 || symstrindex < 0) goto nosyms; /* Ok, committed to a load. */ #ifndef ELF_VERBOSE printf("syms=["); #endif ssym = lastaddr; for (i = symtabindex; i >= 0; i = symstrindex) { #ifdef ELF_VERBOSE char *secname; switch(shdr[i].sh_type) { case SHT_SYMTAB: /* Symbol table */ secname = "symtab"; break; case SHT_STRTAB: /* String table */ secname = "strtab"; break; default: secname = "WHOA!!"; break; } #endif size = shdr[i].sh_size; #if defined(__powerpc__) #if __ELF_WORD_SIZE == 64 size = htobe64(size); #else size = htobe32(size); #endif #endif archsw.arch_copyin(&size, lastaddr, sizeof(size)); lastaddr += sizeof(size); #ifdef ELF_VERBOSE printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname, (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset, (uintmax_t)lastaddr, (uintmax_t)(lastaddr + shdr[i].sh_size)); #else if (i == symstrindex) printf("+"); printf("0x%lx+0x%lx", (long)sizeof(size), (long)size); #endif if (lseek(ef->fd, (off_t)shdr[i].sh_offset, SEEK_SET) == -1) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not seek for symbols - skipped!"); lastaddr = ssym; ssym = 0; goto nosyms; } result = archsw.arch_readin(ef->fd, lastaddr, shdr[i].sh_size); if (result < 0 || (size_t)result != shdr[i].sh_size) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not read symbols - skipped! 
" "(%ju != %ju)", (uintmax_t)result, (uintmax_t)shdr[i].sh_size); lastaddr = ssym; ssym = 0; goto nosyms; } /* Reset offsets relative to ssym */ lastaddr += shdr[i].sh_size; lastaddr = roundup(lastaddr, sizeof(size)); if (i == symtabindex) symtabindex = -1; else if (i == symstrindex) symstrindex = -1; } esym = lastaddr; #ifndef ELF_VERBOSE printf("]"); #endif #if defined(__powerpc__) /* On PowerPC we always need to provide BE data to the kernel */ #if __ELF_WORD_SIZE == 64 ssym = htobe64((uint64_t)ssym); esym = htobe64((uint64_t)esym); #else ssym = htobe32((uint32_t)ssym); esym = htobe32((uint32_t)esym); #endif #endif file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym); file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym); nosyms: printf("\n"); ret = lastaddr - firstaddr; fp->f_addr = firstaddr; php = NULL; for (i = 0; i < ehdr->e_phnum; i++) { if (phdr[i].p_type == PT_DYNAMIC) { php = phdr + i; adp = php->p_vaddr; file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp), &adp); break; } } if (php == NULL) /* this is bad, we cannot get to symbols or _DYNAMIC */ goto out; ndp = php->p_filesz / sizeof(Elf_Dyn); if (ndp == 0) goto out; dp = malloc(php->p_filesz); if (dp == NULL) goto out; archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz); ef->strsz = 0; for (i = 0; i < ndp; i++) { if (dp[i].d_tag == 0) break; switch (dp[i].d_tag) { case DT_HASH: ef->hashtab = (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off); break; case DT_STRTAB: ef->strtab = (char *)(uintptr_t)(dp[i].d_un.d_ptr + off); break; case DT_STRSZ: ef->strsz = dp[i].d_un.d_val; break; case DT_SYMTAB: ef->symtab = (Elf_Sym *)(uintptr_t)(dp[i].d_un.d_ptr + off); break; case DT_REL: ef->rel = (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off); break; case DT_RELSZ: ef->relsz = dp[i].d_un.d_val; break; case DT_RELA: ef->rela = (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off); break; case DT_RELASZ: ef->relasz = dp[i].d_un.d_val; break; default: break; } } if (ef->hashtab == NULL || ef->symtab == NULL || ef->strtab == NULL || ef->strsz == 0) goto out; COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets)); COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains)); ef->buckets = ef->hashtab + 2; ef->chains = ef->buckets + ef->nbuckets; if (__elfN(lookup_symbol)(fp, ef, "__start_set_modmetadata_set", &sym) != 0) return 0; p_start = sym.st_value + ef->off; if (__elfN(lookup_symbol)(fp, ef, "__stop_set_modmetadata_set", &sym) != 0) return ENOENT; p_end = sym.st_value + ef->off; if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0) goto out; if (ef->kernel) /* kernel must not depend on anything */ goto out; out: if (dp) free(dp); if (shdr) free(shdr); return ret; } static char invalid_name[] = "bad"; char * fake_modname(const char *name) { const char *sp, *ep; char *fp; size_t len; sp = strrchr(name, '/'); if (sp) sp++; else sp = name; ep = strrchr(sp, '.'); if (ep == NULL) { ep = sp + strlen(sp); } if (ep == sp) { sp = invalid_name; ep = invalid_name + sizeof(invalid_name) - 1; } len = ep - sp; fp = malloc(len + 1); if (fp == NULL) return NULL; memcpy(fp, sp, len); fp[len] = '\0'; return fp; } #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64 struct mod_metadata64 { int md_version; /* structure version MDTV_* */ int md_type; /* type of entry MDT_* */ uint64_t md_data; /* specific data */ uint64_t md_cval; /* common string label */ }; #endif #if defined(__amd64__) && __ELF_WORD_SIZE == 32 struct mod_metadata32 { int md_version; /* structure version MDTV_* */ int md_type; /* type of entry MDT_* */ 
uint32_t md_data; /* specific data */ uint32_t md_cval; /* common string label */ }; #endif int __elfN(load_modmetadata)(struct preloaded_file *fp, uint64_t dest) { struct elf_file ef; int err, i, j; Elf_Shdr *sh_meta, *shdr = NULL; Elf_Shdr *sh_data[2]; char *shstrtab = NULL; size_t size; Elf_Addr p_start, p_end; bzero(&ef, sizeof(struct elf_file)); ef.fd = -1; err = __elfN(load_elf_header)(fp->f_name, &ef); if (err != 0) goto out; if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) { ef.kernel = 1; } else if (ef.ehdr->e_type != ET_DYN) { err = EFTYPE; goto out; } size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize; shdr = alloc_pread(ef.fd, ef.ehdr->e_shoff, size); if (shdr == NULL) { err = ENOMEM; goto out; } /* Load shstrtab. */ shstrtab = alloc_pread(ef.fd, shdr[ef.ehdr->e_shstrndx].sh_offset, shdr[ef.ehdr->e_shstrndx].sh_size); if (shstrtab == NULL) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "load_modmetadata: unable to load shstrtab\n"); err = EFTYPE; goto out; } /* Find set_modmetadata_set and data sections. */ sh_data[0] = sh_data[1] = sh_meta = NULL; for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) { if (strcmp(&shstrtab[shdr[i].sh_name], "set_modmetadata_set") == 0) { sh_meta = &shdr[i]; } if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) || (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) { sh_data[j++] = &shdr[i]; } } if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "load_modmetadata: unable to find set_modmetadata_set or data sections\n"); err = EFTYPE; goto out; } /* Load set_modmetadata_set into memory */ err = kern_pread(ef.fd, dest, sh_meta->sh_size, sh_meta->sh_offset); if (err != 0) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "load_modmetadata: unable to load set_modmetadata_set: %d\n", err); goto out; } p_start = dest; p_end = dest + sh_meta->sh_size; dest += sh_meta->sh_size; /* Load data sections into memory. */ err = kern_pread(ef.fd, dest, sh_data[0]->sh_size, sh_data[0]->sh_offset); if (err != 0) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "load_modmetadata: unable to load data: %d\n", err); goto out; } /* * We have to increment the dest, so that the offset is the same into * both the .rodata and .data sections. 
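 * Put differently, ef.off = dest - sh_addr of the first section, so
 * adding ef.off to a file virtual address in either section yields its
 * loaded location; bumping dest by the inter-section gap before the
 * second read keeps that single offset valid for both sections.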
*/ ef.off = -(sh_data[0]->sh_addr - dest); dest += (sh_data[1]->sh_addr - sh_data[0]->sh_addr); err = kern_pread(ef.fd, dest, sh_data[1]->sh_size, sh_data[1]->sh_offset); if (err != 0) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "load_modmetadata: unable to load data: %d\n", err); goto out; } err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end); if (err != 0) { printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "load_modmetadata: unable to parse metadata: %d\n", err); goto out; } out: if (shstrtab != NULL) free(shstrtab); if (shdr != NULL) free(shdr); if (ef.firstpage != NULL) free(ef.firstpage); if (ef.fd != -1) close(ef.fd); return (err); } int __elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef, Elf_Addr p_start, Elf_Addr p_end) { struct mod_metadata md; #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64 struct mod_metadata64 md64; #elif defined(__amd64__) && __ELF_WORD_SIZE == 32 struct mod_metadata32 md32; #endif struct mod_depend *mdepend; struct mod_version mver; char *s; int error, modcnt, minfolen; Elf_Addr v, p; modcnt = 0; p = p_start; while (p < p_end) { COPYOUT(p, &v, sizeof(v)); error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v)); if (error == EOPNOTSUPP) v += ef->off; else if (error != 0) return (error); #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64 COPYOUT(v, &md64, sizeof(md64)); error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64)); if (error == EOPNOTSUPP) { md64.md_cval += ef->off; md64.md_data += ef->off; } else if (error != 0) return (error); md.md_version = md64.md_version; md.md_type = md64.md_type; md.md_cval = (const char *)(uintptr_t)md64.md_cval; md.md_data = (void *)(uintptr_t)md64.md_data; #elif defined(__amd64__) && __ELF_WORD_SIZE == 32 COPYOUT(v, &md32, sizeof(md32)); error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32)); if (error == EOPNOTSUPP) { md32.md_cval += ef->off; md32.md_data += ef->off; } else if (error != 0) return (error); md.md_version = md32.md_version; md.md_type = md32.md_type; md.md_cval = (const char *)(uintptr_t)md32.md_cval; md.md_data = (void *)(uintptr_t)md32.md_data; #else COPYOUT(v, &md, sizeof(md)); error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md)); if (error == EOPNOTSUPP) { md.md_cval += ef->off; md.md_data = (void *)((uintptr_t)md.md_data + (uintptr_t)ef->off); } else if (error != 0) return (error); #endif p += sizeof(Elf_Addr); switch(md.md_type) { case MDT_DEPEND: if (ef->kernel) /* kernel must not depend on anything */ break; s = strdupout((vm_offset_t)md.md_cval); minfolen = sizeof(*mdepend) + strlen(s) + 1; mdepend = malloc(minfolen); if (mdepend == NULL) return ENOMEM; COPYOUT((vm_offset_t)md.md_data, mdepend, sizeof(*mdepend)); strcpy((char*)(mdepend + 1), s); free(s); file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen, mdepend); free(mdepend); break; case MDT_VERSION: s = strdupout((vm_offset_t)md.md_cval); COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver)); file_addmodule(fp, s, mver.mv_version, NULL); free(s); modcnt++; break; } } if (modcnt == 0) { s = fake_modname(fp->f_name); file_addmodule(fp, s, 1, NULL); free(s); } return 0; } static unsigned long elf_hash(const char *name) { const unsigned char *p = (const unsigned char *) name; unsigned long h = 0; unsigned long g; while (*p != '\0') { h = (h << 4) + *p++; if ((g = h & 0xf0000000) != 0) h ^= g >> 24; h &= ~g; } return h; } static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE) "_lookup_symbol: corrupt symbol table\n"; int __elfN(lookup_symbol)(struct preloaded_file *fp, 
elf_file_t ef, const char* name, Elf_Sym *symp) { Elf_Hashelt symnum; Elf_Sym sym; char *strp; unsigned long hash; hash = elf_hash(name); COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum)); while (symnum != STN_UNDEF) { if (symnum >= ef->nchains) { printf(__elfN(bad_symtable)); return ENOENT; } COPYOUT(ef->symtab + symnum, &sym, sizeof(sym)); if (sym.st_name == 0) { printf(__elfN(bad_symtable)); return ENOENT; } strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name)); if (strcmp(name, strp) == 0) { free(strp); if (sym.st_shndx != SHN_UNDEF || (sym.st_value != 0 && ELF_ST_TYPE(sym.st_info) == STT_FUNC)) { *symp = sym; return 0; } return ENOENT; } free(strp); COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum)); } return ENOENT; } /* * Apply any intra-module relocations to the value. p is the load address * of the value and val/len is the value to be modified. This does NOT modify * the image in-place, because this is done by kern_linker later on. * * Returns EOPNOTSUPP if no relocation method is supplied. */ static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef, Elf_Addr p, void *val, size_t len) { size_t n; Elf_Rela a; Elf_Rel r; int error; /* * The kernel is already relocated, but we still want to apply * offset adjustments. */ if (ef->kernel) return (EOPNOTSUPP); for (n = 0; n < ef->relsz / sizeof(r); n++) { COPYOUT(ef->rel + n, &r, sizeof(r)); error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL, ef->off, p, val, len); if (error != 0) return (error); } for (n = 0; n < ef->relasz / sizeof(a); n++) { COPYOUT(ef->rela + n, &a, sizeof(a)); error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA, ef->off, p, val, len); if (error != 0) return (error); } return (0); } static Elf_Addr __elfN(symaddr)(struct elf_file *ef, Elf_Size symidx) { /* Symbol lookup by index not required here. */ return (0); } Index: user/ngie/bug-237403/stand/powerpc/Makefile =================================================================== --- user/ngie/bug-237403/stand/powerpc/Makefile (revision 348028) +++ user/ngie/bug-237403/stand/powerpc/Makefile (revision 348029) @@ -1,10 +1,13 @@ # $FreeBSD$ NO_OBJ=t .include <bsd.init.mk> SUBDIR.yes= boot1.chrp ofw uboot + +.if "${TARGET_ARCH}" == "powerpc64" SUBDIR.${MK_FDT}+= kboot +.endif .include <bsd.subdir.mk> Index: user/ngie/bug-237403/sys/arm/allwinner/a10_fb.c =================================================================== --- user/ngie/bug-237403/sys/arm/allwinner/a10_fb.c (revision 348028) +++ user/ngie/bug-237403/sys/arm/allwinner/a10_fb.c (revision 348029) @@ -1,662 +1,663 @@ /*- * Copyright (c) 2016 Jared McNeill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Allwinner A10/A20 Framebuffer */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fb_if.h" #include "hdmi_if.h" #define FB_DEFAULT_W 800 #define FB_DEFAULT_H 600 #define FB_DEFAULT_REF 60 #define FB_BPP 32 #define FB_ALIGN 0x1000 #define HDMI_ENABLE_DELAY 20000 #define DEBE_FREQ 300000000 #define DOT_CLOCK_TO_HZ(c) ((c) * 1000) /* Display backend */ #define DEBE_REG_START 0x800 #define DEBE_REG_END 0x1000 #define DEBE_REG_WIDTH 4 #define DEBE_MODCTL 0x800 #define MODCTL_ITLMOD_EN (1 << 28) #define MODCTL_OUT_SEL_MASK (0x7 << 20) #define MODCTL_OUT_SEL(sel) ((sel) << 20) #define OUT_SEL_LCD 0 #define MODCTL_LAY0_EN (1 << 8) #define MODCTL_START_CTL (1 << 1) #define MODCTL_EN (1 << 0) #define DEBE_DISSIZE 0x808 #define DIS_HEIGHT(h) (((h) - 1) << 16) #define DIS_WIDTH(w) (((w) - 1) << 0) #define DEBE_LAYSIZE0 0x810 #define LAY_HEIGHT(h) (((h) - 1) << 16) #define LAY_WIDTH(w) (((w) - 1) << 0) #define DEBE_LAYCOOR0 0x820 #define LAY_XCOOR(x) ((x) << 16) #define LAY_YCOOR(y) ((y) << 0) #define DEBE_LAYLINEWIDTH0 0x840 #define DEBE_LAYFB_L32ADD0 0x850 #define LAYFB_L32ADD(pa) ((pa) << 3) #define DEBE_LAYFB_H4ADD 0x860 #define LAY0FB_H4ADD(pa) ((pa) >> 29) #define DEBE_REGBUFFCTL 0x870 #define REGBUFFCTL_LOAD (1 << 0) #define DEBE_ATTCTL1 0x8a0 #define ATTCTL1_FBFMT(fmt) ((fmt) << 8) #define FBFMT_XRGB8888 9 #define ATTCTL1_FBPS(ps) ((ps) << 0) #define FBPS_32BPP_ARGB 0 /* Timing controller */ #define TCON_GCTL 0x000 #define GCTL_TCON_EN (1 << 31) #define GCTL_IO_MAP_SEL_TCON1 (1 << 0) #define TCON_GINT1 0x008 #define GINT1_TCON1_LINENO(n) (((n) + 2) << 0) #define TCON0_DCLK 0x044 #define DCLK_EN 0xf0000000 #define TCON1_CTL 0x090 #define TCON1_EN (1 << 31) #define INTERLACE_EN (1 << 20) #define TCON1_SRC_SEL(src) ((src) << 0) #define TCON1_SRC_CH1 0 #define TCON1_SRC_CH2 1 #define TCON1_SRC_BLUE 2 #define TCON1_START_DELAY(sd) ((sd) << 4) #define TCON1_BASIC0 0x094 #define TCON1_BASIC1 0x098 #define TCON1_BASIC2 0x09c #define TCON1_BASIC3 0x0a0 #define TCON1_BASIC4 0x0a4 #define TCON1_BASIC5 0x0a8 #define BASIC_X(x) (((x) - 1) << 16) #define BASIC_Y(y) (((y) - 1) << 0) #define BASIC3_HT(ht) (((ht) - 1) << 16) #define BASIC3_HBP(hbp) (((hbp) - 1) << 0) #define BASIC4_VT(vt) ((vt) << 16) #define BASIC4_VBP(vbp) (((vbp) - 1) << 0) #define BASIC5_HSPW(hspw) (((hspw) - 1) << 16) #define BASIC5_VSPW(vspw) (((vspw) - 1) << 0) #define TCON1_IO_POL 0x0f0 #define IO_POL_IO2_INV (1 << 26) #define IO_POL_PHSYNC (1 << 25) #define IO_POL_PVSYNC (1 << 24) #define TCON1_IO_TRI 0x0f4 #define IO0_OUTPUT_TRI_EN (1 << 24) #define IO1_OUTPUT_TRI_EN (1 << 25) #define IO_TRI_MASK 0xffffffff #define START_DELAY(vbl) (MIN(32, (vbl)) - 2) #define VBLANK_LEN(vt, vd, i) ((((vt) << (i)) >> 1) - (vd) - 2) #define VTOTAL(vt) ((vt) * 2) #define DIVIDE(x, y) (((x) + ((y) / 2)) / (y)) struct a10fb_softc { device_t dev; device_t fbdev; struct 
resource *res[2]; /* Framebuffer */ struct fb_info info; size_t fbsize; bus_addr_t paddr; vm_offset_t vaddr; /* HDMI */ eventhandler_tag hdmi_evh; }; static struct resource_spec a10fb_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* DEBE */ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* TCON */ { -1, 0 } }; #define DEBE_READ(sc, reg) bus_read_4((sc)->res[0], (reg)) #define DEBE_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) #define TCON_READ(sc, reg) bus_read_4((sc)->res[1], (reg)) #define TCON_WRITE(sc, reg, val) bus_write_4((sc)->res[1], (reg), (val)) static int a10fb_allocfb(struct a10fb_softc *sc) { sc->vaddr = kmem_alloc_contig(sc->fbsize, M_NOWAIT | M_ZERO, 0, ~0, FB_ALIGN, 0, VM_MEMATTR_WRITE_COMBINING); if (sc->vaddr == 0) { device_printf(sc->dev, "failed to allocate FB memory\n"); return (ENOMEM); } sc->paddr = pmap_kextract(sc->vaddr); return (0); } static void a10fb_freefb(struct a10fb_softc *sc) { kmem_free(sc->vaddr, sc->fbsize); } static int a10fb_setup_debe(struct a10fb_softc *sc, const struct videomode *mode) { int width, height, interlace, reg; clk_t clk_ahb, clk_dram, clk_debe; hwreset_t rst; uint32_t val; int error; interlace = !!(mode->flags & VID_INTERLACE); width = mode->hdisplay; height = mode->vdisplay << interlace; /* Leave reset */ error = hwreset_get_by_ofw_name(sc->dev, 0, "de_be", &rst); if (error != 0) { device_printf(sc->dev, "cannot find reset 'de_be'\n"); return (error); } error = hwreset_deassert(rst); if (error != 0) { device_printf(sc->dev, "couldn't de-assert reset 'de_be'\n"); return (error); } /* Gating AHB clock for BE */ error = clk_get_by_ofw_name(sc->dev, 0, "ahb_de_be", &clk_ahb); if (error != 0) { device_printf(sc->dev, "cannot find clk 'ahb_de_be'\n"); return (error); } error = clk_enable(clk_ahb); if (error != 0) { device_printf(sc->dev, "cannot enable clk 'ahb_de_be'\n"); return (error); } /* Enable DRAM clock to BE */ error = clk_get_by_ofw_name(sc->dev, 0, "dram_de_be", &clk_dram); if (error != 0) { device_printf(sc->dev, "cannot find clk 'dram_de_be'\n"); return (error); } error = clk_enable(clk_dram); if (error != 0) { device_printf(sc->dev, "cannot enable clk 'dram_de_be'\n"); return (error); } /* Set BE clock to 300MHz and enable */ error = clk_get_by_ofw_name(sc->dev, 0, "de_be", &clk_debe); if (error != 0) { device_printf(sc->dev, "cannot find clk 'de_be'\n"); return (error); } error = clk_set_freq(clk_debe, DEBE_FREQ, CLK_SET_ROUND_DOWN); if (error != 0) { device_printf(sc->dev, "cannot set 'de_be' frequency\n"); return (error); } error = clk_enable(clk_debe); if (error != 0) { device_printf(sc->dev, "cannot enable clk 'de_be'\n"); return (error); } /* Initialize all registers to 0 */ for (reg = DEBE_REG_START; reg < DEBE_REG_END; reg += DEBE_REG_WIDTH) DEBE_WRITE(sc, reg, 0); /* Enable display backend */ DEBE_WRITE(sc, DEBE_MODCTL, MODCTL_EN); /* Set display size */ DEBE_WRITE(sc, DEBE_DISSIZE, DIS_HEIGHT(height) | DIS_WIDTH(width)); /* Set layer 0 size, position, and stride */ DEBE_WRITE(sc, DEBE_LAYSIZE0, LAY_HEIGHT(height) | LAY_WIDTH(width)); DEBE_WRITE(sc, DEBE_LAYCOOR0, LAY_XCOOR(0) | LAY_YCOOR(0)); DEBE_WRITE(sc, DEBE_LAYLINEWIDTH0, width * FB_BPP); /* Point layer 0 to FB memory */ DEBE_WRITE(sc, DEBE_LAYFB_L32ADD0, LAYFB_L32ADD(sc->paddr)); DEBE_WRITE(sc, DEBE_LAYFB_H4ADD, LAY0FB_H4ADD(sc->paddr)); /* Set backend format and pixel sequence */ DEBE_WRITE(sc, DEBE_ATTCTL1, ATTCTL1_FBFMT(FBFMT_XRGB8888) | ATTCTL1_FBPS(FBPS_32BPP_ARGB)); /* Enable layer 0, output to LCD, setup interlace */ val = DEBE_READ(sc, DEBE_MODCTL); val 
|= MODCTL_LAY0_EN; val &= ~MODCTL_OUT_SEL_MASK; val |= MODCTL_OUT_SEL(OUT_SEL_LCD); if (interlace) val |= MODCTL_ITLMOD_EN; else val &= ~MODCTL_ITLMOD_EN; DEBE_WRITE(sc, DEBE_MODCTL, val); /* Commit settings */ DEBE_WRITE(sc, DEBE_REGBUFFCTL, REGBUFFCTL_LOAD); /* Start DEBE */ val = DEBE_READ(sc, DEBE_MODCTL); val |= MODCTL_START_CTL; DEBE_WRITE(sc, DEBE_MODCTL, val); return (0); } static int a10fb_setup_pll(struct a10fb_softc *sc, uint64_t freq) { clk_t clk_sclk1, clk_sclk2; int error; error = clk_get_by_ofw_name(sc->dev, 0, "lcd_ch1_sclk1", &clk_sclk1); if (error != 0) { device_printf(sc->dev, "cannot find clk 'lcd_ch1_sclk1'\n"); return (error); } error = clk_get_by_ofw_name(sc->dev, 0, "lcd_ch1_sclk2", &clk_sclk2); if (error != 0) { device_printf(sc->dev, "cannot find clk 'lcd_ch1_sclk2'\n"); return (error); } error = clk_set_freq(clk_sclk2, freq, 0); if (error != 0) { device_printf(sc->dev, "cannot set lcd ch1 frequency\n"); return (error); } error = clk_enable(clk_sclk2); if (error != 0) { device_printf(sc->dev, "cannot enable lcd ch1 sclk2\n"); return (error); } error = clk_enable(clk_sclk1); if (error != 0) { device_printf(sc->dev, "cannot enable lcd ch1 sclk1\n"); return (error); } return (0); } static int a10fb_setup_tcon(struct a10fb_softc *sc, const struct videomode *mode) { u_int interlace, hspw, hbp, vspw, vbp, vbl, width, height, start_delay; u_int vtotal, framerate, clk; clk_t clk_ahb; hwreset_t rst; uint32_t val; int error; interlace = !!(mode->flags & VID_INTERLACE); width = mode->hdisplay; height = mode->vdisplay; hspw = mode->hsync_end - mode->hsync_start; hbp = mode->htotal - mode->hsync_start; vspw = mode->vsync_end - mode->vsync_start; vbp = mode->vtotal - mode->vsync_start; vbl = VBLANK_LEN(mode->vtotal, mode->vdisplay, interlace); start_delay = START_DELAY(vbl); /* Leave reset */ error = hwreset_get_by_ofw_name(sc->dev, 0, "lcd", &rst); if (error != 0) { device_printf(sc->dev, "cannot find reset 'lcd'\n"); return (error); } error = hwreset_deassert(rst); if (error != 0) { device_printf(sc->dev, "couldn't de-assert reset 'lcd'\n"); return (error); } /* Gating AHB clock for LCD */ error = clk_get_by_ofw_name(sc->dev, 0, "ahb_lcd", &clk_ahb); if (error != 0) { device_printf(sc->dev, "cannot find clk 'ahb_lcd'\n"); return (error); } error = clk_enable(clk_ahb); if (error != 0) { device_printf(sc->dev, "cannot enable clk 'ahb_lcd'\n"); return (error); } /* Disable TCON and TCON1 */ TCON_WRITE(sc, TCON_GCTL, 0); TCON_WRITE(sc, TCON1_CTL, 0); /* Enable clocks */ TCON_WRITE(sc, TCON0_DCLK, DCLK_EN); /* Disable IO and data output ports */ TCON_WRITE(sc, TCON1_IO_TRI, IO_TRI_MASK); /* Disable TCON and select TCON1 */ TCON_WRITE(sc, TCON_GCTL, GCTL_IO_MAP_SEL_TCON1); /* Source width and height */ TCON_WRITE(sc, TCON1_BASIC0, BASIC_X(width) | BASIC_Y(height)); /* Scaler width and height */ TCON_WRITE(sc, TCON1_BASIC1, BASIC_X(width) | BASIC_Y(height)); /* Output width and height */ TCON_WRITE(sc, TCON1_BASIC2, BASIC_X(width) | BASIC_Y(height)); /* Horizontal total and back porch */ TCON_WRITE(sc, TCON1_BASIC3, BASIC3_HT(mode->htotal) | BASIC3_HBP(hbp)); /* Vertical total and back porch */ vtotal = VTOTAL(mode->vtotal); if (interlace) { framerate = DIVIDE(DIVIDE(DOT_CLOCK_TO_HZ(mode->dot_clock), mode->htotal), mode->vtotal); clk = mode->htotal * (VTOTAL(mode->vtotal) + 1) * framerate; if ((clk / 2) == DOT_CLOCK_TO_HZ(mode->dot_clock)) vtotal += 1; } TCON_WRITE(sc, TCON1_BASIC4, BASIC4_VT(vtotal) | BASIC4_VBP(vbp)); /* Horizontal and vertical sync */ TCON_WRITE(sc, TCON1_BASIC5, 
BASIC5_HSPW(hspw) | BASIC5_VSPW(vspw)); /* Polarity */ val = IO_POL_IO2_INV; if (mode->flags & VID_PHSYNC) val |= IO_POL_PHSYNC; if (mode->flags & VID_PVSYNC) val |= IO_POL_PVSYNC; TCON_WRITE(sc, TCON1_IO_POL, val); /* Set scan line for TCON1 line trigger */ TCON_WRITE(sc, TCON_GINT1, GINT1_TCON1_LINENO(start_delay)); /* Enable TCON1 */ val = TCON1_EN; if (interlace) val |= INTERLACE_EN; val |= TCON1_START_DELAY(start_delay); val |= TCON1_SRC_SEL(TCON1_SRC_CH1); TCON_WRITE(sc, TCON1_CTL, val); /* Setup PLL */ return (a10fb_setup_pll(sc, DOT_CLOCK_TO_HZ(mode->dot_clock))); } static void a10fb_enable_tcon(struct a10fb_softc *sc, int onoff) { uint32_t val; /* Enable TCON */ val = TCON_READ(sc, TCON_GCTL); if (onoff) val |= GCTL_TCON_EN; else val &= ~GCTL_TCON_EN; TCON_WRITE(sc, TCON_GCTL, val); /* Enable TCON1 IO0/IO1 outputs */ val = TCON_READ(sc, TCON1_IO_TRI); if (onoff) val &= ~(IO0_OUTPUT_TRI_EN | IO1_OUTPUT_TRI_EN); else val |= (IO0_OUTPUT_TRI_EN | IO1_OUTPUT_TRI_EN); TCON_WRITE(sc, TCON1_IO_TRI, val); } static int a10fb_configure(struct a10fb_softc *sc, const struct videomode *mode) { size_t fbsize; int error; fbsize = round_page(mode->hdisplay * mode->vdisplay * (FB_BPP / NBBY)); /* Detach the old FB device */ if (sc->fbdev != NULL) { device_delete_child(sc->dev, sc->fbdev); sc->fbdev = NULL; } /* If the FB size has changed, free the old FB memory */ if (sc->fbsize > 0 && sc->fbsize != fbsize) { a10fb_freefb(sc); sc->vaddr = 0; } /* Allocate the FB if necessary */ sc->fbsize = fbsize; if (sc->vaddr == 0) { error = a10fb_allocfb(sc); if (error != 0) { device_printf(sc->dev, "failed to allocate FB memory\n"); return (ENXIO); } } /* Setup display backend */ error = a10fb_setup_debe(sc, mode); if (error != 0) return (error); /* Setup display timing controller */ error = a10fb_setup_tcon(sc, mode); if (error != 0) return (error); /* Attach framebuffer device */ sc->info.fb_name = device_get_nameunit(sc->dev); sc->info.fb_vbase = (intptr_t)sc->vaddr; sc->info.fb_pbase = sc->paddr; sc->info.fb_size = sc->fbsize; sc->info.fb_bpp = sc->info.fb_depth = FB_BPP; sc->info.fb_stride = mode->hdisplay * (FB_BPP / NBBY); sc->info.fb_width = mode->hdisplay; sc->info.fb_height = mode->vdisplay; sc->fbdev = device_add_child(sc->dev, "fbd", device_get_unit(sc->dev)); if (sc->fbdev == NULL) { device_printf(sc->dev, "failed to add fbd child\n"); return (ENOENT); } error = device_probe_and_attach(sc->fbdev); if (error != 0) { device_printf(sc->dev, "failed to attach fbd device\n"); return (error); } return (0); } static void a10fb_hdmi_event(void *arg, device_t hdmi_dev) { const struct videomode *mode; struct videomode hdmi_mode; struct a10fb_softc *sc; struct edid_info ei; uint8_t *edid; uint32_t edid_len; int error; sc = arg; edid = NULL; edid_len = 0; mode = NULL; error = HDMI_GET_EDID(hdmi_dev, &edid, &edid_len); if (error != 0) { device_printf(sc->dev, "failed to get EDID: %d\n", error); } else { error = edid_parse(edid, &ei); if (error != 0) { device_printf(sc->dev, "failed to parse EDID: %d\n", error); } else { if (bootverbose) edid_print(&ei); mode = ei.edid_preferred_mode; } } /* If the preferred mode could not be determined, use the default */ if (mode == NULL) mode = pick_mode_by_ref(FB_DEFAULT_W, FB_DEFAULT_H, FB_DEFAULT_REF); if (mode == NULL) { device_printf(sc->dev, "failed to find usable video mode\n"); return; } if (bootverbose) device_printf(sc->dev, "using %dx%d\n", mode->hdisplay, mode->vdisplay); /* Disable HDMI */ HDMI_ENABLE(hdmi_dev, 0); /* Disable timing controller */ 
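/*
 * The modeset sequence from here on: with HDMI already disabled above,
 * stop the TCON, reprogram the DEBE and TCON for the new mode, restart
 * the TCON, wait HDMI_ENABLE_DELAY, and only then re-enable HDMI, so
 * the sink should only ever see a stable signal.
 */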
a10fb_enable_tcon(sc, 0); /* Configure DEBE and TCON */ error = a10fb_configure(sc, mode); if (error != 0) { device_printf(sc->dev, "failed to configure FB: %d\n", error); return; } hdmi_mode = *mode; hdmi_mode.hskew = mode->hsync_end - mode->hsync_start; hdmi_mode.flags |= VID_HSKEW; HDMI_SET_VIDEOMODE(hdmi_dev, &hdmi_mode); /* Enable timing controller */ a10fb_enable_tcon(sc, 1); DELAY(HDMI_ENABLE_DELAY); /* Enable HDMI */ HDMI_ENABLE(hdmi_dev, 1); } static int a10fb_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun7i-a20-fb")) return (ENXIO); device_set_desc(dev, "Allwinner Framebuffer"); return (BUS_PROBE_DEFAULT); } static int a10fb_attach(device_t dev) { struct a10fb_softc *sc; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, a10fb_spec, sc->res)) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } sc->hdmi_evh = EVENTHANDLER_REGISTER(hdmi_event, a10fb_hdmi_event, sc, 0); return (0); } static struct fb_info * a10fb_fb_getinfo(device_t dev) { struct a10fb_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static device_method_t a10fb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, a10fb_probe), DEVMETHOD(device_attach, a10fb_attach), /* FB interface */ DEVMETHOD(fb_getinfo, a10fb_fb_getinfo), DEVMETHOD_END }; static driver_t a10fb_driver = { "fb", a10fb_methods, sizeof(struct a10fb_softc), }; static devclass_t a10fb_devclass; DRIVER_MODULE(fb, simplebus, a10fb_driver, a10fb_devclass, 0, 0); Index: user/ngie/bug-237403/sys/arm/allwinner/a10_hdmi.c =================================================================== --- user/ngie/bug-237403/sys/arm/allwinner/a10_hdmi.c (revision 348028) +++ user/ngie/bug-237403/sys/arm/allwinner/a10_hdmi.c (revision 348029) @@ -1,725 +1,726 @@ /*- * Copyright (c) 2016 Jared McNeill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* * Allwinner A10/A20 HDMI TX */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include "hdmi_if.h" #define HDMI_CTRL 0x004 #define CTRL_MODULE_EN (1 << 31) #define HDMI_INT_STATUS 0x008 #define HDMI_HPD 0x00c #define HPD_DET (1 << 0) #define HDMI_VID_CTRL 0x010 #define VID_CTRL_VIDEO_EN (1 << 31) #define VID_CTRL_HDMI_MODE (1 << 30) #define VID_CTRL_INTERLACE (1 << 4) #define VID_CTRL_REPEATER_2X (1 << 0) #define HDMI_VID_TIMING0 0x014 #define VID_ACT_V(v) (((v) - 1) << 16) #define VID_ACT_H(h) (((h) - 1) << 0) #define HDMI_VID_TIMING1 0x018 #define VID_VBP(vbp) (((vbp) - 1) << 16) #define VID_HBP(hbp) (((hbp) - 1) << 0) #define HDMI_VID_TIMING2 0x01c #define VID_VFP(vfp) (((vfp) - 1) << 16) #define VID_HFP(hfp) (((hfp) - 1) << 0) #define HDMI_VID_TIMING3 0x020 #define VID_VSPW(vspw) (((vspw) - 1) << 16) #define VID_HSPW(hspw) (((hspw) - 1) << 0) #define HDMI_VID_TIMING4 0x024 #define TX_CLOCK_NORMAL 0x03e00000 #define VID_VSYNC_ACTSEL (1 << 1) #define VID_HSYNC_ACTSEL (1 << 0) #define HDMI_AUD_CTRL 0x040 #define AUD_CTRL_EN (1 << 31) #define AUD_CTRL_RST (1 << 30) #define HDMI_ADMA_CTRL 0x044 #define HDMI_ADMA_MODE (1 << 31) #define HDMI_ADMA_MODE_DDMA (0 << 31) #define HDMI_ADMA_MODE_NDMA (1 << 31) #define HDMI_AUD_FMT 0x048 #define AUD_FMT_CH(n) ((n) - 1) #define HDMI_PCM_CTRL 0x04c #define HDMI_AUD_CTS 0x050 #define HDMI_AUD_N 0x054 #define HDMI_AUD_CH_STATUS0 0x058 #define CH_STATUS0_FS_FREQ (0xf << 24) #define CH_STATUS0_FS_FREQ_48 (2 << 24) #define HDMI_AUD_CH_STATUS1 0x05c #define CH_STATUS1_WORD_LEN (0x7 << 1) #define CH_STATUS1_WORD_LEN_16 (1 << 1) #define HDMI_AUDIO_RESET_RETRY 1000 #define HDMI_AUDIO_CHANNELS 2 #define HDMI_AUDIO_CHANNELMAP 0x76543210 #define HDMI_AUDIO_N 6144 /* 48 kHz */ #define HDMI_AUDIO_CTS(r, n) ((((r) * 10) * ((n) / 128)) / 480) #define HDMI_PADCTRL0 0x200 #define PADCTRL0_BIASEN (1 << 31) #define PADCTRL0_LDOCEN (1 << 30) #define PADCTRL0_LDODEN (1 << 29) #define PADCTRL0_PWENC (1 << 28) #define PADCTRL0_PWEND (1 << 27) #define PADCTRL0_PWENG (1 << 26) #define PADCTRL0_CKEN (1 << 25) #define PADCTRL0_SEN (1 << 24) #define PADCTRL0_TXEN (1 << 23) #define HDMI_PADCTRL1 0x204 #define PADCTRL1_AMP_OPT (1 << 23) #define PADCTRL1_AMPCK_OPT (1 << 22) #define PADCTRL1_DMP_OPT (1 << 21) #define PADCTRL1_EMP_OPT (1 << 20) #define PADCTRL1_EMPCK_OPT (1 << 19) #define PADCTRL1_PWSCK (1 << 18) #define PADCTRL1_PWSDT (1 << 17) #define PADCTRL1_REG_CSMPS (1 << 16) #define PADCTRL1_REG_DEN (1 << 15) #define PADCTRL1_REG_DENCK (1 << 14) #define PADCTRL1_REG_PLRCK (1 << 13) #define PADCTRL1_REG_EMP (0x7 << 10) #define PADCTRL1_REG_EMP_EN (0x2 << 10) #define PADCTRL1_REG_CD (0x3 << 8) #define PADCTRL1_REG_CKSS (0x3 << 6) #define PADCTRL1_REG_CKSS_1X (0x1 << 6) #define PADCTRL1_REG_CKSS_2X (0x0 << 6) #define PADCTRL1_REG_AMP (0x7 << 3) #define PADCTRL1_REG_AMP_EN (0x6 << 3) #define PADCTRL1_REG_PLR (0x7 << 0) #define HDMI_PLLCTRL0 0x208 #define PLLCTRL0_PLL_EN (1 << 31) #define PLLCTRL0_BWS (1 << 30) #define PLLCTRL0_HV_IS_33 (1 << 29) #define PLLCTRL0_LDO1_EN (1 << 28) #define PLLCTRL0_LDO2_EN (1 << 27) #define PLLCTRL0_SDIV2 (1 << 25) #define PLLCTRL0_VCO_GAIN (0x1 << 22) #define PLLCTRL0_S (0x7 << 17) #define PLLCTRL0_CP_S (0xf << 12) #define PLLCTRL0_CS (0x7 << 8) #define PLLCTRL0_PREDIV(x) ((x) << 4) #define PLLCTRL0_VCO_S (0x8 << 0) #define HDMI_PLLDBG0 0x20c #define PLLDBG0_CKIN_SEL (1 << 21) #define PLLDBG0_CKIN_SEL_PLL3 (0 << 21) 
#define PLLDBG0_CKIN_SEL_PLL7 (1 << 21) #define HDMI_PKTCTRL0 0x2f0 #define HDMI_PKTCTRL1 0x2f4 #define PKTCTRL_PACKET(n,t) ((t) << ((n) << 2)) #define PKT_NULL 0 #define PKT_GC 1 #define PKT_AVI 2 #define PKT_AI 3 #define PKT_SPD 5 #define PKT_END 15 #define DDC_CTRL 0x500 #define CTRL_DDC_EN (1 << 31) #define CTRL_DDC_ACMD_START (1 << 30) #define CTRL_DDC_FIFO_DIR (1 << 8) #define CTRL_DDC_FIFO_DIR_READ (0 << 8) #define CTRL_DDC_FIFO_DIR_WRITE (1 << 8) #define CTRL_DDC_SWRST (1 << 0) #define DDC_SLAVE_ADDR 0x504 #define SLAVE_ADDR_SEG_SHIFT 24 #define SLAVE_ADDR_EDDC_SHIFT 16 #define SLAVE_ADDR_OFFSET_SHIFT 8 #define SLAVE_ADDR_SHIFT 0 #define DDC_INT_STATUS 0x50c #define INT_STATUS_XFER_DONE (1 << 0) #define DDC_FIFO_CTRL 0x510 #define FIFO_CTRL_CLEAR (1 << 31) #define DDC_BYTE_COUNTER 0x51c #define DDC_COMMAND 0x520 #define COMMAND_EOREAD (4 << 0) #define DDC_CLOCK 0x528 #define DDC_CLOCK_M (1 << 3) #define DDC_CLOCK_N (5 << 0) #define DDC_FIFO 0x518 #define SWRST_DELAY 1000 #define DDC_DELAY 1000 #define DDC_RETRY 1000 #define DDC_BLKLEN 16 #define DDC_ADDR 0x50 #define EDDC_ADDR 0x60 #define EDID_LENGTH 128 #define DDC_CTRL_LINE 0x540 #define DDC_LINE_SCL_ENABLE (1 << 8) #define DDC_LINE_SDA_ENABLE (1 << 9) #define HDMI_ENABLE_DELAY 50000 #define DDC_READ_RETRY 4 #define EXT_TAG 0x00 #define CEA_TAG_ID 0x02 #define CEA_DTD 0x03 #define DTD_BASIC_AUDIO (1 << 6) #define CEA_REV 0x02 #define CEA_DATA_OFF 0x03 #define CEA_DATA_START 4 #define BLOCK_TAG(x) (((x) >> 5) & 0x7) #define BLOCK_TAG_VSDB 3 #define BLOCK_LEN(x) ((x) & 0x1f) #define HDMI_VSDB_MINLEN 5 #define HDMI_OUI "\x03\x0c\x00" #define HDMI_OUI_LEN 3 #define HDMI_DEFAULT_FREQ 297000000 struct a10hdmi_softc { struct resource *res; struct intr_config_hook mode_hook; uint8_t edid[EDID_LENGTH]; int has_hdmi; int has_audio; clk_t clk_ahb; clk_t clk_hdmi; clk_t clk_lcd; }; static struct resource_spec a10hdmi_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; #define HDMI_READ(sc, reg) bus_read_4((sc)->res, (reg)) #define HDMI_WRITE(sc, reg, val) bus_write_4((sc)->res, (reg), (val)) static void a10hdmi_init(struct a10hdmi_softc *sc) { /* Enable the HDMI module */ HDMI_WRITE(sc, HDMI_CTRL, CTRL_MODULE_EN); /* Configure PLL/DRV settings */ HDMI_WRITE(sc, HDMI_PADCTRL0, PADCTRL0_BIASEN | PADCTRL0_LDOCEN | PADCTRL0_LDODEN | PADCTRL0_PWENC | PADCTRL0_PWEND | PADCTRL0_PWENG | PADCTRL0_CKEN | PADCTRL0_TXEN); HDMI_WRITE(sc, HDMI_PADCTRL1, PADCTRL1_AMP_OPT | PADCTRL1_AMPCK_OPT | PADCTRL1_EMP_OPT | PADCTRL1_EMPCK_OPT | PADCTRL1_REG_DEN | PADCTRL1_REG_DENCK | PADCTRL1_REG_EMP_EN | PADCTRL1_REG_AMP_EN); /* Select PLL3 as input clock */ HDMI_WRITE(sc, HDMI_PLLDBG0, PLLDBG0_CKIN_SEL_PLL3); DELAY(HDMI_ENABLE_DELAY); } static void a10hdmi_hpd(void *arg) { struct a10hdmi_softc *sc; device_t dev; uint32_t hpd; dev = arg; sc = device_get_softc(dev); hpd = HDMI_READ(sc, HDMI_HPD); if ((hpd & HPD_DET) == HPD_DET) EVENTHANDLER_INVOKE(hdmi_event, dev, HDMI_EVENT_CONNECTED); config_intrhook_disestablish(&sc->mode_hook); } static int a10hdmi_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "allwinner,sun7i-a20-hdmi")) return (ENXIO); device_set_desc(dev, "Allwinner HDMI TX"); return (BUS_PROBE_DEFAULT); } static int a10hdmi_attach(device_t dev) { struct a10hdmi_softc *sc; int error; sc = device_get_softc(dev); if (bus_alloc_resources(dev, a10hdmi_spec, &sc->res)) { device_printf(dev, "cannot allocate resources for device\n"); return (ENXIO); } /* Setup clocks */ error = clk_get_by_ofw_name(dev, 0, 
"ahb", &sc->clk_ahb); if (error != 0) { device_printf(dev, "cannot find ahb clock\n"); return (error); } error = clk_get_by_ofw_name(dev, 0, "hdmi", &sc->clk_hdmi); if (error != 0) { device_printf(dev, "cannot find hdmi clock\n"); return (error); } error = clk_get_by_ofw_name(dev, 0, "lcd", &sc->clk_lcd); if (error != 0) { device_printf(dev, "cannot find lcd clock\n"); } /* Enable HDMI clock */ error = clk_enable(sc->clk_hdmi); if (error != 0) { device_printf(dev, "cannot enable hdmi clock\n"); return (error); } /* Gating AHB clock for HDMI */ error = clk_enable(sc->clk_ahb); if (error != 0) { device_printf(dev, "cannot enable ahb gate\n"); return (error); } a10hdmi_init(sc); sc->mode_hook.ich_func = a10hdmi_hpd; sc->mode_hook.ich_arg = dev; error = config_intrhook_establish(&sc->mode_hook); if (error != 0) return (error); return (0); } static int a10hdmi_ddc_xfer(struct a10hdmi_softc *sc, uint16_t addr, uint8_t seg, uint8_t off, int len) { uint32_t val; int retry; /* Set FIFO direction to read */ val = HDMI_READ(sc, DDC_CTRL); val &= ~CTRL_DDC_FIFO_DIR; val |= CTRL_DDC_FIFO_DIR_READ; HDMI_WRITE(sc, DDC_CTRL, val); /* Setup DDC slave address */ val = (addr << SLAVE_ADDR_SHIFT) | (seg << SLAVE_ADDR_SEG_SHIFT) | (EDDC_ADDR << SLAVE_ADDR_EDDC_SHIFT) | (off << SLAVE_ADDR_OFFSET_SHIFT); HDMI_WRITE(sc, DDC_SLAVE_ADDR, val); /* Clear FIFO */ val = HDMI_READ(sc, DDC_FIFO_CTRL); val |= FIFO_CTRL_CLEAR; HDMI_WRITE(sc, DDC_FIFO_CTRL, val); /* Set transfer length */ HDMI_WRITE(sc, DDC_BYTE_COUNTER, len); /* Set command to "Explicit Offset Address Read" */ HDMI_WRITE(sc, DDC_COMMAND, COMMAND_EOREAD); /* Start transfer */ val = HDMI_READ(sc, DDC_CTRL); val |= CTRL_DDC_ACMD_START; HDMI_WRITE(sc, DDC_CTRL, val); /* Wait for command to start */ retry = DDC_RETRY; while (--retry > 0) { val = HDMI_READ(sc, DDC_CTRL); if ((val & CTRL_DDC_ACMD_START) == 0) break; DELAY(DDC_DELAY); } if (retry == 0) return (ETIMEDOUT); /* Ensure that the transfer completed */ val = HDMI_READ(sc, DDC_INT_STATUS); if ((val & INT_STATUS_XFER_DONE) == 0) return (EIO); return (0); } static int a10hdmi_ddc_read(struct a10hdmi_softc *sc, int block, uint8_t *edid) { int resid, off, len, error; uint8_t *pbuf; pbuf = edid; resid = EDID_LENGTH; off = (block & 1) ? EDID_LENGTH : 0; while (resid > 0) { len = min(resid, DDC_BLKLEN); error = a10hdmi_ddc_xfer(sc, DDC_ADDR, block >> 1, off, len); if (error != 0) return (error); bus_read_multi_1(sc->res, DDC_FIFO, pbuf, len); pbuf += len; off += len; resid -= len; } return (0); } static int a10hdmi_detect_hdmi_vsdb(uint8_t *edid) { int off, p, btag, blen; if (edid[EXT_TAG] != CEA_TAG_ID) return (0); off = edid[CEA_DATA_OFF]; /* CEA data block collection starts at byte 4 */ if (off <= CEA_DATA_START) return (0); /* Parse the CEA data blocks */ for (p = CEA_DATA_START; p < off;) { btag = BLOCK_TAG(edid[p]); blen = BLOCK_LEN(edid[p]); /* Make sure the length is sane */ if (p + blen + 1 > off) break; /* Look for a VSDB with the HDMI 24-bit IEEE registration ID */ if (btag == BLOCK_TAG_VSDB && blen >= HDMI_VSDB_MINLEN && memcmp(&edid[p + 1], HDMI_OUI, HDMI_OUI_LEN) == 0) return (1); /* Next data block */ p += (1 + blen); } return (0); } static void a10hdmi_detect_hdmi(struct a10hdmi_softc *sc, int *phdmi, int *paudio) { struct edid_info ei; uint8_t edid[EDID_LENGTH]; int block; *phdmi = *paudio = 0; if (edid_parse(sc->edid, &ei) != 0) return; /* Scan through extension blocks, looking for a CEA-861 block. 
*/ for (block = 1; block <= ei.edid_ext_block_count; block++) { if (a10hdmi_ddc_read(sc, block, edid) != 0) return; if (a10hdmi_detect_hdmi_vsdb(edid) != 0) { *phdmi = 1; *paudio = ((edid[CEA_DTD] & DTD_BASIC_AUDIO) != 0); return; } } } static int a10hdmi_get_edid(device_t dev, uint8_t **edid, uint32_t *edid_len) { struct a10hdmi_softc *sc; int error, retry; sc = device_get_softc(dev); retry = DDC_READ_RETRY; while (--retry > 0) { /* I2C software reset */ HDMI_WRITE(sc, DDC_FIFO_CTRL, 0); HDMI_WRITE(sc, DDC_CTRL, CTRL_DDC_EN | CTRL_DDC_SWRST); DELAY(SWRST_DELAY); if (HDMI_READ(sc, DDC_CTRL) & CTRL_DDC_SWRST) { device_printf(dev, "DDC software reset failed\n"); return (ENXIO); } /* Configure DDC clock */ HDMI_WRITE(sc, DDC_CLOCK, DDC_CLOCK_M | DDC_CLOCK_N); /* Enable SDA/SCL */ HDMI_WRITE(sc, DDC_CTRL_LINE, DDC_LINE_SCL_ENABLE | DDC_LINE_SDA_ENABLE); /* Read EDID block */ error = a10hdmi_ddc_read(sc, 0, sc->edid); if (error == 0) { *edid = sc->edid; *edid_len = sizeof(sc->edid); break; } } if (error == 0) a10hdmi_detect_hdmi(sc, &sc->has_hdmi, &sc->has_audio); else sc->has_hdmi = sc->has_audio = 0; return (error); } static void a10hdmi_set_audiomode(device_t dev, const struct videomode *mode) { struct a10hdmi_softc *sc; uint32_t val; int retry; sc = device_get_softc(dev); /* Disable and reset audio module and wait for reset bit to clear */ HDMI_WRITE(sc, HDMI_AUD_CTRL, AUD_CTRL_RST); for (retry = HDMI_AUDIO_RESET_RETRY; retry > 0; retry--) { val = HDMI_READ(sc, HDMI_AUD_CTRL); if ((val & AUD_CTRL_RST) == 0) break; } if (retry == 0) { device_printf(dev, "timeout waiting for audio module\n"); return; } if (!sc->has_audio) return; /* DMA and FIFO control */ HDMI_WRITE(sc, HDMI_ADMA_CTRL, HDMI_ADMA_MODE_DDMA); /* Audio format control (LPCM, S16LE, stereo) */ HDMI_WRITE(sc, HDMI_AUD_FMT, AUD_FMT_CH(HDMI_AUDIO_CHANNELS)); /* Channel mappings */ HDMI_WRITE(sc, HDMI_PCM_CTRL, HDMI_AUDIO_CHANNELMAP); /* Clocks */ HDMI_WRITE(sc, HDMI_AUD_CTS, HDMI_AUDIO_CTS(mode->dot_clock, HDMI_AUDIO_N)); HDMI_WRITE(sc, HDMI_AUD_N, HDMI_AUDIO_N); /* Set sampling frequency to 48 kHz, word length to 16-bit */ HDMI_WRITE(sc, HDMI_AUD_CH_STATUS0, CH_STATUS0_FS_FREQ_48); HDMI_WRITE(sc, HDMI_AUD_CH_STATUS1, CH_STATUS1_WORD_LEN_16); /* Enable */ HDMI_WRITE(sc, HDMI_AUD_CTRL, AUD_CTRL_EN); } static int a10hdmi_get_tcon_config(struct a10hdmi_softc *sc, int *div, int *dbl) { uint64_t lcd_fin, lcd_fout; clk_t clk_lcd_parent; const char *pname; int error; error = clk_get_parent(sc->clk_lcd, &clk_lcd_parent); if (error != 0) return (error); /* Get the LCD CH1 special clock 2 divider */ error = clk_get_freq(sc->clk_lcd, &lcd_fout); if (error != 0) return (error); error = clk_get_freq(clk_lcd_parent, &lcd_fin); if (error != 0) return (error); *div = lcd_fin / lcd_fout; /* Detect LCD CH1 special clock using a 1X or 2X source */ /* XXX */ pname = clk_get_name(clk_lcd_parent); if (strcmp(pname, "pll3") == 0 || strcmp(pname, "pll7") == 0) *dbl = 0; else *dbl = 1; return (0); } static int a10hdmi_set_videomode(device_t dev, const struct videomode *mode) { struct a10hdmi_softc *sc; int error, clk_div, clk_dbl; int dblscan, hfp, hspw, hbp, vfp, vspw, vbp; uint32_t val; sc = device_get_softc(dev); dblscan = !!(mode->flags & VID_DBLSCAN); hfp = mode->hsync_start - mode->hdisplay; hspw = mode->hsync_end - mode->hsync_start; hbp = mode->htotal - mode->hsync_start; vfp = mode->vsync_start - mode->vdisplay; vspw = mode->vsync_end - mode->vsync_start; vbp = mode->vtotal - mode->vsync_start; error = a10hdmi_get_tcon_config(sc, &clk_div, 
&clk_dbl); if (error != 0) { device_printf(dev, "couldn't get tcon config: %d\n", error); return (error); } /* Clear interrupt status */ HDMI_WRITE(sc, HDMI_INT_STATUS, HDMI_READ(sc, HDMI_INT_STATUS)); /* Clock setup */ val = HDMI_READ(sc, HDMI_PADCTRL1); val &= ~PADCTRL1_REG_CKSS; val |= (clk_dbl ? PADCTRL1_REG_CKSS_2X : PADCTRL1_REG_CKSS_1X); HDMI_WRITE(sc, HDMI_PADCTRL1, val); HDMI_WRITE(sc, HDMI_PLLCTRL0, PLLCTRL0_PLL_EN | PLLCTRL0_BWS | PLLCTRL0_HV_IS_33 | PLLCTRL0_LDO1_EN | PLLCTRL0_LDO2_EN | PLLCTRL0_SDIV2 | PLLCTRL0_VCO_GAIN | PLLCTRL0_S | PLLCTRL0_CP_S | PLLCTRL0_CS | PLLCTRL0_PREDIV(clk_div) | PLLCTRL0_VCO_S); /* Setup display settings */ if (bootverbose) device_printf(dev, "HDMI: %s, Audio: %s\n", sc->has_hdmi ? "yes" : "no", sc->has_audio ? "yes" : "no"); val = 0; if (sc->has_hdmi) val |= VID_CTRL_HDMI_MODE; if (mode->flags & VID_INTERLACE) val |= VID_CTRL_INTERLACE; if (mode->flags & VID_DBLSCAN) val |= VID_CTRL_REPEATER_2X; HDMI_WRITE(sc, HDMI_VID_CTRL, val); /* Setup display timings */ HDMI_WRITE(sc, HDMI_VID_TIMING0, VID_ACT_V(mode->vdisplay) | VID_ACT_H(mode->hdisplay << dblscan)); HDMI_WRITE(sc, HDMI_VID_TIMING1, VID_VBP(vbp) | VID_HBP(hbp << dblscan)); HDMI_WRITE(sc, HDMI_VID_TIMING2, VID_VFP(vfp) | VID_HFP(hfp << dblscan)); HDMI_WRITE(sc, HDMI_VID_TIMING3, VID_VSPW(vspw) | VID_HSPW(hspw << dblscan)); val = TX_CLOCK_NORMAL; if (mode->flags & VID_PVSYNC) val |= VID_VSYNC_ACTSEL; if (mode->flags & VID_PHSYNC) val |= VID_HSYNC_ACTSEL; HDMI_WRITE(sc, HDMI_VID_TIMING4, val); /* This is an ordered list of infoframe packets that the HDMI * transmitter will send. Transmit packets in the following order: * 1. General control packet * 2. AVI infoframe * 3. Audio infoframe * There are 2 registers with 4 slots each. The list is terminated * with the special PKT_END marker. */ HDMI_WRITE(sc, HDMI_PKTCTRL0, PKTCTRL_PACKET(0, PKT_GC) | PKTCTRL_PACKET(1, PKT_AVI) | PKTCTRL_PACKET(2, PKT_AI) | PKTCTRL_PACKET(3, PKT_END)); HDMI_WRITE(sc, HDMI_PKTCTRL1, 0); /* Setup audio */ a10hdmi_set_audiomode(dev, mode); return (0); } static int a10hdmi_enable(device_t dev, int onoff) { struct a10hdmi_softc *sc; uint32_t val; sc = device_get_softc(dev); /* Enable or disable video output */ val = HDMI_READ(sc, HDMI_VID_CTRL); if (onoff) val |= VID_CTRL_VIDEO_EN; else val &= ~VID_CTRL_VIDEO_EN; HDMI_WRITE(sc, HDMI_VID_CTRL, val); return (0); } static device_method_t a10hdmi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, a10hdmi_probe), DEVMETHOD(device_attach, a10hdmi_attach), /* HDMI interface */ DEVMETHOD(hdmi_get_edid, a10hdmi_get_edid), DEVMETHOD(hdmi_set_videomode, a10hdmi_set_videomode), DEVMETHOD(hdmi_enable, a10hdmi_enable), DEVMETHOD_END }; static driver_t a10hdmi_driver = { "a10hdmi", a10hdmi_methods, sizeof(struct a10hdmi_softc), }; static devclass_t a10hdmi_devclass; DRIVER_MODULE(a10hdmi, simplebus, a10hdmi_driver, a10hdmi_devclass, 0, 0); MODULE_VERSION(a10hdmi, 1); Index: user/ngie/bug-237403/sys/arm/amlogic/aml8726/aml8726_wdt.c =================================================================== --- user/ngie/bug-237403/sys/arm/amlogic/aml8726/aml8726_wdt.c (revision 348028) +++ user/ngie/bug-237403/sys/arm/amlogic/aml8726/aml8726_wdt.c (revision 348029) @@ -1,306 +1,307 @@ /*- * Copyright 2013-2015 John Wehle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * Amlogic aml8726 watchdog driver. */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include struct aml8726_wdt_softc { device_t dev; struct resource * res[2]; struct mtx mtx; void * ih_cookie; }; static struct resource_spec aml8726_wdt_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static struct { uint32_t ctrl_cpu_mask; uint32_t ctrl_en; uint32_t term_cnt_mask; uint32_t reset_cnt_mask; } aml8726_wdt_soc_params; /* * devclass_get_device / device_get_softc could be used * to dynamically locate this, however the wdt is a * required device which can't be unloaded so there's * no need for the overhead. 
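 * A dynamic lookup would look roughly like this (sketch only):
 *
 *	device_t dev = devclass_get_device(devclass_find("wdt"), 0);
 *	struct aml8726_wdt_softc *sc =
 *	    (dev != NULL) ? device_get_softc(dev) : NULL;
 *
 * and that extra work is unwelcome in the cpu_reset() path below.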
*/ static struct aml8726_wdt_softc *aml8726_wdt_sc = NULL; #define AML_WDT_LOCK(sc) mtx_lock_spin(&(sc)->mtx) #define AML_WDT_UNLOCK(sc) mtx_unlock_spin(&(sc)->mtx) #define AML_WDT_LOCK_INIT(sc) \ mtx_init(&(sc)->mtx, device_get_nameunit((sc)->dev), \ "wdt", MTX_SPIN) #define AML_WDT_LOCK_DESTROY(sc) mtx_destroy(&(sc)->mtx); #define AML_WDT_CTRL_REG 0 #define AML_WDT_CTRL_CPU_WDRESET_MASK aml8726_wdt_soc_params.ctrl_cpu_mask #define AML_WDT_CTRL_CPU_WDRESET_SHIFT 24 #define AML_WDT_CTRL_IRQ_EN (1 << 23) #define AML_WDT_CTRL_EN aml8726_wdt_soc_params.ctrl_en #define AML_WDT_CTRL_TERMINAL_CNT_MASK aml8726_wdt_soc_params.term_cnt_mask #define AML_WDT_CTRL_TERMINAL_CNT_SHIFT 0 #define AML_WDT_RESET_REG 4 #define AML_WDT_RESET_CNT_MASK aml8726_wdt_soc_params.reset_cnt_mask #define AML_WDT_RESET_CNT_SHIFT 0 #define CSR_WRITE_4(sc, reg, val) bus_write_4((sc)->res[0], reg, (val)) #define CSR_READ_4(sc, reg) bus_read_4((sc)->res[0], reg) #define CSR_BARRIER(sc, reg) bus_barrier((sc)->res[0], reg, 4, \ (BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)) static void aml8726_wdt_watchdog(void *private, u_int cmd, int *error) { struct aml8726_wdt_softc *sc = (struct aml8726_wdt_softc *)private; uint32_t wcr; uint64_t tens_of_usec; AML_WDT_LOCK(sc); tens_of_usec = (((uint64_t)1 << (cmd & WD_INTERVAL)) + 9999) / 10000; if (cmd != 0 && tens_of_usec <= (AML_WDT_CTRL_TERMINAL_CNT_MASK >> AML_WDT_CTRL_TERMINAL_CNT_SHIFT)) { wcr = AML_WDT_CTRL_CPU_WDRESET_MASK | AML_WDT_CTRL_EN | ((uint32_t)tens_of_usec << AML_WDT_CTRL_TERMINAL_CNT_SHIFT); CSR_WRITE_4(sc, AML_WDT_RESET_REG, 0); CSR_WRITE_4(sc, AML_WDT_CTRL_REG, wcr); *error = 0; } else CSR_WRITE_4(sc, AML_WDT_CTRL_REG, (CSR_READ_4(sc, AML_WDT_CTRL_REG) & ~(AML_WDT_CTRL_IRQ_EN | AML_WDT_CTRL_EN))); AML_WDT_UNLOCK(sc); } static int aml8726_wdt_intr(void *arg) { struct aml8726_wdt_softc *sc = (struct aml8726_wdt_softc *)arg; /* * Normally a timeout causes a hardware reset, however * the watchdog timer can be configured to cause an * interrupt instead by setting AML_WDT_CTRL_IRQ_EN * and clearing AML_WDT_CTRL_CPU_WDRESET_MASK. */ AML_WDT_LOCK(sc); CSR_WRITE_4(sc, AML_WDT_CTRL_REG, (CSR_READ_4(sc, AML_WDT_CTRL_REG) & ~(AML_WDT_CTRL_IRQ_EN | AML_WDT_CTRL_EN))); CSR_BARRIER(sc, AML_WDT_CTRL_REG); AML_WDT_UNLOCK(sc); device_printf(sc->dev, "timeout expired\n"); return (FILTER_HANDLED); } static int aml8726_wdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "amlogic,meson6-wdt")) return (ENXIO); device_set_desc(dev, "Amlogic aml8726 WDT"); return (BUS_PROBE_DEFAULT); } static int aml8726_wdt_attach(device_t dev) { struct aml8726_wdt_softc *sc = device_get_softc(dev); /* There should be exactly one instance. */ if (aml8726_wdt_sc != NULL) return (ENXIO); sc->dev = dev; if (bus_alloc_resources(dev, aml8726_wdt_spec, sc->res)) { device_printf(dev, "can not allocate resources for device\n"); return (ENXIO); } /* * Certain bitfields are dependent on the hardware revision. 
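	 * For example, per the switch statement below: M8 silicon at metal
	 * revision M2_A and all M8B parts use bit 19 as the enable bit with
	 * 19-bit terminal/reset count fields, while the remaining revisions
	 * use bit 22 with 22-bit count fields.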
*/ switch (aml8726_soc_hw_rev) { case AML_SOC_HW_REV_M8: aml8726_wdt_soc_params.ctrl_cpu_mask = 0xf << AML_WDT_CTRL_CPU_WDRESET_SHIFT; switch (aml8726_soc_metal_rev) { case AML_SOC_M8_METAL_REV_M2_A: aml8726_wdt_soc_params.ctrl_en = 1 << 19; aml8726_wdt_soc_params.term_cnt_mask = 0x07ffff << AML_WDT_CTRL_TERMINAL_CNT_SHIFT; aml8726_wdt_soc_params.reset_cnt_mask = 0x07ffff << AML_WDT_RESET_CNT_SHIFT; break; default: aml8726_wdt_soc_params.ctrl_en = 1 << 22; aml8726_wdt_soc_params.term_cnt_mask = 0x3fffff << AML_WDT_CTRL_TERMINAL_CNT_SHIFT; aml8726_wdt_soc_params.reset_cnt_mask = 0x3fffff << AML_WDT_RESET_CNT_SHIFT; break; } break; case AML_SOC_HW_REV_M8B: aml8726_wdt_soc_params.ctrl_cpu_mask = 0xf << AML_WDT_CTRL_CPU_WDRESET_SHIFT; aml8726_wdt_soc_params.ctrl_en = 1 << 19; aml8726_wdt_soc_params.term_cnt_mask = 0x07ffff << AML_WDT_CTRL_TERMINAL_CNT_SHIFT; aml8726_wdt_soc_params.reset_cnt_mask = 0x07ffff << AML_WDT_RESET_CNT_SHIFT; break; default: aml8726_wdt_soc_params.ctrl_cpu_mask = 3 << AML_WDT_CTRL_CPU_WDRESET_SHIFT; aml8726_wdt_soc_params.ctrl_en = 1 << 22; aml8726_wdt_soc_params.term_cnt_mask = 0x3fffff << AML_WDT_CTRL_TERMINAL_CNT_SHIFT; aml8726_wdt_soc_params.reset_cnt_mask = 0x3fffff << AML_WDT_RESET_CNT_SHIFT; break; } /* * Disable the watchdog. */ CSR_WRITE_4(sc, AML_WDT_CTRL_REG, (CSR_READ_4(sc, AML_WDT_CTRL_REG) & ~(AML_WDT_CTRL_IRQ_EN | AML_WDT_CTRL_EN))); /* * Initialize the mutex prior to installing the interrupt handler * in case of a spurious interrupt. */ AML_WDT_LOCK_INIT(sc); if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, aml8726_wdt_intr, NULL, sc, &sc->ih_cookie)) { device_printf(dev, "could not setup interrupt handler\n"); bus_release_resources(dev, aml8726_wdt_spec, sc->res); AML_WDT_LOCK_DESTROY(sc); return (ENXIO); } aml8726_wdt_sc = sc; EVENTHANDLER_REGISTER(watchdog_list, aml8726_wdt_watchdog, sc, 0); return (0); } static int aml8726_wdt_detach(device_t dev) { return (EBUSY); } static device_method_t aml8726_wdt_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aml8726_wdt_probe), DEVMETHOD(device_attach, aml8726_wdt_attach), DEVMETHOD(device_detach, aml8726_wdt_detach), DEVMETHOD_END }; static driver_t aml8726_wdt_driver = { "wdt", aml8726_wdt_methods, sizeof(struct aml8726_wdt_softc), }; static devclass_t aml8726_wdt_devclass; EARLY_DRIVER_MODULE(wdt, simplebus, aml8726_wdt_driver, aml8726_wdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE); void cpu_reset(void) { /* Watchdog has not yet been initialized */ if (aml8726_wdt_sc == NULL) printf("Reset hardware has not yet been initialized.\n"); else { CSR_WRITE_4(aml8726_wdt_sc, AML_WDT_RESET_REG, 0); CSR_WRITE_4(aml8726_wdt_sc, AML_WDT_CTRL_REG, (AML_WDT_CTRL_CPU_WDRESET_MASK | AML_WDT_CTRL_EN | (10 << AML_WDT_CTRL_TERMINAL_CNT_SHIFT))); } while (1); } Index: user/ngie/bug-237403/sys/arm/freescale/imx/imx6_ipu.c =================================================================== --- user/ngie/bug-237403/sys/arm/freescale/imx/imx6_ipu.c (revision 348028) +++ user/ngie/bug-237403/sys/arm/freescale/imx/imx6_ipu.c (revision 348029) @@ -1,1202 +1,1203 @@ /*- * Copyright 2015 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fb_if.h" #include "hdmi_if.h" #define EDID_DEBUG_not static int have_ipu = 0; #define LDB_CLOCK_RATE 280000000 #define MODE_HBP(mode) ((mode)->htotal - (mode)->hsync_end) #define MODE_HFP(mode) ((mode)->hsync_start - (mode)->hdisplay) #define MODE_HSW(mode) ((mode)->hsync_end - (mode)->hsync_start) #define MODE_VBP(mode) ((mode)->vtotal - (mode)->vsync_end) #define MODE_VFP(mode) ((mode)->vsync_start - (mode)->vdisplay) #define MODE_VSW(mode) ((mode)->vsync_end - (mode)->vsync_start) #define MODE_BPP 16 #define MODE_PIXEL_CLOCK_INVERT 1 #define M(nm,hr,vr,clk,hs,he,ht,vs,ve,vt,f) \ { clk, hr, hs, he, ht, vr, vs, ve, vt, f, nm } static struct videomode mode1024x768 = M("1024x768x60",1024,768,65000,1048,1184,1344,771,777,806,VID_NHSYNC|VID_PHSYNC); #define DMA_CHANNEL 23 #define DC_CHAN5 5 #define DI_PORT 0 #define IPU_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define IPU_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define IPU_LOCK_INIT(_sc) mtx_init(&(_sc)->sc_mtx, \ device_get_nameunit(_sc->sc_dev), "ipu", MTX_DEF) #define IPU_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx) #define IPU_READ4(_sc, reg) bus_read_4((_sc)->sc_mem_res, (reg)) #define IPU_WRITE4(_sc, reg, value) \ bus_write_4((_sc)->sc_mem_res, (reg), (value)) #define CPMEM_BASE 0x300000 #define DC_TEMPL_BASE 0x380000 /* Microcode */ /* Word 1 */ #define TEMPLATE_SYNC(v) ((v) << 0) #define TEMPLATE_GLUELOGIC(v) ((v) << 4) #define TEMPLATE_MAPPING(v) ((v) << 15) #define TEMPLATE_WAVEFORM(v) ((v) << 11) #define GLUELOGIC_KEEP_ASSERTED (1 << 3) #define GLUELOGIC_KEEP_NEGATED (1 << 2) /* Word 2 */ #define TEMPLATE_OPCODE(v) ((v) << 4) #define OPCODE_WROD 0x18 #define TEMPLATE_STOP (1 << 9) #define IPU_CONF 0x200000 #define IPU_CONF_DMFC_EN (1 << 10) #define IPU_CONF_DC_EN (1 << 9) #define IPU_CONF_DI1_EN (1 << 7) #define IPU_CONF_DI0_EN (1 << 6) #define IPU_CONF_DP_EN (1 << 5) #define IPU_DISP_GEN 0x2000C4 #define DISP_GEN_DI1_CNTR_RELEASE (1 << 25) #define DISP_GEN_DI0_CNTR_RELEASE (1 << 24) #define DISP_GEN_MCU_MAX_BURST_STOP (1 << 22) #define DISP_GEN_MCU_T_SHIFT 18 #define IPU_MEM_RST 0x2000DC #define IPU_MEM_RST_START (1 << 31) #define IPU_MEM_RST_ALL 0x807FFFFF #define IPU_CH_DB_MODE_SEL_0 0x200150 #define IPU_CH_DB_MODE_SEL_1 0x200154 #define IPU_CUR_BUF_0 0x20023C #define IPU_CUR_BUF_1 0x200240 #define IPU_IDMAC_CH_EN_1 
0x208004 #define IPU_IDMAC_CH_EN_2 0x208008 #define IPU_IDMAC_CH_PRI_1 0x208014 #define IPU_IDMAC_CH_PRI_2 0x208018 #define IPU_DI0_GENERAL 0x240000 #define DI_CLOCK_EXTERNAL (1 << 20) #define DI_GENERAL_POL_CLK (1 << 17) #define DI_GENERAL_POLARITY_3 (1 << 2) #define DI_GENERAL_POLARITY_2 (1 << 1) #define IPU_DI0_BS_CLKGEN0 0x240004 #define DI_BS_CLKGEN0(_int, _frac) (((_int) << 4) | (_frac)) #define IPU_DI0_BS_CLKGEN1 0x240008 #define DI_BS_CLKGEN1_DOWN(_int, _frac) ((((_int) << 1) | (_frac)) << 16) #define IPU_DI0_SW_GEN0_1 0x24000C #define DI_RUN_VALUE_M1(v) ((v) << 19) #define DI_RUN_RESOLUTION(v) ((v) << 16) #define DI_OFFSET_VALUE(v) ((v) << 3) #define IPU_DI0_SW_GEN1_1 0x240030 #define DI_CNT_POLARITY_GEN_EN(v) ((v) << 29) #define DI_CNT_AUTO_RELOAD (1 << 28) #define DI_CNT_CLR_SEL(v) ((v) << 25) #define DI_CNT_DOWN(v) ((v) << 16) #define DI_CNT_POLARITY_TRIGGER_SEL(v) ((v) << 12) #define DI_CNT_POLARITY_CLR_SEL(v) ((v) << 9) #define IPU_DI0_SYNC_AS_GEN 0x240054 #define SYNC_AS_GEN_VSYNC_SEL(v) ((v) << 13) #define SYNC_AS_GEN_SYNC_START(v) ((v) << 0) #define IPU_DI0_DW_GEN_0 0x240058 #define DW_GEN_DI_ACCESS_SIZE(v) ((v) << 24) #define DW_GEN_DI_COMPONENT_SIZE(v) ((v) << 16) #define DW_GEN_DI_SET_MASK 3 #define DW_GEN_DI_PIN_15_SET(v) ((v) << 8) #define IPU_DI0_DW_SET3_0 0x240118 #define DW_SET_DATA_CNT_DOWN(v) ((v) << 16) #define DW_SET_DATA_CNT_UP(v) ((v) << 0) #define IPU_DI0_STP_REP 0x240148 #define IPU_DI0_POL 0x240164 #define DI_POL_DRDY_POLARITY_15 (1 << 4) #define IPU_DI0_SCR_CONF 0x240170 #define IPU_DI1_GENERAL 0x248000 #define IPU_DI1_BS_CLKGEN0 0x248004 #define IPU_DI1_BS_CLKGEN1 0x248008 #define IPU_DI1_SW_GEN0_1 0x24800C #define IPU_DI1_SW_GEN1_1 0x248030 #define IPU_DI1_SYNC_AS_GEN 0x248054 #define IPU_DI1_DW_GEN_0 0x248058 #define IPU_DI1_POL 0x248164 #define IPU_DI1_DW_SET3_0 0x248118 #define IPU_DI1_STP_REP 0x248148 #define IPU_DI1_SCR_CONF 0x248170 #define DMFC_RD_CHAN 0x260000 #define DMFC_WR_CHAN 0x260004 #define DMFC_WR_CHAN_BURST_SIZE_32 (0 << 6) #define DMFC_WR_CHAN_BURST_SIZE_16 (1 << 6) #define DMFC_WR_CHAN_BURST_SIZE_8 (2 << 6) #define DMFC_WR_CHAN_BURST_SIZE_4 (3 << 6) #define DMFC_WR_CHAN_BURST_SIZE_4 (3 << 6) #define DMFC_WR_CHAN_FIFO_SIZE_128 (2 << 3) #define DMFC_WR_CHAN_DEF 0x260008 #define DMFC_WR_CHAN_DEF_WM_CLR_2C(v) ((v) << 29) #define DMFC_WR_CHAN_DEF_WM_CLR_1C(v) ((v) << 21) #define DMFC_WR_CHAN_DEF_WM_CLR_2(v) ((v) << 13) #define DMFC_WR_CHAN_DEF_WM_CLR_1(v) ((v) << 5) #define DMFC_WR_CHAN_DEF_WM_SET_1(v) ((v) << 2) #define DMFC_WR_CHAN_DEF_WM_EN_1 (1 << 1) #define DMFC_DP_CHAN 0x26000C #define DMFC_DP_CHAN_BURST_SIZE_8 2 #define DMFC_DP_CHAN_FIFO_SIZE_256 1 #define DMFC_DP_CHAN_FIFO_SIZE_128 2 #define DMFC_DP_CHAN_BURST_SIZE_5F(v) ((v) << 14) #define DMFC_DP_CHAN_FIFO_SIZE_5F(v) ((v) << 11) #define DMFC_DP_CHAN_ST_ADDR_SIZE_5F(v) ((v) << 8) #define DMFC_DP_CHAN_BURST_SIZE_5B(v) ((v) << 6) #define DMFC_DP_CHAN_FIFO_SIZE_5B(v) ((v) << 3) #define DMFC_DP_CHAN_ST_ADDR_SIZE_5B(v) ((v) << 0) #define DMFC_DP_CHAN_DEF 0x260010 #define DMFC_DP_CHAN_DEF_WM_CLR_6F(v) ((v) << 29) #define DMFC_DP_CHAN_DEF_WM_CLR_6B(v) ((v) << 21) #define DMFC_DP_CHAN_DEF_WM_CLR_5F(v) ((v) << 13) #define DMFC_DP_CHAN_DEF_WM_SET_5F(v) ((v) << 10) #define DMFC_DP_CHAN_DEF_WM_EN_5F (1 << 9) #define DMFC_DP_CHAN_DEF_WM_CLR_5B(v) ((v) << 5) #define DMFC_DP_CHAN_DEF_WM_SET_5B(v) ((v) << 2) #define DMFC_DP_CHAN_DEF_WM_EN_5B (1 << 1) #define DMFC_GENERAL_1 0x260014 #define DMFC_GENERAL_1_WAIT4EOT_5B (1 << 20) #define DMFC_IC_CTRL 0x26001C #define DMFC_IC_CTRL_DISABLED 0x2 #define 
DC_WRITE_CH_CONF_1 0x0025801C #define WRITE_CH_CONF_PROG_CHAN_TYP_MASK (7 << 5) #define WRITE_CH_CONF_PROG_CHAN_NORMAL (4 << 5) #define DC_WRITE_CH_ADDR_1 0x00258020 #define DC_WRITE_CH_CONF_5 0x0025805C #define WRITE_CH_CONF_PROG_DISP_ID(v) ((v) << 3) #define WRITE_CH_CONF_PROG_DI_ID(v) ((v) << 2) #define WRITE_CH_CONF_PROG_W_SIZE(v) (v) #define DC_WRITE_CH_ADDR_5 0x00258060 #define DC_RL0_CH_5 0x00258064 #define DC_GEN 0x002580D4 #define DC_GEN_SYNC_PRIORITY (1 << 7) #define DC_GEN_ASYNC (0 << 1) #define DC_GEN_SYNC (2 << 1) #define DC_DISP_CONF2(di) (0x002580E8 + (di) * 4) #define DC_MAP_CONF_0 0x00258108 #define DC_MAP_CONF_15 0x00258144 #define DC_MAP_CONF_VAL(map) (DC_MAP_CONF_15 + ((map) / 2) * sizeof(uint32_t)) #define MAP_CONF_VAL_MASK 0xffff #define DC_MAP_CONF_PTR(ptr) (DC_MAP_CONF_0 + ((ptr) / 2) * sizeof(uint32_t)) #define MAP_CONF_PTR_MASK 0x1f #define DI_COUNTER_INT_HSYNC 1 #define DI_COUNTER_HSYNC 2 #define DI_COUNTER_VSYNC 3 #define DI_COUNTER_AD_0 4 #define DI_COUNTER_AD_1 5 #define DI_SYNC_NONE 0 #define DI_SYNC_CLK 1 #define DI_SYNC_COUNTER(c) ((c) + 1) struct ipu_cpmem_word { uint32_t data[5]; uint32_t padding[3]; }; struct ipu_cpmem_ch_param { struct ipu_cpmem_word word[2]; }; #define CH_PARAM_RESET(param) memset(param, 0, sizeof(*param)) #define IPU_READ_CH_PARAM(_sc, ch, param) bus_read_region_4( \ (_sc)->sc_mem_res, CPMEM_BASE + ch * (sizeof(*param)),\ (uint32_t*)param, sizeof(*param) / 4) #define IPU_WRITE_CH_PARAM(_sc, ch, param) bus_write_region_4( \ (_sc)->sc_mem_res, CPMEM_BASE + ch * (sizeof(*param)),\ (uint32_t*)param, sizeof(*param) / 4) #define CH_PARAM_SET_FW(param, v) ipu_ch_param_set_value((param), \ 0, 125, 13, (v)) #define CH_PARAM_SET_FH(param, v) ipu_ch_param_set_value((param), \ 0, 138, 12, (v)) #define CH_PARAM_SET_SLY(param, v) ipu_ch_param_set_value((param), \ 1, 102, 14, (v)) #define CH_PARAM_SET_EBA0(param, v) ipu_ch_param_set_value((param), \ 1, 0, 29, (v)) #define CH_PARAM_SET_EBA1(param, v) ipu_ch_param_set_value((param), \ 1, 29, 29, (v)) #define CH_PARAM_SET_BPP(param, v) ipu_ch_param_set_value((param), \ 0, 107, 3, (v)) #define CH_PARAM_SET_PFS(param, v) ipu_ch_param_set_value((param), \ 1, 85, 4, (v)) #define CH_PARAM_SET_NPB(param, v) ipu_ch_param_set_value((param), \ 1, 78, 7, (v)) #define CH_PARAM_SET_UBO(param, v) ipu_ch_param_set_value((param), \ 0, 46, 22, (v)) #define CH_PARAM_SET_VBO(param, v) ipu_ch_param_set_value((param), \ 0, 68, 22, (v)) #define CH_PARAM_SET_RED_WIDTH(param, v) ipu_ch_param_set_value((param), \ 1, 116, 3, (v)) #define CH_PARAM_SET_RED_OFFSET(param, v) ipu_ch_param_set_value((param), \ 1, 128, 5, (v)) #define CH_PARAM_SET_GREEN_WIDTH(param, v) ipu_ch_param_set_value((param), \ 1, 119, 3, (v)) #define CH_PARAM_SET_GREEN_OFFSET(param, v) ipu_ch_param_set_value((param), \ 1, 133, 5, (v)) #define CH_PARAM_SET_BLUE_WIDTH(param, v) ipu_ch_param_set_value((param), \ 1, 122, 3, (v)) #define CH_PARAM_SET_BLUE_OFFSET(param, v) ipu_ch_param_set_value((param), \ 1, 138, 5, (v)) #define CH_PARAM_SET_ALPHA_WIDTH(param, v) ipu_ch_param_set_value((param), \ 1, 125, 3, (v)) #define CH_PARAM_SET_ALPHA_OFFSET(param, v) ipu_ch_param_set_value((param), \ 1, 143, 5, (v)) #define CH_PARAM_GET_FW(param) ipu_ch_param_get_value((param), \ 0, 125, 13) #define CH_PARAM_GET_FH(param) ipu_ch_param_get_value((param), \ 0, 138, 12) #define CH_PARAM_GET_SLY(param) ipu_ch_param_get_value((param), \ 1, 102, 14) #define CH_PARAM_GET_EBA0(param) ipu_ch_param_get_value((param), \ 1, 0, 29) #define CH_PARAM_GET_EBA1(param) 
ipu_ch_param_get_value((param), \ 1, 29, 29) #define CH_PARAM_GET_BPP(param) ipu_ch_param_get_value((param), \ 0, 107, 3) #define CH_PARAM_GET_PFS(param) ipu_ch_param_get_value((param), \ 1, 85, 4) #define CH_PARAM_GET_NPB(param) ipu_ch_param_get_value((param), \ 1, 78, 7) #define CH_PARAM_GET_UBO(param) ipu_ch_param_get_value((param), \ 0, 46, 22) #define CH_PARAM_GET_VBO(param) ipu_ch_param_get_value((param), \ 0, 68, 22) #define CH_PARAM_GET_RED_WIDTH(param) ipu_ch_param_get_value((param), \ 1, 116, 3) #define CH_PARAM_GET_RED_OFFSET(param) ipu_ch_param_get_value((param), \ 1, 128, 5) #define CH_PARAM_GET_GREEN_WIDTH(param) ipu_ch_param_get_value((param), \ 1, 119, 3) #define CH_PARAM_GET_GREEN_OFFSET(param) ipu_ch_param_get_value((param), \ 1, 133, 5) #define CH_PARAM_GET_BLUE_WIDTH(param) ipu_ch_param_get_value((param), \ 1, 122, 3) #define CH_PARAM_GET_BLUE_OFFSET(param) ipu_ch_param_get_value((param), \ 1, 138, 5) #define CH_PARAM_GET_ALPHA_WIDTH(param) ipu_ch_param_get_value((param), \ 1, 125, 3) #define CH_PARAM_GET_ALPHA_OFFSET(param) ipu_ch_param_get_value((param), \ 1, 143, 5) #define IPU_PIX_FORMAT_BPP_32 0 #define IPU_PIX_FORMAT_BPP_24 1 #define IPU_PIX_FORMAT_BPP_18 2 #define IPU_PIX_FORMAT_BPP_16 3 #define IPU_PIX_FORMAT_BPP_12 4 #define IPU_PIX_FORMAT_BPP_8 5 #define IPU_PIX_FORMAT_BPP_ #define IPU_PIX_FORMAT_RGB 7 enum dc_event_t { DC_EVENT_NF = 0, DC_EVENT_NL, DC_EVENT_EOF, DC_EVENT_NFIELD, DC_EVENT_EOL, DC_EVENT_EOFIELD, DC_EVENT_NEW_ADDR, DC_EVENT_NEW_CHAN, DC_EVENT_NEW_DATA }; struct ipu_softc { device_t sc_dev; struct resource *sc_mem_res; int sc_mem_rid; struct resource *sc_irq_res; int sc_irq_rid; void *sc_intr_hl; struct mtx sc_mtx; struct fb_info sc_fb_info; struct videomode *sc_mode; /* Framebuffer */ bus_dma_tag_t sc_dma_tag; bus_dmamap_t sc_dma_map; size_t sc_fb_size; bus_addr_t sc_fb_phys; uint8_t *sc_fb_base; /* HDMI */ eventhandler_tag sc_hdmi_evh; }; static void ipu_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { bus_addr_t *addr; if (err) return; addr = (bus_addr_t*)arg; *addr = segs[0].ds_addr; } static void ipu_ch_param_set_value(struct ipu_cpmem_ch_param *param, int word, unsigned int offset, int len, uint32_t value) { uint32_t datapos, bitpos, mask; uint32_t data, data2; KASSERT((len <= 32), ("%s: field len is more than 32", __func__)); datapos = offset / 32; bitpos = offset % 32; mask = (1 << len) - 1; data = param->word[word].data[datapos]; data &= ~(mask << bitpos); data |= (value << bitpos); param->word[word].data[datapos] = data; if ((bitpos + len) > 32) { len = bitpos + len - 32; mask = (1UL << len) - 1; data2 = param->word[word].data[datapos + 1]; data2 &= mask; data2 |= (value >> (32 - bitpos)); param->word[word].data[datapos + 1] = data2; } } #ifdef DEBUG static uint32_t ipu_ch_param_get_value(struct ipu_cpmem_ch_param *param, int word, unsigned int offset, int len) { uint32_t datapos, bitpos, mask; uint32_t data, data2; KASSERT((len <= 32), ("%s: field len is more than 32", __func__)); datapos = offset / 32; bitpos = offset % 32; mask = (1UL << len) - 1; data = param->word[word].data[datapos]; data = data >> bitpos; data &= mask; if ((bitpos + len) > 32) { len = bitpos + len - 32; mask = (1UL << len) - 1; data2 = param->word[word].data[datapos + 1]; data2 &= mask; data |= (data2 << (32 - bitpos)); } return (data); } static void ipu_print_channel(struct ipu_cpmem_ch_param *param) { int offset0[] = {0, 10, 19, 32, 44, 45, 46, 68, 90, 94, 95, 113, 114, 117, 119, 120, 121, 122, 123, 124, 125, 138, 150, 151, -1}; int offset1[] 
= {0, 29, 58, 78, 85, 89, 90, 93, 95, 102, 116, 119, 122, 125, 128, 133, 138, 143, 148, 149, 150, -1}; printf("WORD0: %08x %08x %08x %08x %08x\n", param->word[0].data[0], param->word[0].data[1], param->word[0].data[2], param->word[0].data[3], param->word[0].data[4]); printf("WORD1: %08x %08x %08x %08x %08x\n", param->word[1].data[0], param->word[1].data[1], param->word[1].data[2], param->word[1].data[3], param->word[1].data[4]); for (int i = 0; offset0[i + 1] != -1; i++) { int len = offset0[i + 1] - offset0[i]; printf("W0[%d:%d] = %d\n", offset0[i], offset0[i] + len - 1, ipu_ch_param_get_value(param, 0, offset0[i], len) ); } for (int i = 0; offset1[i + 1] != -1; i++) { int len = offset1[i + 1] - offset1[i]; printf("W1[%d:%d] = %d\n", offset1[i], offset1[i] + len - 1, ipu_ch_param_get_value(param, 1, offset1[i], len) ); } printf("FW: %d\n", CH_PARAM_GET_FW(param)); printf("FH: %d\n", CH_PARAM_GET_FH(param)); printf("SLY: %d\n", CH_PARAM_GET_SLY(param)); printf("EBA0: 0x%08x\n", CH_PARAM_GET_EBA0(param)); printf("EBA1: 0x%08x\n", CH_PARAM_GET_EBA1(param)); printf("BPP: %d\n", CH_PARAM_GET_BPP(param)); printf("PFS: %d\n", CH_PARAM_GET_PFS(param)); printf("NPB: %d\n", CH_PARAM_GET_NPB(param)); printf("UBO: %d\n", CH_PARAM_GET_UBO(param)); printf("VBO: %d\n", CH_PARAM_GET_VBO(param)); printf("RED: %d bits @%d\n", CH_PARAM_GET_RED_WIDTH(param) + 1, CH_PARAM_GET_RED_OFFSET(param)); printf("GREEN: %d bits @%d\n", CH_PARAM_GET_GREEN_WIDTH(param) + 1, CH_PARAM_GET_GREEN_OFFSET(param)); printf("BLUE: %d bits @%d\n", CH_PARAM_GET_BLUE_WIDTH(param) + 1, CH_PARAM_GET_BLUE_OFFSET(param)); printf("ALPHA: %d bits @%d\n", CH_PARAM_GET_ALPHA_WIDTH(param) + 1, CH_PARAM_GET_ALPHA_OFFSET(param)); } #endif static void ipu_di_enable(struct ipu_softc *sc, int di) { uint32_t flag, reg; flag = di ? DISP_GEN_DI1_CNTR_RELEASE : DISP_GEN_DI0_CNTR_RELEASE; reg = IPU_READ4(sc, IPU_DISP_GEN); reg |= flag; IPU_WRITE4(sc, IPU_DISP_GEN, reg); } static void ipu_config_wave_gen_0(struct ipu_softc *sc, int di, int wave_gen, int run_value, int run_res, int offset_value, int offset_res) { uint32_t addr, reg; addr = (di ? IPU_DI1_SW_GEN0_1 : IPU_DI0_SW_GEN0_1) + (wave_gen - 1) * sizeof(uint32_t); reg = DI_RUN_VALUE_M1(run_value) | DI_RUN_RESOLUTION(run_res) | DI_OFFSET_VALUE(offset_value) | offset_res; IPU_WRITE4(sc, addr, reg); } static void ipu_config_wave_gen_1(struct ipu_softc *sc, int di, int wave_gen, int repeat_count, int cnt_clr_src, int cnt_polarity_gen_en, int cnt_polarity_clr_src, int cnt_polarity_trigger_src, int cnt_up, int cnt_down) { uint32_t addr, reg; addr = (di ? IPU_DI1_SW_GEN1_1 : IPU_DI0_SW_GEN1_1) + (wave_gen - 1) * sizeof(uint32_t); reg = DI_CNT_POLARITY_GEN_EN(cnt_polarity_gen_en) | DI_CNT_CLR_SEL(cnt_clr_src) | DI_CNT_POLARITY_TRIGGER_SEL(cnt_polarity_trigger_src) | DI_CNT_POLARITY_CLR_SEL(cnt_polarity_clr_src); reg |= DI_CNT_DOWN(cnt_down) | cnt_up; if (repeat_count == 0) reg |= DI_CNT_AUTO_RELOAD; IPU_WRITE4(sc, addr, reg); addr = (di ? IPU_DI1_STP_REP : IPU_DI0_STP_REP) + (wave_gen - 1) / 2 * sizeof(uint32_t); reg = IPU_READ4(sc, addr); if (wave_gen % 2) { reg &= ~(0xffff); reg |= repeat_count; } else { reg &= ~(0xffff << 16); reg |= (repeat_count << 16); } IPU_WRITE4(sc, addr, reg); } static void ipu_reset_wave_gen(struct ipu_softc *sc, int di, int wave_gen) { uint32_t addr, reg; addr = (di ? IPU_DI1_SW_GEN0_1 : IPU_DI0_SW_GEN0_1) + (wave_gen - 1) * sizeof(uint32_t); IPU_WRITE4(sc, addr, 0); addr = (di ? 
IPU_DI1_SW_GEN1_1 : IPU_DI0_SW_GEN1_1) + (wave_gen - 1) * sizeof(uint32_t); IPU_WRITE4(sc, addr, 0); addr = (di ? IPU_DI1_STP_REP : IPU_DI0_STP_REP) + (wave_gen - 1) / 2 * sizeof(uint32_t); reg = IPU_READ4(sc, addr); if (wave_gen % 2) reg &= ~(0xffff); else reg &= ~(0xffff << 16); IPU_WRITE4(sc, addr, reg); } static void ipu_init_microcode_template(struct ipu_softc *sc, int di, int map) { uint32_t addr; uint32_t w1, w2; int i, word; int glue; word = di ? 2 : 5; for (i = 0; i < 3; i++) { if (i == 0) glue = GLUELOGIC_KEEP_ASSERTED; else if (i == 1) glue = GLUELOGIC_KEEP_NEGATED; else if (i == 2) glue = 0; w1 = TEMPLATE_SYNC(5) | TEMPLATE_GLUELOGIC(glue) | TEMPLATE_WAVEFORM(1) | /* wave unit 0 */ TEMPLATE_MAPPING(map + 1); /* operand is zero */ /* Write data to DI and Hold data in register */ w2 = TEMPLATE_OPCODE(OPCODE_WROD) | TEMPLATE_STOP; addr = DC_TEMPL_BASE + (word + i) * 2 * sizeof(uint32_t); IPU_WRITE4(sc, addr, w1); IPU_WRITE4(sc, addr + sizeof(uint32_t), w2); } } static void ipu_config_timing(struct ipu_softc *sc, int di) { int div; uint32_t di_scr_conf; uint32_t gen_offset, gen; uint32_t as_gen_offset, as_gen; uint32_t dw_gen_offset, dw_gen; uint32_t dw_set_offset, dw_set; uint32_t bs_clkgen_offset; int map; /* TODO: check mode restrictions / fixup */ /* TODO: enable timers, get divisors */ div = 1; map = 0; bs_clkgen_offset = di ? IPU_DI1_BS_CLKGEN0 : IPU_DI0_BS_CLKGEN0; IPU_WRITE4(sc, bs_clkgen_offset, DI_BS_CLKGEN0(div, 0)); /* half of the divider */ IPU_WRITE4(sc, bs_clkgen_offset + 4, DI_BS_CLKGEN1_DOWN(div / 2, div % 2)); /* * TODO: Configure LLDB clock by changing following fields * in CCM fields: * CS2CDR_LDB_DI0_CLK_SEL * CSCMR2_LDB_DI0_IPU_DIV * CBCDR_MMDC_CH1_AXI_PODF */ /* Setup wave generator */ dw_gen_offset = di ? IPU_DI1_DW_GEN_0 : IPU_DI0_DW_GEN_0; dw_gen = DW_GEN_DI_ACCESS_SIZE(div - 1) | DW_GEN_DI_COMPONENT_SIZE(div - 1); dw_gen &= ~DW_GEN_DI_PIN_15_SET(DW_GEN_DI_SET_MASK); dw_gen |= DW_GEN_DI_PIN_15_SET(3); /* set 3*/ IPU_WRITE4(sc, dw_gen_offset, dw_gen); dw_set_offset = di ? IPU_DI1_DW_SET3_0 : IPU_DI0_DW_SET3_0; dw_set = DW_SET_DATA_CNT_DOWN(div * 2) | DW_SET_DATA_CNT_UP(0); IPU_WRITE4(sc, dw_set_offset, dw_set); /* DI_COUNTER_INT_HSYNC */ ipu_config_wave_gen_0(sc, di, DI_COUNTER_INT_HSYNC, sc->sc_mode->htotal - 1, DI_SYNC_CLK, 0, DI_SYNC_NONE); ipu_config_wave_gen_1(sc, di, DI_COUNTER_INT_HSYNC, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0); /* DI_COUNTER_HSYNC */ ipu_config_wave_gen_0(sc, di, DI_COUNTER_HSYNC, sc->sc_mode->htotal - 1, DI_SYNC_CLK, 0, DI_SYNC_CLK); ipu_config_wave_gen_1(sc, di, DI_COUNTER_HSYNC, 0, DI_SYNC_NONE, 1, DI_SYNC_NONE, DI_SYNC_CLK, 0, MODE_HSW(sc->sc_mode) * 2); /* DI_COUNTER_VSYNC */ ipu_config_wave_gen_0(sc, di, DI_COUNTER_VSYNC, sc->sc_mode->vtotal - 1, DI_SYNC_COUNTER(DI_COUNTER_INT_HSYNC), 0, DI_SYNC_NONE); ipu_config_wave_gen_1(sc, di, DI_COUNTER_VSYNC, 0, DI_SYNC_NONE, 1, DI_SYNC_NONE, DI_SYNC_COUNTER(DI_COUNTER_INT_HSYNC), 0, MODE_VSW(sc->sc_mode) * 2); di_scr_conf = di ? 
IPU_DI1_SCR_CONF : IPU_DI0_SCR_CONF; IPU_WRITE4(sc, di_scr_conf, sc->sc_mode->vtotal - 1); /* TODO: update DI_SCR_CONF */ /* Active Data 0 */ ipu_config_wave_gen_0(sc, di, DI_COUNTER_AD_0, 0, DI_SYNC_COUNTER(DI_COUNTER_HSYNC), MODE_VSW(sc->sc_mode) + MODE_VFP(sc->sc_mode), DI_SYNC_COUNTER(DI_COUNTER_HSYNC)); ipu_config_wave_gen_1(sc, di, DI_COUNTER_AD_0, sc->sc_mode->vdisplay, DI_SYNC_COUNTER(DI_COUNTER_VSYNC), 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0); ipu_config_wave_gen_0(sc, di, DI_COUNTER_AD_1, 0, DI_SYNC_CLK, MODE_HSW(sc->sc_mode) + MODE_HFP(sc->sc_mode), DI_SYNC_CLK); ipu_config_wave_gen_1(sc, di, DI_COUNTER_AD_1, sc->sc_mode->hdisplay, DI_SYNC_COUNTER(DI_COUNTER_AD_0), 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0); ipu_reset_wave_gen(sc, di, 6); ipu_reset_wave_gen(sc, di, 7); ipu_reset_wave_gen(sc, di, 8); ipu_reset_wave_gen(sc, di, 9); ipu_init_microcode_template(sc, di, map); gen_offset = di ? IPU_DI1_GENERAL : IPU_DI0_GENERAL; gen = IPU_READ4(sc, gen_offset); if (sc->sc_mode->flags & VID_NHSYNC) gen &= ~DI_GENERAL_POLARITY_2; else /* active high */ gen |= DI_GENERAL_POLARITY_2; if (sc->sc_mode->flags & VID_NVSYNC) gen &= ~DI_GENERAL_POLARITY_3; else /* active high */ gen |= DI_GENERAL_POLARITY_3; if (MODE_PIXEL_CLOCK_INVERT) gen &= ~DI_GENERAL_POL_CLK; else gen |= DI_GENERAL_POL_CLK; /* Use LDB clock to drive pixel clock */ gen |= DI_CLOCK_EXTERNAL; IPU_WRITE4(sc, gen_offset, gen); as_gen_offset = di ? IPU_DI1_SYNC_AS_GEN : IPU_DI0_SYNC_AS_GEN; as_gen = SYNC_AS_GEN_VSYNC_SEL(DI_COUNTER_VSYNC - 1) | SYNC_AS_GEN_SYNC_START(2); IPU_WRITE4(sc, as_gen_offset, as_gen); IPU_WRITE4(sc, (di ? IPU_DI1_POL : IPU_DI0_POL), DI_POL_DRDY_POLARITY_15); IPU_WRITE4(sc, DC_DISP_CONF2(di), sc->sc_mode->hdisplay); } static void ipu_dc_enable(struct ipu_softc *sc) { uint32_t conf; /* channel 1 uses DI1 */ IPU_WRITE4(sc, DC_WRITE_CH_CONF_1, WRITE_CH_CONF_PROG_DI_ID(1)); conf = IPU_READ4(sc, DC_WRITE_CH_CONF_5); conf &= ~WRITE_CH_CONF_PROG_CHAN_TYP_MASK; conf |= WRITE_CH_CONF_PROG_CHAN_NORMAL; IPU_WRITE4(sc, DC_WRITE_CH_CONF_5, conf); /* TODO: enable clock */ } static void ipu_dc_link_event(struct ipu_softc *sc, int event, int addr, int priority) { uint32_t reg; int offset; int shift; if (event % 2) shift = 16; else shift = 0; offset = DC_RL0_CH_5 + (event / 2) * sizeof(uint32_t); reg = IPU_READ4(sc, offset); reg &= ~(0xFFFF << shift); reg |= ((addr << 8) | priority) << shift; IPU_WRITE4(sc, offset, reg); } static void ipu_dc_setup_map(struct ipu_softc *sc, int map, int byte, int offset, int mask) { uint32_t reg, shift, ptr; ptr = map * 3 + byte; reg = IPU_READ4(sc, DC_MAP_CONF_VAL(ptr)); if (ptr & 1) shift = 16; else shift = 0; reg &= ~(0xffff << shift); reg |= ((offset << 8) | mask) << shift; IPU_WRITE4(sc, DC_MAP_CONF_VAL(ptr), reg); reg = IPU_READ4(sc, DC_MAP_CONF_PTR(map)); if (map & 1) shift = 16 + 5 * byte; else shift = 5 * byte; reg &= ~(MAP_CONF_PTR_MASK << shift); reg |= (ptr) << shift; IPU_WRITE4(sc, DC_MAP_CONF_PTR(map), reg); } static void ipu_dc_reset_map(struct ipu_softc *sc, int map) { uint32_t reg, shift; reg = IPU_READ4(sc, DC_MAP_CONF_VAL(map)); if (map & 1) shift = 16; else shift = 0; reg &= ~(MAP_CONF_VAL_MASK << shift); IPU_WRITE4(sc, DC_MAP_CONF_VAL(map), reg); } static void ipu_dc_init(struct ipu_softc *sc, int di_port) { int addr; uint32_t conf; if (di_port) addr = 2; else addr = 5; ipu_dc_link_event(sc, DC_EVENT_NL, addr, 3); ipu_dc_link_event(sc, DC_EVENT_EOL, addr + 1, 2); ipu_dc_link_event(sc, DC_EVENT_NEW_DATA, addr + 2, 1); ipu_dc_link_event(sc, DC_EVENT_NF, 0, 0); 
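	/*
	 * The remaining DC events are not used in this configuration;
	 * linking them to microcode address 0 with priority 0 leaves
	 * them effectively disabled.
	 */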
ipu_dc_link_event(sc, DC_EVENT_NFIELD, 0, 0); ipu_dc_link_event(sc, DC_EVENT_EOF, 0, 0); ipu_dc_link_event(sc, DC_EVENT_EOFIELD, 0, 0); ipu_dc_link_event(sc, DC_EVENT_NEW_CHAN, 0, 0); ipu_dc_link_event(sc, DC_EVENT_NEW_ADDR, 0, 0); conf = WRITE_CH_CONF_PROG_W_SIZE(0x02) | WRITE_CH_CONF_PROG_DISP_ID(DI_PORT) | WRITE_CH_CONF_PROG_DI_ID(DI_PORT); IPU_WRITE4(sc, DC_WRITE_CH_CONF_5, conf); IPU_WRITE4(sc, DC_WRITE_CH_ADDR_5, 0x00000000); IPU_WRITE4(sc, DC_GEN, DC_GEN_SYNC_PRIORITY | DC_GEN_SYNC); /* High priority, sync */ } static void ipu_init_buffer(struct ipu_softc *sc) { struct ipu_cpmem_ch_param param; uint32_t stride; uint32_t reg, db_mode_sel, cur_buf; stride = sc->sc_mode->hdisplay * MODE_BPP / 8; /* init channel parameters */ CH_PARAM_RESET(&param); /* XXX: interlaced modes are not supported yet */ CH_PARAM_SET_FW(&param, sc->sc_mode->hdisplay - 1); CH_PARAM_SET_FH(&param, sc->sc_mode->vdisplay - 1); CH_PARAM_SET_SLY(&param, stride - 1); CH_PARAM_SET_EBA0(&param, (sc->sc_fb_phys >> 3)); CH_PARAM_SET_EBA1(&param, (sc->sc_fb_phys >> 3)); CH_PARAM_SET_BPP(&param, IPU_PIX_FORMAT_BPP_16); CH_PARAM_SET_PFS(&param, IPU_PIX_FORMAT_RGB); /* 16 pixels per burst access */ CH_PARAM_SET_NPB(&param, 16 - 1); CH_PARAM_SET_RED_OFFSET(&param, 0); CH_PARAM_SET_RED_WIDTH(&param, 5 - 1); CH_PARAM_SET_GREEN_OFFSET(&param, 5); CH_PARAM_SET_GREEN_WIDTH(&param, 6 - 1); CH_PARAM_SET_BLUE_OFFSET(&param, 11); CH_PARAM_SET_BLUE_WIDTH(&param, 5 - 1); CH_PARAM_SET_ALPHA_OFFSET(&param, 16); CH_PARAM_SET_ALPHA_WIDTH(&param, 8 - 1); CH_PARAM_SET_UBO(&param, 0); CH_PARAM_SET_VBO(&param, 0); IPU_WRITE_CH_PARAM(sc, DMA_CHANNEL, &param); #ifdef DEBUG ipu_print_channel(&param); #endif /* init DMFC */ IPU_WRITE4(sc, DMFC_IC_CTRL, DMFC_IC_CTRL_DISABLED); /* High resolution DP */ IPU_WRITE4(sc, DMFC_WR_CHAN, DMFC_WR_CHAN_BURST_SIZE_8 | DMFC_WR_CHAN_FIFO_SIZE_128); IPU_WRITE4(sc, DMFC_WR_CHAN_DEF, DMFC_WR_CHAN_DEF_WM_CLR_2C(1) | DMFC_WR_CHAN_DEF_WM_CLR_1C(1) | DMFC_WR_CHAN_DEF_WM_CLR_2(1) | DMFC_WR_CHAN_DEF_WM_CLR_1(7) | DMFC_WR_CHAN_DEF_WM_SET_1(5) | DMFC_WR_CHAN_DEF_WM_EN_1); IPU_WRITE4(sc, DMFC_DP_CHAN, DMFC_DP_CHAN_BURST_SIZE_5F(DMFC_DP_CHAN_BURST_SIZE_8) | DMFC_DP_CHAN_FIFO_SIZE_5F(DMFC_DP_CHAN_FIFO_SIZE_128) | DMFC_DP_CHAN_ST_ADDR_SIZE_5F(6) /* segment 6 */ | DMFC_DP_CHAN_BURST_SIZE_5B(DMFC_DP_CHAN_BURST_SIZE_8) | DMFC_DP_CHAN_FIFO_SIZE_5B(DMFC_DP_CHAN_FIFO_SIZE_256) | DMFC_DP_CHAN_ST_ADDR_SIZE_5B(2) /* segment 2 */); IPU_WRITE4(sc, DMFC_DP_CHAN_DEF, DMFC_DP_CHAN_DEF_WM_CLR_6F(1) | DMFC_DP_CHAN_DEF_WM_CLR_6B(1) | DMFC_DP_CHAN_DEF_WM_CLR_5F(7) | DMFC_DP_CHAN_DEF_WM_SET_5F(5) | DMFC_DP_CHAN_DEF_WM_EN_5F | DMFC_DP_CHAN_DEF_WM_CLR_5B(7) | DMFC_DP_CHAN_DEF_WM_SET_5B(5) | DMFC_DP_CHAN_DEF_WM_EN_5B); reg = IPU_READ4(sc, DMFC_GENERAL_1); reg &= ~(DMFC_GENERAL_1_WAIT4EOT_5B); IPU_WRITE4(sc, DMFC_GENERAL_1, reg); /* XXX: set priority? */ /* Set single buffer mode */ if (DMA_CHANNEL < 32) { db_mode_sel = IPU_CH_DB_MODE_SEL_0; cur_buf = IPU_CUR_BUF_0; } else { db_mode_sel = IPU_CH_DB_MODE_SEL_1; cur_buf = IPU_CUR_BUF_1; } reg = IPU_READ4(sc, db_mode_sel); reg |= (1UL << (DMA_CHANNEL & 0x1f)); IPU_WRITE4(sc, db_mode_sel, reg); IPU_WRITE4(sc, cur_buf, (1UL << (DMA_CHANNEL & 0x1f))); } static int ipu_init(struct ipu_softc *sc) { uint32_t reg, off; int i, err; size_t dma_size; IPU_WRITE4(sc, IPU_CONF, DI_PORT ?
IPU_CONF_DI1_EN : IPU_CONF_DI0_EN); IPU_WRITE4(sc, IPU_MEM_RST, IPU_MEM_RST_ALL); i = 1000; while (i-- > 0) { if (!(IPU_READ4(sc, IPU_MEM_RST) & IPU_MEM_RST_START)) break; DELAY(1); } if (i <= 0) { err = ETIMEDOUT; device_printf(sc->sc_dev, "timeout while resetting memory\n"); goto fail; } ipu_dc_reset_map(sc, 0); ipu_dc_setup_map(sc, 0, 0, 7, 0xff); ipu_dc_setup_map(sc, 0, 1, 15, 0xff); ipu_dc_setup_map(sc, 0, 2, 23, 0xff); dma_size = round_page(sc->sc_mode->hdisplay * sc->sc_mode->vdisplay * (MODE_BPP / 8)); /* * Now allocate framebuffer memory */ err = bus_dma_tag_create( bus_get_dma_tag(sc->sc_dev), 4, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_size, 1, /* maxsize, nsegments */ dma_size, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_dma_tag); if (err) goto fail; err = bus_dmamem_alloc(sc->sc_dma_tag, (void **)&sc->sc_fb_base, BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_dma_map); if (err) { device_printf(sc->sc_dev, "cannot allocate framebuffer\n"); goto fail; } err = bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, sc->sc_fb_base, dma_size, ipu_dmamap_cb, &sc->sc_fb_phys, BUS_DMA_NOWAIT); if (err) { device_printf(sc->sc_dev, "cannot load DMA map\n"); goto fail; } /* Calculate actual FB Size */ sc->sc_fb_size = sc->sc_mode->hdisplay * sc->sc_mode->vdisplay * MODE_BPP / 8; ipu_dc_init(sc, DI_PORT); reg = IPU_READ4(sc, IPU_CONF); reg |= IPU_CONF_DMFC_EN | IPU_CONF_DC_EN | IPU_CONF_DP_EN; IPU_WRITE4(sc, IPU_CONF, reg); ipu_config_timing(sc, DI_PORT); ipu_init_buffer(sc); ipu_di_enable(sc, DI_PORT); /* Enable DMA channel */ off = (DMA_CHANNEL > 31) ? IPU_IDMAC_CH_EN_2 : IPU_IDMAC_CH_EN_1; reg = IPU_READ4(sc, off); reg |= (1 << (DMA_CHANNEL & 0x1f)); IPU_WRITE4(sc, off, reg); ipu_dc_enable(sc); sc->sc_fb_info.fb_name = device_get_nameunit(sc->sc_dev); sc->sc_fb_info.fb_vbase = (intptr_t)sc->sc_fb_base; sc->sc_fb_info.fb_pbase = sc->sc_fb_phys; sc->sc_fb_info.fb_size = sc->sc_fb_size; sc->sc_fb_info.fb_bpp = sc->sc_fb_info.fb_depth = MODE_BPP; sc->sc_fb_info.fb_stride = sc->sc_mode->hdisplay * MODE_BPP / 8; sc->sc_fb_info.fb_width = sc->sc_mode->hdisplay; sc->sc_fb_info.fb_height = sc->sc_mode->vdisplay; device_t fbd = device_add_child(sc->sc_dev, "fbd", device_get_unit(sc->sc_dev)); if (fbd == NULL) { device_printf(sc->sc_dev, "Failed to add fbd child\n"); goto fail; } if (device_probe_and_attach(fbd) != 0) { device_printf(sc->sc_dev, "Failed to attach fbd device\n"); goto fail; } return (0); fail: return (err); } static void ipu_hdmi_event(void *arg, device_t hdmi_dev) { struct ipu_softc *sc; uint8_t *edid; uint32_t edid_len; #ifdef EDID_DEBUG struct edid_info ei; #endif const struct videomode *videomode; sc = arg; edid = NULL; edid_len = 0; if (HDMI_GET_EDID(hdmi_dev, &edid, &edid_len) != 0) { device_printf(sc->sc_dev, "failed to get EDID info from HDMI framer\n"); } videomode = NULL; #ifdef EDID_DEBUG if ( edid && (edid_parse(edid, &ei) == 0)) { edid_print(&ei); } else device_printf(sc->sc_dev, "failed to parse EDID\n"); #endif sc->sc_mode = &mode1024x768; ipu_init(sc); HDMI_SET_VIDEOMODE(hdmi_dev, sc->sc_mode); } static int ipu_probe(device_t dev) { if (have_ipu) return (ENXIO); if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "fsl,imx6q-ipu")) return (ENXIO); device_set_desc(dev, "Freescale IPU"); return (BUS_PROBE_DEFAULT); } static int ipu_attach(device_t dev) { struct ipu_softc *sc; if (have_ipu) return (ENXIO); sc = 
device_get_softc(dev); sc->sc_dev = dev; sc->sc_mem_rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->sc_irq_rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irq_rid, RF_ACTIVE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } /* Enable IPU1 */ imx_ccm_ipu_enable(1); if (src_reset_ipu() != 0) { device_printf(dev, "failed to reset IPU\n"); return (ENXIO); } IPU_LOCK_INIT(sc); sc->sc_hdmi_evh = EVENTHANDLER_REGISTER(hdmi_event, ipu_hdmi_event, sc, 0); have_ipu = 1; return (0); } static int ipu_detach(device_t dev) { /* Do not let unload driver */ return (EBUSY); } static struct fb_info * ipu_fb_getinfo(device_t dev) { struct ipu_softc *sc; sc = device_get_softc(dev); return (&sc->sc_fb_info); } static device_method_t ipu_methods[] = { DEVMETHOD(device_probe, ipu_probe), DEVMETHOD(device_attach, ipu_attach), DEVMETHOD(device_detach, ipu_detach), /* Framebuffer service methods */ DEVMETHOD(fb_getinfo, ipu_fb_getinfo), DEVMETHOD_END }; static driver_t ipu_driver = { "fb", ipu_methods, sizeof(struct ipu_softc), }; static devclass_t ipu_devclass; DRIVER_MODULE(ipu, simplebus, ipu_driver, ipu_devclass, 0, 0); MODULE_VERSION(ipu, 1); MODULE_DEPEND(ipu, simplebus, 1, 1, 1); Index: user/ngie/bug-237403/sys/arm/rockchip/rk30xx_wdog.c =================================================================== --- user/ngie/bug-237403/sys/arm/rockchip/rk30xx_wdog.c (revision 348028) +++ user/ngie/bug-237403/sys/arm/rockchip/rk30xx_wdog.c (revision 348029) @@ -1,201 +1,202 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 Ganbold Tsagaankhuu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef RK30_WDT_BASE #define RK30_WDT_BASE 0x2004c000 #define RK30_WDT_PSIZE 0x100 #endif #define RK30_WDT_READ(_sc, _r) bus_read_4((_sc)->res, (_r)) #define RK30_WDT_WRITE(_sc, _r, _v) bus_write_4((_sc)->res, (_r), (_v)) #define WDOG_CTRL 0x00 #define WDOG_CTRL_EN (1 << 0) #define WDOG_CTRL_RSP_MODE (1 << 1) #define WDOG_CTRL_RST_PULSE (4 << 2) #define WDOG_CTRL_RST 0xa #define WDOG_TORR 0x04 #define WDOG_TORR_INTVL_SHIFT 0 #define WDOG_CCVR 0x08 #define WDOG_CRR 0x0c #define WDOG_CRR_PWD 0x76 #define WDOG_STAT 0x10 #define WDOG_EOI 0x14 static struct rk30_wd_softc *rk30_wd_sc = NULL; struct rk30_wd_softc { device_t dev; struct resource *res; struct mtx mtx; int freq; }; static void rk30_wd_watchdog_fn(void *private, u_int cmd, int *error); static int rk30_wd_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "rockchip,rk30xx-wdt")) { device_set_desc(dev, "Rockchip RK30XX Watchdog"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int rk30_wd_attach(device_t dev) { struct rk30_wd_softc *sc; int rid; phandle_t node; pcell_t cell; if (rk30_wd_sc != NULL) return (ENXIO); sc = device_get_softc(dev); sc->dev = dev; node = ofw_bus_get_node(sc->dev); if (OF_getencprop(node, "clock-frequency", &cell, sizeof(cell)) > 0) sc->freq = cell / 1000000; else return (ENXIO); rid = 0; sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } rk30_wd_sc = sc; mtx_init(&sc->mtx, "RK30XX Watchdog", "rk30_wd", MTX_DEF); EVENTHANDLER_REGISTER(watchdog_list, rk30_wd_watchdog_fn, sc, 0); return (0); } static void rk30_wd_watchdog_fn(void *private, u_int cmd, int *error) { struct rk30_wd_softc *sc; uint64_t ms, m, max; int i; sc = private; mtx_lock(&sc->mtx); cmd &= WD_INTERVAL; if (cmd > 0) { ms = ((uint64_t)1 << (cmd & WD_INTERVAL)) / 1000000; m = 0xffff / sc->freq; max = 0x7fffffff / sc->freq + 1; i = 0; while (m < max && m < ms) { m <<= 1; i++; } if (m < max) { RK30_WDT_WRITE(sc, WDOG_TORR, i << WDOG_TORR_INTVL_SHIFT); RK30_WDT_WRITE(sc, WDOG_CTRL, WDOG_CTRL_EN | WDOG_CTRL_RSP_MODE | WDOG_CTRL_RST_PULSE); RK30_WDT_WRITE(sc, WDOG_CRR, WDOG_CRR_PWD); *error = 0; } else { device_printf(sc->dev, "Can not be disabled\n"); mtx_unlock(&sc->mtx); RK30_WDT_WRITE(sc, WDOG_CTRL, WDOG_CTRL_RST); return; } } else RK30_WDT_WRITE(sc, WDOG_CTRL, WDOG_CTRL_RST); mtx_unlock(&sc->mtx); } void rk30_wd_watchdog_reset(void) { bus_space_handle_t bsh; bus_space_map(fdtbus_bs_tag, RK30_WDT_BASE, RK30_WDT_PSIZE, 0, &bsh); bus_space_write_4(fdtbus_bs_tag, bsh, WDOG_TORR, 0); bus_space_write_4(fdtbus_bs_tag, bsh, WDOG_CTRL, WDOG_CTRL_EN | WDOG_CTRL_RSP_MODE | WDOG_CTRL_RST_PULSE); while (1); } static device_method_t rk30_wd_methods[] = { DEVMETHOD(device_probe, rk30_wd_probe), DEVMETHOD(device_attach, rk30_wd_attach), DEVMETHOD_END }; static driver_t rk30_wd_driver = { "rk30_wd", rk30_wd_methods, sizeof(struct rk30_wd_softc), }; static devclass_t rk30_wd_devclass; DRIVER_MODULE(rk30_wd, simplebus, rk30_wd_driver, rk30_wd_devclass, 0, 0); Index: user/ngie/bug-237403/sys/arm/ti/am335x/am335x_lcd.c =================================================================== --- user/ngie/bug-237403/sys/arm/ti/am335x/am335x_lcd.c (revision 348028) +++ 
user/ngie/bug-237403/sys/arm/ti/am335x/am335x_lcd.c (revision 348029) @@ -1,1082 +1,1083 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright 2013 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_syscons.h" #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEV_SC #include #else /* VT */ #include #endif #include #include #include "am335x_lcd.h" #include "am335x_pwm.h" #include "fb_if.h" #include "hdmi_if.h" #define LCD_PID 0x00 #define LCD_CTRL 0x04 #define CTRL_DIV_MASK 0xff #define CTRL_DIV_SHIFT 8 #define CTRL_AUTO_UFLOW_RESTART (1 << 1) #define CTRL_RASTER_MODE 1 #define CTRL_LIDD_MODE 0 #define LCD_LIDD_CTRL 0x0C #define LCD_LIDD_CS0_CONF 0x10 #define LCD_LIDD_CS0_ADDR 0x14 #define LCD_LIDD_CS0_DATA 0x18 #define LCD_LIDD_CS1_CONF 0x1C #define LCD_LIDD_CS1_ADDR 0x20 #define LCD_LIDD_CS1_DATA 0x24 #define LCD_RASTER_CTRL 0x28 #define RASTER_CTRL_TFT24_UNPACKED (1 << 26) #define RASTER_CTRL_TFT24 (1 << 25) #define RASTER_CTRL_STN565 (1 << 24) #define RASTER_CTRL_TFTPMAP (1 << 23) #define RASTER_CTRL_NIBMODE (1 << 22) #define RASTER_CTRL_PALMODE_SHIFT 20 #define PALETTE_PALETTE_AND_DATA 0x00 #define PALETTE_PALETTE_ONLY 0x01 #define PALETTE_DATA_ONLY 0x02 #define RASTER_CTRL_REQDLY_SHIFT 12 #define RASTER_CTRL_MONO8B (1 << 9) #define RASTER_CTRL_RBORDER (1 << 8) #define RASTER_CTRL_LCDTFT (1 << 7) #define RASTER_CTRL_LCDBW (1 << 1) #define RASTER_CTRL_LCDEN (1 << 0) #define LCD_RASTER_TIMING_0 0x2C #define RASTER_TIMING_0_HBP_SHIFT 24 #define RASTER_TIMING_0_HFP_SHIFT 16 #define RASTER_TIMING_0_HSW_SHIFT 10 #define RASTER_TIMING_0_PPLLSB_SHIFT 4 #define RASTER_TIMING_0_PPLMSB_SHIFT 3 #define LCD_RASTER_TIMING_1 0x30 #define RASTER_TIMING_1_VBP_SHIFT 24 #define RASTER_TIMING_1_VFP_SHIFT 16 #define RASTER_TIMING_1_VSW_SHIFT 10 #define RASTER_TIMING_1_LPP_SHIFT 0 #define LCD_RASTER_TIMING_2 0x34 #define RASTER_TIMING_2_HSWHI_SHIFT 27 #define RASTER_TIMING_2_LPP_B10_SHIFT 26 #define RASTER_TIMING_2_PHSVS (1 << 25) #define RASTER_TIMING_2_PHSVS_RISE (1 << 24) #define RASTER_TIMING_2_PHSVS_FALL (0 << 24) #define 
RASTER_TIMING_2_IOE (1 << 23) #define RASTER_TIMING_2_IPC (1 << 22) #define RASTER_TIMING_2_IHS (1 << 21) #define RASTER_TIMING_2_IVS (1 << 20) #define RASTER_TIMING_2_ACBI_SHIFT 16 #define RASTER_TIMING_2_ACB_SHIFT 8 #define RASTER_TIMING_2_HBPHI_SHIFT 4 #define RASTER_TIMING_2_HFPHI_SHIFT 0 #define LCD_RASTER_SUBPANEL 0x38 #define LCD_RASTER_SUBPANEL2 0x3C #define LCD_LCDDMA_CTRL 0x40 #define LCDDMA_CTRL_DMA_MASTER_PRIO_SHIFT 16 #define LCDDMA_CTRL_TH_FIFO_RDY_SHIFT 8 #define LCDDMA_CTRL_BURST_SIZE_SHIFT 4 #define LCDDMA_CTRL_BYTES_SWAP (1 << 3) #define LCDDMA_CTRL_BE (1 << 1) #define LCDDMA_CTRL_FB0_ONLY 0 #define LCDDMA_CTRL_FB0_FB1 (1 << 0) #define LCD_LCDDMA_FB0_BASE 0x44 #define LCD_LCDDMA_FB0_CEILING 0x48 #define LCD_LCDDMA_FB1_BASE 0x4C #define LCD_LCDDMA_FB1_CEILING 0x50 #define LCD_SYSCONFIG 0x54 #define SYSCONFIG_STANDBY_FORCE (0 << 4) #define SYSCONFIG_STANDBY_NONE (1 << 4) #define SYSCONFIG_STANDBY_SMART (2 << 4) #define SYSCONFIG_IDLE_FORCE (0 << 2) #define SYSCONFIG_IDLE_NONE (1 << 2) #define SYSCONFIG_IDLE_SMART (2 << 2) #define LCD_IRQSTATUS_RAW 0x58 #define LCD_IRQSTATUS 0x5C #define LCD_IRQENABLE_SET 0x60 #define LCD_IRQENABLE_CLEAR 0x64 #define IRQ_EOF1 (1 << 9) #define IRQ_EOF0 (1 << 8) #define IRQ_PL (1 << 6) #define IRQ_FUF (1 << 5) #define IRQ_ACB (1 << 3) #define IRQ_SYNC_LOST (1 << 2) #define IRQ_RASTER_DONE (1 << 1) #define IRQ_FRAME_DONE (1 << 0) #define LCD_END_OF_INT_IND 0x68 #define LCD_CLKC_ENABLE 0x6C #define CLKC_ENABLE_DMA (1 << 2) #define CLKC_ENABLE_LDID (1 << 1) #define CLKC_ENABLE_CORE (1 << 0) #define LCD_CLKC_RESET 0x70 #define CLKC_RESET_MAIN (1 << 3) #define CLKC_RESET_DMA (1 << 2) #define CLKC_RESET_LDID (1 << 1) #define CLKC_RESET_CORE (1 << 0) #define LCD_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define LCD_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define LCD_LOCK_INIT(_sc) mtx_init(&(_sc)->sc_mtx, \ device_get_nameunit(_sc->sc_dev), "am335x_lcd", MTX_DEF) #define LCD_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx); #define LCD_READ4(_sc, reg) bus_read_4((_sc)->sc_mem_res, reg); #define LCD_WRITE4(_sc, reg, value) \ bus_write_4((_sc)->sc_mem_res, reg, value); /* Backlight is controlled by eCAS interface on PWM unit 0 */ #define PWM_UNIT 0 #define PWM_PERIOD 100 #define MODE_HBP(mode) ((mode)->htotal - (mode)->hsync_end) #define MODE_HFP(mode) ((mode)->hsync_start - (mode)->hdisplay) #define MODE_HSW(mode) ((mode)->hsync_end - (mode)->hsync_start) #define MODE_VBP(mode) ((mode)->vtotal - (mode)->vsync_end) #define MODE_VFP(mode) ((mode)->vsync_start - (mode)->vdisplay) #define MODE_VSW(mode) ((mode)->vsync_end - (mode)->vsync_start) #define MAX_PIXEL_CLOCK 126000 #define MAX_BANDWIDTH (1280*1024*60) struct am335x_lcd_softc { device_t sc_dev; struct fb_info sc_fb_info; struct resource *sc_mem_res; struct resource *sc_irq_res; void *sc_intr_hl; struct mtx sc_mtx; int sc_backlight; struct sysctl_oid *sc_oid; struct panel_info sc_panel; /* Framebuffer */ bus_dma_tag_t sc_dma_tag; bus_dmamap_t sc_dma_map; size_t sc_fb_size; bus_addr_t sc_fb_phys; uint8_t *sc_fb_base; /* HDMI framer */ phandle_t sc_hdmi_framer; eventhandler_tag sc_hdmi_evh; }; static void am335x_fb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { bus_addr_t *addr; if (err) return; addr = (bus_addr_t*)arg; *addr = segs[0].ds_addr; } static uint32_t am335x_lcd_calc_divisor(uint32_t reference, uint32_t freq) { uint32_t div, i; uint32_t delta, min_delta; min_delta = freq; div = 255; /* Raster mode case: divisors are in range from 2 to 255 */ for (i = 2; i < 255; i++) { delta = 
abs(reference/i - freq); if (delta < min_delta) { div = i; min_delta = delta; } } return (div); } static int am335x_lcd_sysctl_backlight(SYSCTL_HANDLER_ARGS) { struct am335x_lcd_softc *sc = (struct am335x_lcd_softc*)arg1; int error; int backlight; backlight = sc->sc_backlight; error = sysctl_handle_int(oidp, &backlight, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (backlight < 0) backlight = 0; if (backlight > 100) backlight = 100; LCD_LOCK(sc); error = am335x_pwm_config_ecap(PWM_UNIT, PWM_PERIOD, backlight*PWM_PERIOD/100); if (error == 0) sc->sc_backlight = backlight; LCD_UNLOCK(sc); return (error); } static uint32_t am335x_mode_vrefresh(const struct videomode *mode) { uint32_t refresh; /* Calculate vertical refresh rate */ refresh = (mode->dot_clock * 1000 / mode->htotal); refresh = (refresh + mode->vtotal / 2) / mode->vtotal; if (mode->flags & VID_INTERLACE) refresh *= 2; if (mode->flags & VID_DBLSCAN) refresh /= 2; return refresh; } static int am335x_mode_is_valid(const struct videomode *mode) { uint32_t hbp, hfp, hsw; uint32_t vbp, vfp, vsw; if (mode->dot_clock > MAX_PIXEL_CLOCK) return (0); if (mode->hdisplay & 0xf) return (0); if (mode->vdisplay > 2048) return (0); /* Check ranges for timing parameters */ hbp = MODE_HBP(mode) - 1; hfp = MODE_HFP(mode) - 1; hsw = MODE_HSW(mode) - 1; vbp = MODE_VBP(mode); vfp = MODE_VFP(mode); vsw = MODE_VSW(mode) - 1; if (hbp > 0x3ff) return (0); if (hfp > 0x3ff) return (0); if (hsw > 0x3ff) return (0); if (vbp > 0xff) return (0); if (vfp > 0xff) return (0); if (vsw > 0x3f) return (0); if (mode->vdisplay*mode->hdisplay*am335x_mode_vrefresh(mode) > MAX_BANDWIDTH) return (0); return (1); } static void am335x_read_hdmi_property(device_t dev) { phandle_t node, xref; phandle_t endpoint; phandle_t hdmi_xref; struct am335x_lcd_softc *sc; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); sc->sc_hdmi_framer = 0; /* * Old FreeBSD way of referencing to HDMI framer */ if (OF_getencprop(node, "hdmi", &hdmi_xref, sizeof(hdmi_xref)) != -1) { sc->sc_hdmi_framer = hdmi_xref; return; } /* * Use bindings described in Linux docs: * bindings/media/video-interfaces.txt * We assume that the only endpoint in LCDC node * is HDMI framer. 
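 *
 * One possible DTS layout matched by the walk below (a hypothetical
 * fragment for illustration, not taken from a shipped DTB):
 *
 *	lcdc {
 *		port {
 *			lcdc_0: endpoint@0 {
 *				remote-endpoint = <&hdmi_0>;
 *			};
 *		};
 *	};
 *	hdmi: tda19988 {
 *		port {
 *			port@0 {
 *				hdmi_0: endpoint@0 {
 *					remote-endpoint = <&lcdc_0>;
 *				};
 *			};
 *		};
 *	};
 *
 * Starting from the remote endpoint (port/port@0/endpoint@0 on the
 * framer side), three OF_parent() steps climb endpoint@0 -> port@0 ->
 * port -> framer, and the framer's xref is stored in sc_hdmi_framer.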
*/ node = ofw_bus_find_child(node, "port"); /* No media bindings */ if (node == 0) return; for (endpoint = OF_child(node); endpoint != 0; endpoint = OF_peer(endpoint)) { if (OF_getencprop(endpoint, "remote-endpoint", &xref, sizeof(xref)) != -1) { /* port/port@0/endpoint@0 */ node = OF_node_from_xref(xref); /* port/port@0 */ node = OF_parent(node); /* port */ node = OF_parent(node); /* actual owner of port, in our case HDMI framer */ sc->sc_hdmi_framer = OF_xref_from_node(OF_parent(node)); if (sc->sc_hdmi_framer != 0) return; } } } static int am335x_read_property(device_t dev, phandle_t node, const char *name, uint32_t *val) { pcell_t cell; if ((OF_getencprop(node, name, &cell, sizeof(cell))) <= 0) { device_printf(dev, "missing '%s' attribute in LCD panel info\n", name); return (ENXIO); } *val = cell; return (0); } static int am335x_read_timing(device_t dev, phandle_t node, struct panel_info *panel) { int error; phandle_t timings_node, timing_node, native; timings_node = ofw_bus_find_child(node, "display-timings"); if (timings_node == 0) { device_printf(dev, "no \"display-timings\" node\n"); return (-1); } if (OF_searchencprop(timings_node, "native-mode", &native, sizeof(native)) == -1) { device_printf(dev, "no \"native-mode\" reference in \"timings\" node\n"); return (-1); } timing_node = OF_node_from_xref(native); error = 0; if ((error = am335x_read_property(dev, timing_node, "hactive", &panel->panel_width))) goto out; if ((error = am335x_read_property(dev, timing_node, "vactive", &panel->panel_height))) goto out; if ((error = am335x_read_property(dev, timing_node, "hfront-porch", &panel->panel_hfp))) goto out; if ((error = am335x_read_property(dev, timing_node, "hback-porch", &panel->panel_hbp))) goto out; if ((error = am335x_read_property(dev, timing_node, "hsync-len", &panel->panel_hsw))) goto out; if ((error = am335x_read_property(dev, timing_node, "vfront-porch", &panel->panel_vfp))) goto out; if ((error = am335x_read_property(dev, timing_node, "vback-porch", &panel->panel_vbp))) goto out; if ((error = am335x_read_property(dev, timing_node, "vsync-len", &panel->panel_vsw))) goto out; if ((error = am335x_read_property(dev, timing_node, "clock-frequency", &panel->panel_pxl_clk))) goto out; if ((error = am335x_read_property(dev, timing_node, "pixelclk-active", &panel->pixelclk_active))) goto out; if ((error = am335x_read_property(dev, timing_node, "hsync-active", &panel->hsync_active))) goto out; if ((error = am335x_read_property(dev, timing_node, "vsync-active", &panel->vsync_active))) goto out; out: return (error); } static int am335x_read_panel_info(device_t dev, phandle_t node, struct panel_info *panel) { phandle_t panel_info_node; panel_info_node = ofw_bus_find_child(node, "panel-info"); if (panel_info_node == 0) return (-1); am335x_read_property(dev, panel_info_node, "ac-bias", &panel->ac_bias); am335x_read_property(dev, panel_info_node, "ac-bias-intrpt", &panel->ac_bias_intrpt); am335x_read_property(dev, panel_info_node, "dma-burst-sz", &panel->dma_burst_sz); am335x_read_property(dev, panel_info_node, "bpp", &panel->bpp); am335x_read_property(dev, panel_info_node, "fdd", &panel->fdd); am335x_read_property(dev, panel_info_node, "sync-edge", &panel->sync_edge); am335x_read_property(dev, panel_info_node, "sync-ctrl", &panel->sync_ctrl); return (0); } static void am335x_lcd_intr(void *arg) { struct am335x_lcd_softc *sc = arg; uint32_t reg; reg = LCD_READ4(sc, LCD_IRQSTATUS); LCD_WRITE4(sc, LCD_IRQSTATUS, reg); /* Read value back to make sure it reached the hardware */ reg = 
LCD_READ4(sc, LCD_IRQSTATUS); if (reg & IRQ_SYNC_LOST) { reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg &= ~RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg |= RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); goto done; } if (reg & IRQ_PL) { reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg &= ~RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg |= RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); goto done; } if (reg & IRQ_EOF0) { LCD_WRITE4(sc, LCD_LCDDMA_FB0_BASE, sc->sc_fb_phys); LCD_WRITE4(sc, LCD_LCDDMA_FB0_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); reg &= ~IRQ_EOF0; } if (reg & IRQ_EOF1) { LCD_WRITE4(sc, LCD_LCDDMA_FB1_BASE, sc->sc_fb_phys); LCD_WRITE4(sc, LCD_LCDDMA_FB1_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); reg &= ~IRQ_EOF1; } if (reg & IRQ_FUF) { /* TODO: Handle FUF */ } if (reg & IRQ_ACB) { /* TODO: Handle ACB */ } done: LCD_WRITE4(sc, LCD_END_OF_INT_IND, 0); /* Read value back to make sure it reached the hardware */ reg = LCD_READ4(sc, LCD_END_OF_INT_IND); } static const struct videomode * am335x_lcd_pick_mode(struct edid_info *ei) { const struct videomode *videomode; const struct videomode *m; int n; /* Get standard VGA as default */ videomode = NULL; /* * Pick a mode. */ if (ei->edid_preferred_mode != NULL) { if (am335x_mode_is_valid(ei->edid_preferred_mode)) videomode = ei->edid_preferred_mode; } if (videomode == NULL) { m = ei->edid_modes; sort_modes(ei->edid_modes, &ei->edid_preferred_mode, ei->edid_nmodes); for (n = 0; n < ei->edid_nmodes; n++) if (am335x_mode_is_valid(&m[n])) { videomode = &m[n]; break; } } return videomode; } static int am335x_lcd_configure(struct am335x_lcd_softc *sc) { int div; uint32_t reg, timing0, timing1, timing2; uint32_t burst_log; size_t dma_size; uint32_t hbp, hfp, hsw; uint32_t vbp, vfp, vsw; uint32_t width, height; unsigned int ref_freq; int err; /* * try to adjust clock to get double the requested frequency * HDMI/DVI displays are very sensitive to error in frequency value */ if (ti_prcm_clk_set_source_freq(LCDC_CLK, sc->sc_panel.panel_pxl_clk*2)) { device_printf(sc->sc_dev, "can't set source frequency\n"); return (ENXIO); } if (ti_prcm_clk_get_source_freq(LCDC_CLK, &ref_freq)) { device_printf(sc->sc_dev, "can't get reference frequency\n"); return (ENXIO); } /* Panel initialization */ dma_size = round_page(sc->sc_panel.panel_width*sc->sc_panel.panel_height*sc->sc_panel.bpp/8); /* * Now allocate framebuffer memory */ err = bus_dma_tag_create( bus_get_dma_tag(sc->sc_dev), 4, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_size, 1, /* maxsize, nsegments */ dma_size, 0, /* maxsegsize, flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_dma_tag); if (err) goto done; err = bus_dmamem_alloc(sc->sc_dma_tag, (void **)&sc->sc_fb_base, BUS_DMA_COHERENT, &sc->sc_dma_map); if (err) { device_printf(sc->sc_dev, "cannot allocate framebuffer\n"); goto done; } err = bus_dmamap_load(sc->sc_dma_tag, sc->sc_dma_map, sc->sc_fb_base, dma_size, am335x_fb_dmamap_cb, &sc->sc_fb_phys, BUS_DMA_NOWAIT); if (err) { device_printf(sc->sc_dev, "cannot load DMA map\n"); goto done; } /* Make sure it's blank */ memset(sc->sc_fb_base, 0x0, dma_size); /* Calculate actual FB Size */ sc->sc_fb_size = sc->sc_panel.panel_width*sc->sc_panel.panel_height*sc->sc_panel.bpp/8; /* Only raster mode is supported */ reg = CTRL_RASTER_MODE; div = am335x_lcd_calc_divisor(ref_freq,
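	    /*
	     * Since the source clock was just programmed to twice the
	     * panel pixel clock, this search normally settles on
	     * div == 2; e.g. a 74250000 Hz pixel clock yields a
	     * 148500000 Hz reference and an exact match at
	     * 148500000 / 2 (illustrative figures, not from a specific
	     * panel).
	     */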
sc->sc_panel.panel_pxl_clk); reg |= (div << CTRL_DIV_SHIFT); LCD_WRITE4(sc, LCD_CTRL, reg); /* Set timing */ timing0 = timing1 = timing2 = 0; hbp = sc->sc_panel.panel_hbp - 1; hfp = sc->sc_panel.panel_hfp - 1; hsw = sc->sc_panel.panel_hsw - 1; vbp = sc->sc_panel.panel_vbp; vfp = sc->sc_panel.panel_vfp; vsw = sc->sc_panel.panel_vsw - 1; height = sc->sc_panel.panel_height - 1; width = sc->sc_panel.panel_width - 1; /* Horizontal back porch */ timing0 |= (hbp & 0xff) << RASTER_TIMING_0_HBP_SHIFT; timing2 |= ((hbp >> 8) & 3) << RASTER_TIMING_2_HBPHI_SHIFT; /* Horizontal front porch */ timing0 |= (hfp & 0xff) << RASTER_TIMING_0_HFP_SHIFT; timing2 |= ((hfp >> 8) & 3) << RASTER_TIMING_2_HFPHI_SHIFT; /* Horizontal sync width */ timing0 |= (hsw & 0x3f) << RASTER_TIMING_0_HSW_SHIFT; timing2 |= ((hsw >> 6) & 0xf) << RASTER_TIMING_2_HSWHI_SHIFT; /* Vertical back porch, front porch, sync width */ timing1 |= (vbp & 0xff) << RASTER_TIMING_1_VBP_SHIFT; timing1 |= (vfp & 0xff) << RASTER_TIMING_1_VFP_SHIFT; timing1 |= (vsw & 0x3f) << RASTER_TIMING_1_VSW_SHIFT; /* Pixels per line */ timing0 |= ((width >> 10) & 1) << RASTER_TIMING_0_PPLMSB_SHIFT; timing0 |= ((width >> 4) & 0x3f) << RASTER_TIMING_0_PPLLSB_SHIFT; /* Lines per panel */ timing1 |= (height & 0x3ff) << RASTER_TIMING_1_LPP_SHIFT; timing2 |= ((height >> 10 ) & 1) << RASTER_TIMING_2_LPP_B10_SHIFT; /* clock signal settings */ if (sc->sc_panel.sync_ctrl) timing2 |= RASTER_TIMING_2_PHSVS; if (sc->sc_panel.sync_edge) timing2 |= RASTER_TIMING_2_PHSVS_RISE; else timing2 |= RASTER_TIMING_2_PHSVS_FALL; if (sc->sc_panel.hsync_active == 0) timing2 |= RASTER_TIMING_2_IHS; if (sc->sc_panel.vsync_active == 0) timing2 |= RASTER_TIMING_2_IVS; if (sc->sc_panel.pixelclk_active == 0) timing2 |= RASTER_TIMING_2_IPC; /* AC bias */ timing2 |= (sc->sc_panel.ac_bias << RASTER_TIMING_2_ACB_SHIFT); timing2 |= (sc->sc_panel.ac_bias_intrpt << RASTER_TIMING_2_ACBI_SHIFT); LCD_WRITE4(sc, LCD_RASTER_TIMING_0, timing0); LCD_WRITE4(sc, LCD_RASTER_TIMING_1, timing1); LCD_WRITE4(sc, LCD_RASTER_TIMING_2, timing2); /* DMA settings */ reg = LCDDMA_CTRL_FB0_FB1; /* Find power of 2 for current burst size */ switch (sc->sc_panel.dma_burst_sz) { case 1: burst_log = 0; break; case 2: burst_log = 1; break; case 4: burst_log = 2; break; case 8: burst_log = 3; break; case 16: default: burst_log = 4; break; } reg |= (burst_log << LCDDMA_CTRL_BURST_SIZE_SHIFT); /* XXX: FIFO TH */ reg |= (0 << LCDDMA_CTRL_TH_FIFO_RDY_SHIFT); LCD_WRITE4(sc, LCD_LCDDMA_CTRL, reg); LCD_WRITE4(sc, LCD_LCDDMA_FB0_BASE, sc->sc_fb_phys); LCD_WRITE4(sc, LCD_LCDDMA_FB0_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); LCD_WRITE4(sc, LCD_LCDDMA_FB1_BASE, sc->sc_fb_phys); LCD_WRITE4(sc, LCD_LCDDMA_FB1_CEILING, sc->sc_fb_phys + sc->sc_fb_size - 1); /* Enable LCD */ reg = RASTER_CTRL_LCDTFT; reg |= (sc->sc_panel.fdd << RASTER_CTRL_REQDLY_SHIFT); reg |= (PALETTE_DATA_ONLY << RASTER_CTRL_PALMODE_SHIFT); if (sc->sc_panel.bpp >= 24) reg |= RASTER_CTRL_TFT24; if (sc->sc_panel.bpp == 32) reg |= RASTER_CTRL_TFT24_UNPACKED; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); LCD_WRITE4(sc, LCD_CLKC_ENABLE, CLKC_ENABLE_DMA | CLKC_ENABLE_LDID | CLKC_ENABLE_CORE); LCD_WRITE4(sc, LCD_CLKC_RESET, CLKC_RESET_MAIN); DELAY(100); LCD_WRITE4(sc, LCD_CLKC_RESET, 0); reg = IRQ_EOF1 | IRQ_EOF0 | IRQ_FUF | IRQ_PL | IRQ_ACB | IRQ_SYNC_LOST | IRQ_RASTER_DONE | IRQ_FRAME_DONE; LCD_WRITE4(sc, LCD_IRQENABLE_SET, reg); reg = LCD_READ4(sc, LCD_RASTER_CTRL); reg |= RASTER_CTRL_LCDEN; LCD_WRITE4(sc, LCD_RASTER_CTRL, reg); LCD_WRITE4(sc, LCD_SYSCONFIG, SYSCONFIG_STANDBY_SMART 
| SYSCONFIG_IDLE_SMART); sc->sc_fb_info.fb_name = device_get_nameunit(sc->sc_dev); sc->sc_fb_info.fb_vbase = (intptr_t)sc->sc_fb_base; sc->sc_fb_info.fb_pbase = sc->sc_fb_phys; sc->sc_fb_info.fb_size = sc->sc_fb_size; sc->sc_fb_info.fb_bpp = sc->sc_fb_info.fb_depth = sc->sc_panel.bpp; sc->sc_fb_info.fb_stride = sc->sc_panel.panel_width*sc->sc_panel.bpp / 8; sc->sc_fb_info.fb_width = sc->sc_panel.panel_width; sc->sc_fb_info.fb_height = sc->sc_panel.panel_height; #ifdef DEV_SC err = (sc_attach_unit(device_get_unit(sc->sc_dev), device_get_flags(sc->sc_dev) | SC_AUTODETECT_KBD)); if (err) { device_printf(sc->sc_dev, "failed to attach syscons\n"); goto fail; } am335x_lcd_syscons_setup((vm_offset_t)sc->sc_fb_base, sc->sc_fb_phys, &panel); #else /* VT */ device_t fbd = device_add_child(sc->sc_dev, "fbd", device_get_unit(sc->sc_dev)); if (fbd != NULL) { if (device_probe_and_attach(fbd) != 0) device_printf(sc->sc_dev, "failed to attach fbd device\n"); } else device_printf(sc->sc_dev, "failed to add fbd child\n"); #endif done: return (err); } static void am335x_lcd_hdmi_event(void *arg, device_t hdmi, int event) { struct am335x_lcd_softc *sc; const struct videomode *videomode; struct videomode hdmi_mode; device_t hdmi_dev; uint8_t *edid; uint32_t edid_len; struct edid_info ei; sc = arg; /* Nothing to work with */ if (!sc->sc_hdmi_framer) { device_printf(sc->sc_dev, "HDMI event without HDMI framer set\n"); return; } hdmi_dev = OF_device_from_xref(sc->sc_hdmi_framer); if (!hdmi_dev) { device_printf(sc->sc_dev, "no actual device for \"hdmi\" property\n"); return; } edid = NULL; edid_len = 0; if (HDMI_GET_EDID(hdmi_dev, &edid, &edid_len) != 0) { device_printf(sc->sc_dev, "failed to get EDID info from HDMI framer\n"); return; } videomode = NULL; if (edid_parse(edid, &ei) == 0) { edid_print(&ei); videomode = am335x_lcd_pick_mode(&ei); } else device_printf(sc->sc_dev, "failed to parse EDID\n"); /* Use standard VGA as fallback */ if (videomode == NULL) videomode = pick_mode_by_ref(640, 480, 60); if (videomode == NULL) { device_printf(sc->sc_dev, "failed to find usable videomode"); return; } device_printf(sc->sc_dev, "detected videomode: %dx%d @ %dKHz\n", videomode->hdisplay, videomode->vdisplay, am335x_mode_vrefresh(videomode)); sc->sc_panel.panel_width = videomode->hdisplay; sc->sc_panel.panel_height = videomode->vdisplay; sc->sc_panel.panel_hfp = videomode->hsync_start - videomode->hdisplay; sc->sc_panel.panel_hbp = videomode->htotal - videomode->hsync_end; sc->sc_panel.panel_hsw = videomode->hsync_end - videomode->hsync_start; sc->sc_panel.panel_vfp = videomode->vsync_start - videomode->vdisplay; sc->sc_panel.panel_vbp = videomode->vtotal - videomode->vsync_end; sc->sc_panel.panel_vsw = videomode->vsync_end - videomode->vsync_start; sc->sc_panel.pixelclk_active = 1; /* logic for HSYNC should be reversed */ if (videomode->flags & VID_NHSYNC) sc->sc_panel.hsync_active = 1; else sc->sc_panel.hsync_active = 0; if (videomode->flags & VID_NVSYNC) sc->sc_panel.vsync_active = 0; else sc->sc_panel.vsync_active = 1; sc->sc_panel.panel_pxl_clk = videomode->dot_clock * 1000; am335x_lcd_configure(sc); memcpy(&hdmi_mode, videomode, sizeof(hdmi_mode)); hdmi_mode.hskew = videomode->hsync_end - videomode->hsync_start; hdmi_mode.flags |= VID_HSKEW; HDMI_SET_VIDEOMODE(hdmi_dev, &hdmi_mode); } static int am335x_lcd_probe(device_t dev) { #ifdef DEV_SC int err; #endif if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,am33xx-tilcdc")) return (ENXIO); device_set_desc(dev, "AM335x LCD 
controller"); #ifdef DEV_SC err = sc_probe_unit(device_get_unit(dev), device_get_flags(dev) | SC_AUTODETECT_KBD); if (err != 0) return (err); #endif return (BUS_PROBE_DEFAULT); } static int am335x_lcd_attach(device_t dev) { struct am335x_lcd_softc *sc; int err; int rid; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; phandle_t root, panel_node; err = 0; sc = device_get_softc(dev); sc->sc_dev = dev; am335x_read_hdmi_property(dev); root = OF_finddevice("/"); if (root == -1) { device_printf(dev, "failed to get FDT root node\n"); return (ENXIO); } sc->sc_panel.ac_bias = 255; sc->sc_panel.ac_bias_intrpt = 0; sc->sc_panel.dma_burst_sz = 16; sc->sc_panel.bpp = 16; sc->sc_panel.fdd = 128; sc->sc_panel.sync_edge = 0; sc->sc_panel.sync_ctrl = 1; panel_node = fdt_find_compatible(root, "ti,tilcdc,panel", 1); if (panel_node != 0) { device_printf(dev, "using static panel info\n"); if (am335x_read_panel_info(dev, panel_node, &sc->sc_panel)) { device_printf(dev, "failed to read panel info\n"); return (ENXIO); } if (am335x_read_timing(dev, panel_node, &sc->sc_panel)) { device_printf(dev, "failed to read timings\n"); return (ENXIO); } } ti_prcm_clk_enable(LCDC_CLK); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, am335x_lcd_intr, sc, &sc->sc_intr_hl) != 0) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->sc_mem_res); device_printf(dev, "Unable to setup the irq handler.\n"); return (ENXIO); } LCD_LOCK_INIT(sc); /* Init backlight interface */ ctx = device_get_sysctl_ctx(sc->sc_dev); tree = device_get_sysctl_tree(sc->sc_dev); sc->sc_oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "backlight", CTLTYPE_INT | CTLFLAG_RW, sc, 0, am335x_lcd_sysctl_backlight, "I", "LCD backlight"); sc->sc_backlight = 0; /* Check if eCAS interface is available at this point */ if (am335x_pwm_config_ecap(PWM_UNIT, PWM_PERIOD, PWM_PERIOD) == 0) sc->sc_backlight = 100; if (panel_node != 0) am335x_lcd_configure(sc); else sc->sc_hdmi_evh = EVENTHANDLER_REGISTER(hdmi_event, am335x_lcd_hdmi_event, sc, EVENTHANDLER_PRI_ANY); return (0); } static int am335x_lcd_detach(device_t dev) { /* Do not let unload driver */ return (EBUSY); } static struct fb_info * am335x_lcd_fb_getinfo(device_t dev) { struct am335x_lcd_softc *sc; sc = device_get_softc(dev); return (&sc->sc_fb_info); } static device_method_t am335x_lcd_methods[] = { DEVMETHOD(device_probe, am335x_lcd_probe), DEVMETHOD(device_attach, am335x_lcd_attach), DEVMETHOD(device_detach, am335x_lcd_detach), /* Framebuffer service methods */ DEVMETHOD(fb_getinfo, am335x_lcd_fb_getinfo), DEVMETHOD_END }; static driver_t am335x_lcd_driver = { "fb", am335x_lcd_methods, sizeof(struct am335x_lcd_softc), }; static devclass_t am335x_lcd_devclass; DRIVER_MODULE(am335x_lcd, simplebus, am335x_lcd_driver, am335x_lcd_devclass, 0, 0); MODULE_VERSION(am335x_lcd, 1); MODULE_DEPEND(am335x_lcd, simplebus, 1, 1, 1); Index: user/ngie/bug-237403/sys/arm/ti/am335x/tda19988.c =================================================================== --- user/ngie/bug-237403/sys/arm/ti/am335x/tda19988.c 
(revision 348028) +++ user/ngie/bug-237403/sys/arm/ti/am335x/tda19988.c (revision 348029) @@ -1,809 +1,810 @@ /*- * Copyright (c) 2015 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * NXP TDA19988 HDMI encoder */ #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbus_if.h" #include "hdmi_if.h" #define MKREG(page, addr) (((page) << 8) | (addr)) #define REGPAGE(reg) (((reg) >> 8) & 0xff) #define REGADDR(reg) ((reg) & 0xff) #define TDA_VERSION MKREG(0x00, 0x00) #define TDA_MAIN_CNTRL0 MKREG(0x00, 0x01) #define MAIN_CNTRL0_SR (1 << 0) #define TDA_VERSION_MSB MKREG(0x00, 0x02) #define TDA_SOFTRESET MKREG(0x00, 0x0a) #define SOFTRESET_I2C (1 << 1) #define SOFTRESET_AUDIO (1 << 0) #define TDA_DDC_CTRL MKREG(0x00, 0x0b) #define DDC_ENABLE 0 #define TDA_CCLK MKREG(0x00, 0x0c) #define CCLK_ENABLE 1 #define TDA_INT_FLAGS_2 MKREG(0x00, 0x11) #define INT_FLAGS_2_EDID_BLK_RD (1 << 1) #define TDA_VIP_CNTRL_0 MKREG(0x00, 0x20) #define TDA_VIP_CNTRL_1 MKREG(0x00, 0x21) #define TDA_VIP_CNTRL_2 MKREG(0x00, 0x22) #define TDA_VIP_CNTRL_3 MKREG(0x00, 0x23) #define VIP_CNTRL_3_SYNC_HS (2 << 4) #define VIP_CNTRL_3_V_TGL (1 << 2) #define VIP_CNTRL_3_H_TGL (1 << 1) #define TDA_VIP_CNTRL_4 MKREG(0x00, 0x24) #define VIP_CNTRL_4_BLANKIT_NDE (0 << 2) #define VIP_CNTRL_4_BLANKIT_HS_VS (1 << 2) #define VIP_CNTRL_4_BLANKIT_NHS_VS (2 << 2) #define VIP_CNTRL_4_BLANKIT_HE_VE (3 << 2) #define VIP_CNTRL_4_BLC_NONE (0 << 0) #define VIP_CNTRL_4_BLC_RGB444 (1 << 0) #define VIP_CNTRL_4_BLC_YUV444 (2 << 0) #define VIP_CNTRL_4_BLC_YUV422 (3 << 0) #define TDA_VIP_CNTRL_5 MKREG(0x00, 0x25) #define VIP_CNTRL_5_SP_CNT(n) (((n) & 3) << 1) #define TDA_MUX_VP_VIP_OUT MKREG(0x00, 0x27) #define TDA_MAT_CONTRL MKREG(0x00, 0x80) #define MAT_CONTRL_MAT_BP (1 << 2) #define TDA_VIDFORMAT MKREG(0x00, 0xa0) #define TDA_REFPIX_MSB MKREG(0x00, 0xa1) #define TDA_REFPIX_LSB MKREG(0x00, 0xa2) #define TDA_REFLINE_MSB MKREG(0x00, 0xa3) #define TDA_REFLINE_LSB MKREG(0x00, 0xa4) #define TDA_NPIX_MSB MKREG(0x00, 0xa5) #define TDA_NPIX_LSB MKREG(0x00, 0xa6) #define TDA_NLINE_MSB MKREG(0x00, 0xa7) #define TDA_NLINE_LSB MKREG(0x00, 0xa8) #define 
TDA_VS_LINE_STRT_1_MSB MKREG(0x00, 0xa9) #define TDA_VS_LINE_STRT_1_LSB MKREG(0x00, 0xaa) #define TDA_VS_PIX_STRT_1_MSB MKREG(0x00, 0xab) #define TDA_VS_PIX_STRT_1_LSB MKREG(0x00, 0xac) #define TDA_VS_LINE_END_1_MSB MKREG(0x00, 0xad) #define TDA_VS_LINE_END_1_LSB MKREG(0x00, 0xae) #define TDA_VS_PIX_END_1_MSB MKREG(0x00, 0xaf) #define TDA_VS_PIX_END_1_LSB MKREG(0x00, 0xb0) #define TDA_VS_LINE_STRT_2_MSB MKREG(0x00, 0xb1) #define TDA_VS_LINE_STRT_2_LSB MKREG(0x00, 0xb2) #define TDA_VS_PIX_STRT_2_MSB MKREG(0x00, 0xb3) #define TDA_VS_PIX_STRT_2_LSB MKREG(0x00, 0xb4) #define TDA_VS_LINE_END_2_MSB MKREG(0x00, 0xb5) #define TDA_VS_LINE_END_2_LSB MKREG(0x00, 0xb6) #define TDA_VS_PIX_END_2_MSB MKREG(0x00, 0xb7) #define TDA_VS_PIX_END_2_LSB MKREG(0x00, 0xb8) #define TDA_HS_PIX_START_MSB MKREG(0x00, 0xb9) #define TDA_HS_PIX_START_LSB MKREG(0x00, 0xba) #define TDA_HS_PIX_STOP_MSB MKREG(0x00, 0xbb) #define TDA_HS_PIX_STOP_LSB MKREG(0x00, 0xbc) #define TDA_VWIN_START_1_MSB MKREG(0x00, 0xbd) #define TDA_VWIN_START_1_LSB MKREG(0x00, 0xbe) #define TDA_VWIN_END_1_MSB MKREG(0x00, 0xbf) #define TDA_VWIN_END_1_LSB MKREG(0x00, 0xc0) #define TDA_VWIN_START_2_MSB MKREG(0x00, 0xc1) #define TDA_VWIN_START_2_LSB MKREG(0x00, 0xc2) #define TDA_VWIN_END_2_MSB MKREG(0x00, 0xc3) #define TDA_VWIN_END_2_LSB MKREG(0x00, 0xc4) #define TDA_DE_START_MSB MKREG(0x00, 0xc5) #define TDA_DE_START_LSB MKREG(0x00, 0xc6) #define TDA_DE_STOP_MSB MKREG(0x00, 0xc7) #define TDA_DE_STOP_LSB MKREG(0x00, 0xc8) #define TDA_TBG_CNTRL_0 MKREG(0x00, 0xca) #define TBG_CNTRL_0_SYNC_ONCE (1 << 7) #define TBG_CNTRL_0_SYNC_MTHD (1 << 6) #define TDA_TBG_CNTRL_1 MKREG(0x00, 0xcb) #define TBG_CNTRL_1_DWIN_DIS (1 << 6) #define TBG_CNTRL_1_TGL_EN (1 << 2) #define TBG_CNTRL_1_V_TGL (1 << 1) #define TBG_CNTRL_1_H_TGL (1 << 0) #define TDA_HVF_CNTRL_0 MKREG(0x00, 0xe4) #define HVF_CNTRL_0_PREFIL_NONE (0 << 2) #define HVF_CNTRL_0_INTPOL_BYPASS (0 << 0) #define TDA_HVF_CNTRL_1 MKREG(0x00, 0xe5) #define HVF_CNTRL_1_VQR(x) (((x) & 3) << 2) #define HVF_CNTRL_1_VQR_FULL HVF_CNTRL_1_VQR(0) #define TDA_ENABLE_SPACE MKREG(0x00, 0xd6) #define TDA_RPT_CNTRL MKREG(0x00, 0xf0) #define TDA_PLL_SERIAL_1 MKREG(0x02, 0x00) #define PLL_SERIAL_1_SRL_MAN_IP (1 << 6) #define TDA_PLL_SERIAL_2 MKREG(0x02, 0x01) #define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4) #define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 0x3) << 0) #define TDA_PLL_SERIAL_3 MKREG(0x02, 0x02) #define PLL_SERIAL_3_SRL_PXIN_SEL (1 << 4) #define PLL_SERIAL_3_SRL_DE (1 << 2) #define PLL_SERIAL_3_SRL_CCIR (1 << 0) #define TDA_SERIALIZER MKREG(0x02, 0x03) #define TDA_BUFFER_OUT MKREG(0x02, 0x04) #define TDA_PLL_SCG1 MKREG(0x02, 0x05) #define TDA_PLL_SCG2 MKREG(0x02, 0x06) #define TDA_PLL_SCGN1 MKREG(0x02, 0x07) #define TDA_PLL_SCGN2 MKREG(0x02, 0x08) #define TDA_PLL_SCGR1 MKREG(0x02, 0x09) #define TDA_PLL_SCGR2 MKREG(0x02, 0x0a) #define TDA_SEL_CLK MKREG(0x02, 0x11) #define SEL_CLK_ENA_SC_CLK (1 << 3) #define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1) #define SEL_CLK_SEL_CLK1 (1 << 0) #define TDA_ANA_GENERAL MKREG(0x02, 0x12) #define TDA_EDID_DATA0 MKREG(0x09, 0x00) #define TDA_EDID_CTRL MKREG(0x09, 0xfa) #define TDA_DDC_ADDR MKREG(0x09, 0xfb) #define TDA_DDC_OFFS MKREG(0x09, 0xfc) #define TDA_DDC_SEGM_ADDR MKREG(0x09, 0xfd) #define TDA_DDC_SEGM MKREG(0x09, 0xfe) #define TDA_IF_VSP MKREG(0x10, 0x20) #define TDA_IF_AVI MKREG(0x10, 0x40) #define TDA_IF_SPD MKREG(0x10, 0x60) #define TDA_IF_AUD MKREG(0x10, 0x80) #define TDA_IF_MPS MKREG(0x10, 0xa0) #define TDA_ENC_CNTRL MKREG(0x11, 0x0d) #define ENC_CNTRL_DVI_MODE (0 << 2) #define 
ENC_CNTRL_HDMI_MODE (1 << 2) #define TDA_DIP_IF_FLAGS MKREG(0x11, 0x0f) #define DIP_IF_FLAGS_IF5 (1 << 5) #define DIP_IF_FLAGS_IF4 (1 << 4) #define DIP_IF_FLAGS_IF3 (1 << 3) #define DIP_IF_FLAGS_IF2 (1 << 2) /* AVI IF on page 10h */ #define DIP_IF_FLAGS_IF1 (1 << 1) #define TDA_TX3 MKREG(0x12, 0x9a) #define TDA_TX4 MKREG(0x12, 0x9b) #define TX4_PD_RAM (1 << 1) #define TDA_HDCP_TX33 MKREG(0x12, 0xb8) #define HDCP_TX33_HDMI (1 << 1) #define TDA_CURPAGE_ADDR 0xff #define TDA_CEC_ENAMODS 0xff #define ENAMODS_RXSENS (1 << 2) #define ENAMODS_HDMI (1 << 1) #define TDA_CEC_FRO_IM_CLK_CTRL 0xfb #define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7) #define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1) /* EDID reading */ #define EDID_LENGTH 0x80 #define MAX_READ_ATTEMPTS 100 /* EDID fields */ #define EDID_MODES0 35 #define EDID_MODES1 36 #define EDID_TIMING_START 38 #define EDID_TIMING_END 54 #define EDID_TIMING_X(v) (((v) + 31) * 8) #define EDID_FREQ(v) (((v) & 0x3f) + 60) #define EDID_RATIO(v) (((v) >> 6) & 0x3) #define EDID_RATIO_10x16 0 #define EDID_RATIO_3x4 1 #define EDID_RATIO_4x5 2 #define EDID_RATIO_9x16 3 #define TDA19988 0x0301 struct tda19988_softc { device_t sc_dev; uint32_t sc_addr; uint32_t sc_cec_addr; uint16_t sc_version; struct intr_config_hook enum_hook; int sc_current_page; uint8_t *sc_edid; uint32_t sc_edid_len; }; static int tda19988_set_page(struct tda19988_softc *sc, uint8_t page) { uint8_t addr = TDA_CURPAGE_ADDR; uint8_t cmd[2]; int result; struct iic_msg msg[] = { { sc->sc_addr, IIC_M_WR, 2, cmd }, }; cmd[0] = addr; cmd[1] = page; result = (iicbus_transfer(sc->sc_dev, msg, 1)); if (result) printf("tda19988_set_page failed: %d\n", result); else sc->sc_current_page = page; return (result); } static int tda19988_cec_read(struct tda19988_softc *sc, uint8_t addr, uint8_t *data) { int result; struct iic_msg msg[] = { { sc->sc_cec_addr, IIC_M_WR, 1, &addr }, { sc->sc_cec_addr, IIC_M_RD, 1, data }, }; result = iicbus_transfer(sc->sc_dev, msg, 2); if (result) printf("tda19988_cec_read failed: %d\n", result); return (result); } static int tda19988_cec_write(struct tda19988_softc *sc, uint8_t address, uint8_t data) { uint8_t cmd[2]; int result; struct iic_msg msg[] = { { sc->sc_cec_addr, IIC_M_WR, 2, cmd }, }; cmd[0] = address; cmd[1] = data; result = iicbus_transfer(sc->sc_dev, msg, 1); if (result) printf("tda19988_cec_write failed: %d\n", result); return (result); } static int tda19988_block_read(struct tda19988_softc *sc, uint16_t addr, uint8_t *data, int len) { uint8_t reg; int result; struct iic_msg msg[] = { { sc->sc_addr, IIC_M_WR, 1, &reg }, { sc->sc_addr, IIC_M_RD, len, data }, }; reg = REGADDR(addr); if (sc->sc_current_page != REGPAGE(addr)) tda19988_set_page(sc, REGPAGE(addr)); result = (iicbus_transfer(sc->sc_dev, msg, 2)); if (result) device_printf(sc->sc_dev, "tda19988_block_read failed: %d\n", result); return (result); } static int tda19988_reg_read(struct tda19988_softc *sc, uint16_t addr, uint8_t *data) { uint8_t reg; int result; struct iic_msg msg[] = { { sc->sc_addr, IIC_M_WR, 1, &reg }, { sc->sc_addr, IIC_M_RD, 1, data }, }; reg = REGADDR(addr); if (sc->sc_current_page != REGPAGE(addr)) tda19988_set_page(sc, REGPAGE(addr)); result = (iicbus_transfer(sc->sc_dev, msg, 2)); if (result) device_printf(sc->sc_dev, "tda19988_reg_read failed: %d\n", result); return (result); } static int tda19988_reg_write(struct tda19988_softc *sc, uint16_t address, uint8_t data) { uint8_t cmd[2]; int result; struct iic_msg msg[] = { { sc->sc_addr, IIC_M_WR, 2, cmd }, }; cmd[0] = REGADDR(address); cmd[1] =
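	/*
	 * Register constants carry their page in the upper byte; e.g.
	 * TDA_PLL_SERIAL_2 == MKREG(0x02, 0x01), so REGPAGE() yields page
	 * 0x02, latched through TDA_CURPAGE_ADDR only when it differs
	 * from the cached sc_current_page, while REGADDR() yields the
	 * offset 0x01 within that page.
	 */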
data; if (sc->sc_current_page != REGPAGE(address)) tda19988_set_page(sc, REGPAGE(address)); result = iicbus_transfer(sc->sc_dev, msg, 1); if (result) device_printf(sc->sc_dev, "tda19988_reg_write failed: %d\n", result); return (result); } static int tda19988_reg_write2(struct tda19988_softc *sc, uint16_t address, uint16_t data) { uint8_t cmd[3]; int result; struct iic_msg msg[] = { { sc->sc_addr, IIC_M_WR, 3, cmd }, }; cmd[0] = REGADDR(address); cmd[1] = (data >> 8); cmd[2] = (data & 0xff); if (sc->sc_current_page != REGPAGE(address)) tda19988_set_page(sc, REGPAGE(address)); result = iicbus_transfer(sc->sc_dev, msg, 1); if (result) device_printf(sc->sc_dev, "tda19988_reg_write2 failed: %d\n", result); return (result); } static void tda19988_reg_set(struct tda19988_softc *sc, uint16_t addr, uint8_t flags) { uint8_t data; tda19988_reg_read(sc, addr, &data); data |= flags; tda19988_reg_write(sc, addr, data); } static void tda19988_reg_clear(struct tda19988_softc *sc, uint16_t addr, uint8_t flags) { uint8_t data; tda19988_reg_read(sc, addr, &data); data &= ~flags; tda19988_reg_write(sc, addr, data); } static int tda19988_probe(device_t dev) { if (!ofw_bus_is_compatible(dev, "nxp,tda998x")) return (ENXIO); return (BUS_PROBE_DEFAULT); } static void tda19988_init_encoder(struct tda19988_softc *sc, const struct videomode *mode) { uint16_t ref_pix, ref_line, n_pix, n_line; uint16_t hs_pix_start, hs_pix_stop; uint16_t vs1_pix_start, vs1_pix_stop; uint16_t vs1_line_start, vs1_line_end; uint16_t vs2_pix_start, vs2_pix_stop; uint16_t vs2_line_start, vs2_line_end; uint16_t vwin1_line_start, vwin1_line_end; uint16_t vwin2_line_start, vwin2_line_end; uint16_t de_start, de_stop; uint8_t reg, div; n_pix = mode->htotal; n_line = mode->vtotal; hs_pix_stop = mode->hsync_end - mode->hdisplay; hs_pix_start = mode->hsync_start - mode->hdisplay; de_stop = mode->htotal; de_start = mode->htotal - mode->hdisplay; ref_pix = hs_pix_start + 3; if (mode->flags & VID_HSKEW) ref_pix += mode->hskew; if ((mode->flags & VID_INTERLACE) == 0) { ref_line = 1 + mode->vsync_start - mode->vdisplay; vwin1_line_start = mode->vtotal - mode->vdisplay - 1; vwin1_line_end = vwin1_line_start + mode->vdisplay; vs1_pix_start = vs1_pix_stop = hs_pix_start; vs1_line_start = mode->vsync_start - mode->vdisplay; vs1_line_end = vs1_line_start + mode->vsync_end - mode->vsync_start; vwin2_line_start = vwin2_line_end = 0; vs2_pix_start = vs2_pix_stop = 0; vs2_line_start = vs2_line_end = 0; } else { ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2; vwin1_line_start = (mode->vtotal - mode->vdisplay)/2; vwin1_line_end = vwin1_line_start + mode->vdisplay/2; vs1_pix_start = vs1_pix_stop = hs_pix_start; vs1_line_start = (mode->vsync_start - mode->vdisplay)/2; vs1_line_end = vs1_line_start + (mode->vsync_end - mode->vsync_start)/2; vwin2_line_start = vwin1_line_start + mode->vtotal/2; vwin2_line_end = vwin2_line_start + mode->vdisplay/2; vs2_pix_start = vs2_pix_stop = hs_pix_start + mode->htotal/2; vs2_line_start = vs1_line_start + mode->vtotal/2 ; vs2_line_end = vs2_line_start + (mode->vsync_end - mode->vsync_start)/2; } div = 148500 / mode->dot_clock; if (div != 0) { div--; if (div > 3) div = 3; } /* set HDMI HDCP mode off */ tda19988_reg_set(sc, TDA_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS); tda19988_reg_clear(sc, TDA_HDCP_TX33, HDCP_TX33_HDMI); tda19988_reg_write(sc, TDA_ENC_CNTRL, ENC_CNTRL_DVI_MODE); /* no pre-filter or interpolator */ tda19988_reg_write(sc, TDA_HVF_CNTRL_0, HVF_CNTRL_0_INTPOL_BYPASS | HVF_CNTRL_0_PREFIL_NONE); tda19988_reg_write(sc, 
TDA_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0)); tda19988_reg_write(sc, TDA_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT_NDE | VIP_CNTRL_4_BLC_NONE); tda19988_reg_clear(sc, TDA_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR); tda19988_reg_clear(sc, TDA_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IP); tda19988_reg_clear(sc, TDA_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE); tda19988_reg_write(sc, TDA_SERIALIZER, 0); tda19988_reg_write(sc, TDA_HVF_CNTRL_1, HVF_CNTRL_1_VQR_FULL); tda19988_reg_write(sc, TDA_RPT_CNTRL, 0); tda19988_reg_write(sc, TDA_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) | SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); tda19988_reg_write(sc, TDA_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) | PLL_SERIAL_2_SRL_PR(0)); tda19988_reg_set(sc, TDA_MAT_CONTRL, MAT_CONTRL_MAT_BP); tda19988_reg_write(sc, TDA_ANA_GENERAL, 0x09); tda19988_reg_clear(sc, TDA_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD); /* * Sync on rising HSYNC/VSYNC */ reg = VIP_CNTRL_3_SYNC_HS; if (mode->flags & VID_NHSYNC) reg |= VIP_CNTRL_3_H_TGL; if (mode->flags & VID_NVSYNC) reg |= VIP_CNTRL_3_V_TGL; tda19988_reg_write(sc, TDA_VIP_CNTRL_3, reg); reg = TBG_CNTRL_1_TGL_EN; if (mode->flags & VID_NHSYNC) reg |= TBG_CNTRL_1_H_TGL; if (mode->flags & VID_NVSYNC) reg |= TBG_CNTRL_1_V_TGL; tda19988_reg_write(sc, TDA_TBG_CNTRL_1, reg); /* Program timing */ tda19988_reg_write(sc, TDA_VIDFORMAT, 0x00); tda19988_reg_write2(sc, TDA_REFPIX_MSB, ref_pix); tda19988_reg_write2(sc, TDA_REFLINE_MSB, ref_line); tda19988_reg_write2(sc, TDA_NPIX_MSB, n_pix); tda19988_reg_write2(sc, TDA_NLINE_MSB, n_line); tda19988_reg_write2(sc, TDA_VS_LINE_STRT_1_MSB, vs1_line_start); tda19988_reg_write2(sc, TDA_VS_PIX_STRT_1_MSB, vs1_pix_start); tda19988_reg_write2(sc, TDA_VS_LINE_END_1_MSB, vs1_line_end); tda19988_reg_write2(sc, TDA_VS_PIX_END_1_MSB, vs1_pix_stop); tda19988_reg_write2(sc, TDA_VS_LINE_STRT_2_MSB, vs2_line_start); tda19988_reg_write2(sc, TDA_VS_PIX_STRT_2_MSB, vs2_pix_start); tda19988_reg_write2(sc, TDA_VS_LINE_END_2_MSB, vs2_line_end); tda19988_reg_write2(sc, TDA_VS_PIX_END_2_MSB, vs2_pix_stop); tda19988_reg_write2(sc, TDA_HS_PIX_START_MSB, hs_pix_start); tda19988_reg_write2(sc, TDA_HS_PIX_STOP_MSB, hs_pix_stop); tda19988_reg_write2(sc, TDA_VWIN_START_1_MSB, vwin1_line_start); tda19988_reg_write2(sc, TDA_VWIN_END_1_MSB, vwin1_line_end); tda19988_reg_write2(sc, TDA_VWIN_START_2_MSB, vwin2_line_start); tda19988_reg_write2(sc, TDA_VWIN_END_2_MSB, vwin2_line_end); tda19988_reg_write2(sc, TDA_DE_START_MSB, de_start); tda19988_reg_write2(sc, TDA_DE_STOP_MSB, de_stop); if (sc->sc_version == TDA19988) tda19988_reg_write(sc, TDA_ENABLE_SPACE, 0x00); /* must be last register set */ tda19988_reg_clear(sc, TDA_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE); } static int tda19988_read_edid_block(struct tda19988_softc *sc, uint8_t *buf, int block) { int attempt, err; uint8_t data; err = 0; tda19988_reg_set(sc, TDA_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); /* Block 0 */ tda19988_reg_write(sc, TDA_DDC_ADDR, 0xa0); tda19988_reg_write(sc, TDA_DDC_OFFS, (block % 2) ? 
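	/*
	 * DDC segment addressing: each 256-byte segment holds two
	 * 128-byte EDID blocks, so e.g. block 3 is fetched from
	 * segment 1 (block / 2) at offset 128 (block % 2), while
	 * block 0 comes from segment 0 at offset 0.
	 */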
128 : 0); tda19988_reg_write(sc, TDA_DDC_SEGM_ADDR, 0x60); tda19988_reg_write(sc, TDA_DDC_SEGM, block / 2); tda19988_reg_write(sc, TDA_EDID_CTRL, 1); tda19988_reg_write(sc, TDA_EDID_CTRL, 0); data = 0; for (attempt = 0; attempt < MAX_READ_ATTEMPTS; attempt++) { tda19988_reg_read(sc, TDA_INT_FLAGS_2, &data); if (data & INT_FLAGS_2_EDID_BLK_RD) break; pause("EDID", 1); } if (attempt == MAX_READ_ATTEMPTS) { err = -1; goto done; } if (tda19988_block_read(sc, TDA_EDID_DATA0, buf, EDID_LENGTH) != 0) { err = -1; goto done; } done: tda19988_reg_clear(sc, TDA_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); return (err); } static int tda19988_read_edid(struct tda19988_softc *sc) { int err; int blocks, i; uint8_t *buf; err = 0; if (sc->sc_version == TDA19988) tda19988_reg_clear(sc, TDA_TX4, TX4_PD_RAM); err = tda19988_read_edid_block(sc, sc->sc_edid, 0); if (err) goto done; blocks = sc->sc_edid[0x7e]; if (blocks > 0) { sc->sc_edid = realloc(sc->sc_edid, EDID_LENGTH*(blocks+1), M_DEVBUF, M_WAITOK); sc->sc_edid_len = EDID_LENGTH*(blocks+1); for (i = 0; i < blocks; i++) { /* TODO: check validity */ buf = sc->sc_edid + EDID_LENGTH*(i+1); err = tda19988_read_edid_block(sc, buf, i); if (err) goto done; } } EVENTHANDLER_INVOKE(hdmi_event, sc->sc_dev, HDMI_EVENT_CONNECTED); done: if (sc->sc_version == TDA19988) tda19988_reg_set(sc, TDA_TX4, TX4_PD_RAM); return (err); } static void tda19988_start(void *xdev) { struct tda19988_softc *sc; device_t dev = (device_t)xdev; uint8_t data; uint16_t version; sc = device_get_softc(dev); tda19988_cec_write(sc, TDA_CEC_ENAMODS, ENAMODS_RXSENS | ENAMODS_HDMI); DELAY(1000); tda19988_cec_read(sc, 0xfe, &data); /* Reset core */ tda19988_reg_set(sc, TDA_SOFTRESET, 3); DELAY(100); tda19988_reg_clear(sc, TDA_SOFTRESET, 3); DELAY(100); /* reset transmitter: */ tda19988_reg_set(sc, TDA_MAIN_CNTRL0, MAIN_CNTRL0_SR); tda19988_reg_clear(sc, TDA_MAIN_CNTRL0, MAIN_CNTRL0_SR); /* PLL registers common configuration */ tda19988_reg_write(sc, TDA_PLL_SERIAL_1, 0x00); tda19988_reg_write(sc, TDA_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1)); tda19988_reg_write(sc, TDA_PLL_SERIAL_3, 0x00); tda19988_reg_write(sc, TDA_SERIALIZER, 0x00); tda19988_reg_write(sc, TDA_BUFFER_OUT, 0x00); tda19988_reg_write(sc, TDA_PLL_SCG1, 0x00); tda19988_reg_write(sc, TDA_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK); tda19988_reg_write(sc, TDA_PLL_SCGN1, 0xfa); tda19988_reg_write(sc, TDA_PLL_SCGN2, 0x00); tda19988_reg_write(sc, TDA_PLL_SCGR1, 0x5b); tda19988_reg_write(sc, TDA_PLL_SCGR2, 0x00); tda19988_reg_write(sc, TDA_PLL_SCG2, 0x10); /* Write the default value MUX register */ tda19988_reg_write(sc, TDA_MUX_VP_VIP_OUT, 0x24); version = 0; tda19988_reg_read(sc, TDA_VERSION, &data); version |= data; tda19988_reg_read(sc, TDA_VERSION_MSB, &data); version |= (data << 8); /* Clear feature bits */ sc->sc_version = version & ~0x30; switch (sc->sc_version) { case TDA19988: device_printf(dev, "TDA19988\n"); break; default: device_printf(dev, "Unknown device: %04x\n", sc->sc_version); goto done; } tda19988_reg_write(sc, TDA_DDC_CTRL, DDC_ENABLE); tda19988_reg_write(sc, TDA_TX3, 39); tda19988_cec_write(sc, TDA_CEC_FRO_IM_CLK_CTRL, CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL); if (tda19988_read_edid(sc) < 0) { device_printf(dev, "failed to read EDID\n"); goto done; } /* Default values for RGB 4:4:4 mapping */ tda19988_reg_write(sc, TDA_VIP_CNTRL_0, 0x23); tda19988_reg_write(sc, TDA_VIP_CNTRL_1, 0x01); tda19988_reg_write(sc, TDA_VIP_CNTRL_2, 0x45); done: config_intrhook_disestablish(&sc->enum_hook); } static int 
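/*
 * Attach only records the i2c address, allocates the initial EDID buffer
 * and schedules tda19988_start() through a config_intrhook, presumably so
 * that the encoder reset and EDID read run only once interrupt-driven i2c
 * transfers are available later in boot.
 */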
tda19988_attach(device_t dev) { struct tda19988_softc *sc; phandle_t node; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_addr = iicbus_get_addr(dev); sc->sc_cec_addr = (0x34 << 1); /* hardcoded */ sc->sc_edid = malloc(EDID_LENGTH, M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_edid_len = EDID_LENGTH; device_set_desc(dev, "NXP TDA19988 HDMI transmitter"); sc->enum_hook.ich_func = tda19988_start; sc->enum_hook.ich_arg = dev; if (config_intrhook_establish(&sc->enum_hook) != 0) return (ENOMEM); node = ofw_bus_get_node(dev); OF_device_register_xref(OF_xref_from_node(node), dev); return (0); } static int tda19988_detach(device_t dev) { /* XXX: Do not let the driver unload */ return (EBUSY); } static int tda19988_get_edid(device_t dev, uint8_t **edid, uint32_t *edid_len) { struct tda19988_softc *sc; sc = device_get_softc(dev); if (sc->sc_edid) { *edid = sc->sc_edid; *edid_len = sc->sc_edid_len; } else return (ENXIO); return (0); } static int tda19988_set_videomode(device_t dev, const struct videomode *mode) { struct tda19988_softc *sc; sc = device_get_softc(dev); tda19988_init_encoder(sc, mode); return (0); } static device_method_t tda_methods[] = { DEVMETHOD(device_probe, tda19988_probe), DEVMETHOD(device_attach, tda19988_attach), DEVMETHOD(device_detach, tda19988_detach), /* HDMI methods */ DEVMETHOD(hdmi_get_edid, tda19988_get_edid), DEVMETHOD(hdmi_set_videomode, tda19988_set_videomode), {0, 0}, }; static driver_t tda_driver = { "tda", tda_methods, sizeof(struct tda19988_softc), }; static devclass_t tda_devclass; DRIVER_MODULE(tda, iicbus, tda_driver, tda_devclass, 0, 0); MODULE_VERSION(tda, 1); MODULE_DEPEND(tda, iicbus, 1, 1, 1); Index: user/ngie/bug-237403/sys/compat/linuxkpi/common/src/linux_compat.c =================================================================== --- user/ngie/bug-237403/sys/compat/linuxkpi/common/src/linux_compat.c (revision 348028) +++ user/ngie/bug-237403/sys/compat/linuxkpi/common/src/linux_compat.c (revision 348029) @@ -1,2459 +1,2460 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2018 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_stack.h" #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters"); int linuxkpi_debug; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); #include /* Undo Linux compat changes. */ #undef RB_ROOT #undef file #undef cdev #define RB_ROOT(head) (head)->rbh_root static void linux_cdev_deref(struct linux_cdev *ldev); static struct vm_area_struct *linux_cdev_handle_find(void *handle); struct kobject linux_class_root; struct device linux_root_device; struct class linux_class_misc; struct list_head pci_drivers; struct list_head pci_devices; spinlock_t pci_lock; unsigned long linux_timer_hz_mask; int panic_cmp(struct rb_node *one, struct rb_node *two) { panic("no cmp"); } RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) { va_list tmp_va; int len; char *old; char *name; char dummy; old = kobj->name; if (old && fmt == NULL) return (0); /* compute length of string */ va_copy(tmp_va, args); len = vsnprintf(&dummy, 0, fmt, tmp_va); va_end(tmp_va); /* account for zero termination */ len++; /* check for error */ if (len < 1) return (-EINVAL); /* allocate memory for string */ name = kzalloc(len, GFP_KERNEL); if (name == NULL) return (-ENOMEM); vsnprintf(name, len, fmt, args); kobj->name = name; /* free old string */ kfree(old); /* filter new string */ for (; *name != '\0'; name++) if (*name == '/') *name = '!'; return (0); } int kobject_set_name(struct kobject *kobj, const char *fmt, ...) { va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); return (error); } static int kobject_add_complete(struct kobject *kobj, struct kobject *parent) { const struct kobj_type *t; int error; kobj->parent = parent; error = sysfs_create_dir(kobj); if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { struct attribute **attr; t = kobj->ktype; for (attr = t->default_attrs; *attr != NULL; attr++) { error = sysfs_create_file(kobj, *attr); if (error) break; } if (error) sysfs_remove_dir(kobj); } return (error); } int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
{ va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } void linux_kobject_release(struct kref *kref) { struct kobject *kobj; char *name; kobj = container_of(kref, struct kobject, kref); sysfs_remove_dir(kobj); name = kobj->name; if (kobj->ktype && kobj->ktype->release) kobj->ktype->release(kobj); kfree(name); } static void linux_kobject_kfree(struct kobject *kobj) { kfree(kobj); } static void linux_kobject_kfree_name(struct kobject *kobj) { if (kobj) { kfree(kobj->name); } } const struct kobj_type linux_kfree_type = { .release = linux_kobject_kfree }; static void linux_device_release(struct device *dev) { pr_debug("linux_device_release: %s\n", dev_name(dev)); kfree(dev); } static ssize_t linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct class, kobj), dattr, buf); return (error); } static ssize_t linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct class, kobj), dattr, buf, count); return (error); } static void linux_class_release(struct kobject *kobj) { struct class *class; class = container_of(kobj, struct class, kobj); if (class->class_release) class->class_release(class); } static const struct sysfs_ops linux_class_sysfs = { .show = linux_class_show, .store = linux_class_store, }; const struct kobj_type linux_class_ktype = { .release = linux_class_release, .sysfs_ops = &linux_class_sysfs }; static void linux_dev_release(struct kobject *kobj) { struct device *dev; dev = container_of(kobj, struct device, kobj); /* This is the precedence defined by linux. */ if (dev->release) dev->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); } static ssize_t linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct device, kobj), dattr, buf); return (error); } static ssize_t linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct device, kobj), dattr, buf, count); return (error); } static const struct sysfs_ops linux_dev_sysfs = { .show = linux_dev_show, .store = linux_dev_store, }; const struct kobj_type linux_dev_ktype = { .release = linux_dev_release, .sysfs_ops = &linux_dev_sysfs }; struct device * device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) 
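/*
 * Minimal usage sketch (hypothetical values):
 *
 *	dev = device_create(&linux_class_misc, NULL, MKDEV(10, 63), NULL,
 *	    "example%d", 0);
 *
 * allocates the device, gives its kobject the formatted name "example0"
 * and registers it in a single call.
 */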
{ struct device *dev; va_list args; dev = kzalloc(sizeof(*dev), M_WAITOK); dev->parent = parent; dev->class = class; dev->devt = devt; dev->driver_data = drvdata; dev->release = linux_device_release; va_start(args, fmt); kobject_set_name_vargs(&dev->kobj, fmt, args); va_end(args); device_register(dev); return (dev); } int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...) { va_list args; int error; kobject_init(kobj, ktype); kobj->ktype = ktype; kobj->parent = parent; kobj->name = NULL; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } static void linux_kq_lock(void *arg) { spinlock_t *s = arg; spin_lock(s); } static void linux_kq_unlock(void *arg) { spinlock_t *s = arg; spin_unlock(s); } static void linux_kq_lock_owned(void *arg) { #ifdef INVARIANTS spinlock_t *s = arg; mtx_assert(&s->m, MA_OWNED); #endif } static void linux_kq_lock_unowned(void *arg) { #ifdef INVARIANTS spinlock_t *s = arg; mtx_assert(&s->m, MA_NOTOWNED); #endif } static void linux_file_kqfilter_poll(struct linux_file *, int); struct linux_file * linux_file_alloc(void) { struct linux_file *filp; filp = kzalloc(sizeof(*filp), GFP_KERNEL); /* set initial refcount */ filp->f_count = 1; /* setup fields needed by kqueue support */ spin_lock_init(&filp->f_kqlock); knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, linux_kq_lock, linux_kq_unlock, linux_kq_lock_owned, linux_kq_lock_unowned); return (filp); } void linux_file_free(struct linux_file *filp) { if (filp->_file == NULL) { if (filp->f_shmem != NULL) vm_object_deallocate(filp->f_shmem); kfree(filp); } else { /* * The close method of the character device or file * will free the linux_file structure: */ _fdrop(filp->_file, curthread); } } static int linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; vm_page_t page; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake * page, update it with the new physical * address. */ page = *mres; vm_page_updatefake(page, paddr, vm_obj->memattr); } else { /* * Replace the passed in "mres" page with our * own fake page and free up all of the * original pages. */ VM_OBJECT_WUNLOCK(vm_obj); page = vm_page_getfake(paddr, vm_obj->memattr); VM_OBJECT_WLOCK(vm_obj); vm_page_replace_checked(page, vm_obj, (*mres)->pindex, *mres); vm_page_lock(*mres); vm_page_free(*mres); vm_page_unlock(*mres); *mres = page; } page->valid = VM_PAGE_BITS_ALL; return (VM_PAGER_OK); } return (VM_PAGER_FAIL); } static int linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) { struct vm_area_struct *vmap; int err; linux_set_current(curthread); /* get VM area structure */ vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); VM_OBJECT_WUNLOCK(vm_obj); down_write(&vmap->vm_mm->mmap_sem); if (unlikely(vmap->vm_ops == NULL)) { err = VM_FAULT_SIGBUS; } else { struct vm_fault vmf; /* fill out VM fault structure */ vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); vmf.flags = (fault_type & VM_PROT_WRITE) ?
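		/*
		 * Only the write intent is forwarded to the Linux fault
		 * handler; read and execute faults leave vmf.flags at 0.
		 */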
FAULT_FLAG_WRITE : 0; vmf.pgoff = 0; vmf.page = NULL; vmf.vma = vmap; vmap->vm_pfn_count = 0; vmap->vm_pfn_pcount = &vmap->vm_pfn_count; vmap->vm_obj = vm_obj; err = vmap->vm_ops->fault(vmap, &vmf); while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { kern_yield(PRI_USER); err = vmap->vm_ops->fault(vmap, &vmf); } } /* translate return code */ switch (err) { case VM_FAULT_OOM: err = VM_PAGER_AGAIN; break; case VM_FAULT_SIGBUS: err = VM_PAGER_BAD; break; case VM_FAULT_NOPAGE: /* * By contract the fault handler will return having * busied all the pages itself. If pidx is already * found in the object, it will simply xbusy the first * page and return with vm_pfn_count set to 1. */ *first = vmap->vm_pfn_first; *last = *first + vmap->vm_pfn_count - 1; err = VM_PAGER_OK; break; default: err = VM_PAGER_ERROR; break; } up_write(&vmap->vm_mm->mmap_sem); VM_OBJECT_WLOCK(vm_obj); return (err); } static struct rwlock linux_vma_lock; static TAILQ_HEAD(, vm_area_struct) linux_vma_head = TAILQ_HEAD_INITIALIZER(linux_vma_head); static void linux_cdev_handle_free(struct vm_area_struct *vmap) { /* Drop reference on vm_file */ if (vmap->vm_file != NULL) fput(vmap->vm_file); /* Drop reference on mm_struct */ mmput(vmap->vm_mm); kfree(vmap); } static void linux_cdev_handle_remove(struct vm_area_struct *vmap) { rw_wlock(&linux_vma_lock); TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); rw_wunlock(&linux_vma_lock); } static struct vm_area_struct * linux_cdev_handle_find(void *handle) { struct vm_area_struct *vmap; rw_rlock(&linux_vma_lock); TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { if (vmap->vm_private_data == handle) break; } rw_runlock(&linux_vma_lock); return (vmap); } static int linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { MPASS(linux_cdev_handle_find(handle) != NULL); *color = 0; return (0); } static void linux_cdev_pager_dtor(void *handle) { const struct vm_operations_struct *vm_ops; struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(handle); MPASS(vmap != NULL); /* * Remove handle before calling close operation to prevent * other threads from reusing the handle pointer. 
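 *
 * A sketch of the race this ordering avoids (an interpretation of the
 * surrounding code, not a documented failure case): if the vmap were
 * still on linux_vma_head, a concurrent mmap() of the same character
 * device could find it via linux_cdev_handle_find() while
 * vm_ops->close() is tearing it down, and would then be left holding
 * a pointer that linux_cdev_handle_free() is about to release.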
*/ linux_cdev_handle_remove(vmap); down_write(&vmap->vm_mm->mmap_sem); vm_ops = vmap->vm_ops; if (likely(vm_ops != NULL)) vm_ops->close(vmap); up_write(&vmap->vm_mm->mmap_sem); linux_cdev_handle_free(vmap); } static struct cdev_pager_ops linux_cdev_pager_ops[2] = { { /* OBJT_MGTDEVICE */ .cdev_pg_populate = linux_cdev_pager_populate, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, { /* OBJT_DEVICE */ .cdev_pg_fault = linux_cdev_pager_fault, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, }; int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) { vm_object_t obj; vm_page_t m; obj = vma->vm_obj; if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) return (-ENOTSUP); VM_OBJECT_RLOCK(obj); for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); m != NULL && m->pindex < OFF_TO_IDX(address + size); m = TAILQ_NEXT(m, listq)) pmap_remove_all(m); VM_OBJECT_RUNLOCK(obj); return (0); } static struct file_operations dummy_ldev_ops = { /* XXXKIB */ }; static struct linux_cdev dummy_ldev = { .ops = &dummy_ldev_ops, }; #define LDEV_SI_DTR 0x0001 #define LDEV_SI_REF 0x0002 static void linux_get_fop(struct linux_file *filp, const struct file_operations **fop, struct linux_cdev **dev) { struct linux_cdev *ldev; u_int siref; ldev = filp->f_cdev; *fop = filp->f_op; if (ldev != NULL) { for (siref = ldev->siref;;) { if ((siref & LDEV_SI_DTR) != 0) { ldev = &dummy_ldev; siref = ldev->siref; *fop = ldev->ops; MPASS((ldev->siref & LDEV_SI_DTR) == 0); } else if (atomic_fcmpset_int(&ldev->siref, &siref, siref + LDEV_SI_REF)) { break; } } } *dev = ldev; } static void linux_drop_fop(struct linux_cdev *ldev) { if (ldev == NULL) return; MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); atomic_subtract_int(&ldev->siref, LDEV_SI_REF); } #define OPW(fp,td,code) ({ \ struct file *__fpop; \ __typeof(code) __retval; \ \ __fpop = (td)->td_fpop; \ (td)->td_fpop = (fp); \ __retval = (code); \ (td)->td_fpop = __fpop; \ __retval; \ }) static int linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file) { struct linux_cdev *ldev; struct linux_file *filp; const struct file_operations *fop; int error; ldev = dev->si_drv1; filp = linux_file_alloc(); filp->f_dentry = &filp->f_dentry_store; filp->f_op = ldev->ops; filp->f_mode = file->f_flag; filp->f_flags = file->f_flag; filp->f_vnode = file->f_vnode; filp->_file = file; refcount_acquire(&ldev->refs); filp->f_cdev = ldev; linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->open != NULL) { error = -fop->open(file->f_vnode, filp); if (error != 0) { linux_drop_fop(ldev); linux_cdev_deref(filp->f_cdev); kfree(filp); return (error); } } /* hold on to the vnode - used for fstat() */ vhold(filp->f_vnode); /* release the file from devfs */ finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); linux_drop_fop(ldev); return (ENXIO); } #define LINUX_IOCTL_MIN_PTR 0x10000UL #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) static inline int linux_remap_address(void **uaddr, size_t len) { uintptr_t uaddr_val = (uintptr_t)(*uaddr); if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && uaddr_val < LINUX_IOCTL_MAX_PTR)) { struct task_struct *pts = current; if (pts == NULL) { *uaddr = NULL; return (1); } /* compute data offset */ uaddr_val -= LINUX_IOCTL_MIN_PTR; /* check that length is within bounds */ if ((len > IOCPARM_MAX) || (uaddr_val + len) > pts->bsd_ioctl_len) { *uaddr = NULL; return (1); } /* re-add kernel buffer address */ uaddr_val += 
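		/*
		 * Example: a "user" pointer of LINUX_IOCTL_MIN_PTR + 8
		 * handed out by linux_file_ioctl_sub() resolves here to
		 * bsd_ioctl_data + 8, i.e. eight bytes into the kernel
		 * ioctl buffer (offset chosen for illustration).
		 */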
(uintptr_t)pts->bsd_ioctl_data; /* update address location */ *uaddr = (void *)uaddr_val; return (1); } return (0); } int linux_copyin(const void *uaddr, void *kaddr, size_t len) { if (linux_remap_address(__DECONST(void **, &uaddr), len)) { if (uaddr == NULL) return (-EFAULT); memcpy(kaddr, uaddr, len); return (0); } return (-copyin(uaddr, kaddr, len)); } int linux_copyout(const void *kaddr, void *uaddr, size_t len) { if (linux_remap_address(&uaddr, len)) { if (uaddr == NULL) return (-EFAULT); memcpy(uaddr, kaddr, len); return (0); } return (-copyout(kaddr, uaddr, len)); } size_t linux_clear_user(void *_uaddr, size_t _len) { uint8_t *uaddr = _uaddr; size_t len = _len; /* make sure uaddr is aligned before going into the fast loop */ while (((uintptr_t)uaddr & 7) != 0 && len > 7) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } /* zero 8 bytes at a time */ while (len > 7) { #ifdef __LP64__ if (suword64(uaddr, 0)) return (_len); #else if (suword32(uaddr, 0)) return (_len); if (suword32(uaddr + 4, 0)) return (_len); #endif uaddr += 8; len -= 8; } /* zero fill end, if any */ while (len > 0) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } return (0); } int linux_access_ok(const void *uaddr, size_t len) { uintptr_t saddr; uintptr_t eaddr; /* get start and end address */ saddr = (uintptr_t)uaddr; eaddr = (uintptr_t)uaddr + len; /* verify addresses are valid for userspace */ return ((saddr == eaddr) || (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); } /* * This function should return either EINTR or ERESTART depending on * the signal type sent to this thread: */ static int linux_get_error(struct task_struct *task, int error) { /* check for signal type interrupt code */ if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { error = -linux_schedule_get_interrupt_value(task); if (error == 0) error = EINTR; } return (error); } static int linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, const struct file_operations *fop, u_long cmd, caddr_t data, struct thread *td) { struct task_struct *task = current; unsigned size; int error; size = IOCPARM_LEN(cmd); /* refer to logic in sys_ioctl() */ if (size > 0) { /* * Setup hint for linux_copyin() and linux_copyout(). * * Background: Linux code expects a user-space address * while FreeBSD supplies a kernel-space address. */ task->bsd_ioctl_data = data; task->bsd_ioctl_len = size; data = (void *)LINUX_IOCTL_MIN_PTR; } else { /* fetch user-space pointer */ data = *(void **)data; } #if defined(__amd64__) if (td->td_proc->p_elf_machine == EM_386) { /* try the compat IOCTL handler first */ if (fop->compat_ioctl != NULL) { error = -OPW(fp, td, fop->compat_ioctl(filp, cmd, (u_long)data)); } else { error = ENOTTY; } /* fallback to the regular IOCTL handler, if any */ if (error == ENOTTY && fop->unlocked_ioctl != NULL) { error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, (u_long)data)); } } else #endif { if (fop->unlocked_ioctl != NULL) { error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, (u_long)data)); } else { error = ENOTTY; } } if (size > 0) { task->bsd_ioctl_data = NULL; task->bsd_ioctl_len = 0; } if (error == EWOULDBLOCK) { /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } else { error = linux_get_error(task, error); } return (error); } #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) /* * This function atomically updates the poll wakeup state and returns * the previous state at the time of update. 
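 *
 * The pstate[] argument is a transition table indexed by the current
 * state: the compare-and-swap loop retries until the table entry for
 * the observed state has been installed, and the state observed at
 * that instant is returned.  As a hypothetical sketch (illustration
 * only, not part of this change), a table forcing every state to
 * LINUX_FWQ_STATE_READY would be used like this:
 *
 *	static const uint8_t force_ready[LINUX_FWQ_STATE_MAX] = {
 *		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_READY,
 *		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_READY,
 *		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
 *		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY,
 *	};
 *	old = linux_poll_wakeup_state(&filp->f_wait_queue.state,
 *	    force_ready);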
*/ static uint8_t linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) { int c, old; c = v->counter; while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) c = old; return (c); } static int linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ }; struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_QUEUED: linux_poll_wakeup(filp); return (1); default: return (0); } } void linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, }; /* check if we are called inside the select system call */ if (p == LINUX_POLL_TABLE_NORMAL) selrecord(curthread, &filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_INIT: /* NOTE: file handles can only belong to one wait-queue */ filp->f_wait_queue.wqh = wqh; filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; add_wait_queue(wqh, &filp->f_wait_queue.wq); atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); break; default: break; } } static void linux_poll_wait_dequeue(struct linux_file *filp) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, }; seldrain(&filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_NOT_READY: case LINUX_FWQ_STATE_QUEUED: case LINUX_FWQ_STATE_READY: remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); break; default: break; } } void linux_poll_wakeup(struct linux_file *filp) { /* this function should be NULL-safe */ if (filp == NULL) return; selwakeup(&filp->f_selinfo); spin_lock(&filp->f_kqlock); filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); spin_unlock(&filp->f_kqlock); } static void linux_file_kqfilter_detach(struct knote *kn) { struct linux_file *filp = kn->kn_hook; spin_lock(&filp->f_kqlock); knlist_remove(&filp->f_selinfo.si_note, kn, 1); spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter_read_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock.m, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); } static int linux_file_kqfilter_write_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock.m, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 
1 : 0); } static struct filterops linux_dev_kqfiltops_read = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_read_event, }; static struct filterops linux_dev_kqfiltops_write = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_write_event, }; static void linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) { struct thread *td; const struct file_operations *fop; struct linux_cdev *ldev; int temp; if ((filp->f_kqflags & kqflags) == 0) return; td = curthread; linux_get_fop(filp, &fop, &ldev); /* get the latest polling state */ temp = OPW(filp->_file, td, fop->poll(filp, NULL)); linux_drop_fop(ldev); spin_lock(&filp->f_kqlock); /* clear kqflags */ filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE); /* update kqflags */ if ((temp & (POLLIN | POLLOUT)) != 0) { if ((temp & POLLIN) != 0) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; if ((temp & POLLOUT) != 0) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); } spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter(struct file *file, struct knote *kn) { struct linux_file *filp; struct thread *td; int error; td = curthread; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; if (filp->f_op->poll == NULL) return (EINVAL); spin_lock(&filp->f_kqlock); switch (kn->kn_filter) { case EVFILT_READ: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; kn->kn_fop = &linux_dev_kqfiltops_read; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; case EVFILT_WRITE: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; kn->kn_fop = &linux_dev_kqfiltops_write; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; default: error = EINVAL; break; } spin_unlock(&filp->f_kqlock); if (error == 0) { linux_set_current(td); /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } return (error); } static int linux_file_mmap_single(struct file *fp, const struct file_operations *fop, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot, struct thread *td) { struct task_struct *task; struct vm_area_struct *vmap; struct mm_struct *mm; struct linux_file *filp; vm_memattr_t attr; int error; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; if (fop->mmap == NULL) return (EOPNOTSUPP); linux_set_current(td); /* * The same VM object might be shared by multiple processes * and the mm_struct is usually freed when a process exits. * * The atomic reference below makes sure the mm_struct is * available as long as the vmap is in the linux_vma_head. 
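 *
 * atomic_inc_not_zero() only takes the reference while mm_users is
 * still non-zero, i.e. while the owning process has not yet started
 * to exit; the matching mmput() is performed by
 * linux_cdev_handle_free().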
*/ task = current; mm = task->mm; if (atomic_inc_not_zero(&mm->mm_users) == 0) return (EINVAL); vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); vmap->vm_start = 0; vmap->vm_end = size; vmap->vm_pgoff = *offset / PAGE_SIZE; vmap->vm_pfn = 0; vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); vmap->vm_ops = NULL; vmap->vm_file = get_file(filp); vmap->vm_mm = mm; if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { error = linux_get_error(task, EINTR); } else { error = -OPW(fp, td, fop->mmap(filp, vmap)); error = linux_get_error(task, error); up_write(&vmap->vm_mm->mmap_sem); } if (error != 0) { linux_cdev_handle_free(vmap); return (error); } attr = pgprot2cachemode(vmap->vm_page_prot); if (vmap->vm_ops != NULL) { struct vm_area_struct *ptr; void *vm_private_data; bool vm_no_fault; if (vmap->vm_ops->open == NULL || vmap->vm_ops->close == NULL || vmap->vm_private_data == NULL) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); return (EINVAL); } vm_private_data = vmap->vm_private_data; rw_wlock(&linux_vma_lock); TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { if (ptr->vm_private_data == vm_private_data) break; } /* check if there is an existing VM area struct */ if (ptr != NULL) { /* check if the VM area structure is invalid */ if (ptr->vm_ops == NULL || ptr->vm_ops->open == NULL || ptr->vm_ops->close == NULL) { error = ESTALE; vm_no_fault = 1; } else { error = EEXIST; vm_no_fault = (ptr->vm_ops->fault == NULL); } } else { /* insert VM area structure into list */ TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); error = 0; vm_no_fault = (vmap->vm_ops->fault == NULL); } rw_wunlock(&linux_vma_lock); if (error != 0) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); /* check for stale VM area struct */ if (error != EEXIST) return (error); } /* check if there is no fault handler */ if (vm_no_fault) { *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, &linux_cdev_pager_ops[1], size, nprot, *offset, td->td_ucred); } else { *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, &linux_cdev_pager_ops[0], size, nprot, *offset, td->td_ucred); } /* check if allocating the VM object failed */ if (*object == NULL) { if (error == 0) { /* remove VM area struct from list */ linux_cdev_handle_remove(vmap); /* free allocated VM area struct */ linux_cdev_handle_free(vmap); } return (EINVAL); } } else { struct sglist *sg; sg = sglist_alloc(1, M_WAITOK); sglist_append_phys(sg, (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, nprot, 0, td->td_ucred); linux_cdev_handle_free(vmap); if (*object == NULL) { sglist_free(sg); return (EINVAL); } } if (attr != VM_MEMATTR_DEFAULT) { VM_OBJECT_WLOCK(*object); vm_object_set_memattr(*object, attr); VM_OBJECT_WUNLOCK(*object); } *offset = 0; return (0); } struct cdevsw linuxcdevsw = { .d_version = D_VERSION, .d_fdopen = linux_dev_fdopen, .d_name = "lkpidev", }; static int linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; ssize_t bytes; int error; error = 0; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->read != NULL) { bytes = OPW(file, td, fop->read(filp, 
uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); linux_drop_fop(ldev); return (error); } static int linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; ssize_t bytes; int error; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->write != NULL) { bytes = OPW(file, td, fop->write(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; error = 0; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); linux_drop_fop(ldev); return (error); } static int linux_file_poll(struct file *file, int events, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; int revents; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->poll != NULL) { revents = OPW(file, td, fop->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events; } else { revents = 0; } linux_drop_fop(ldev); return (revents); } static int linux_file_close(struct file *file, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; int error; filp = (struct linux_file *)file->f_data; KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp))); error = 0; filp->f_flags = file->f_flag; linux_set_current(td); linux_poll_wait_dequeue(filp); linux_get_fop(filp, &fop, &ldev); if (fop->release != NULL) error = -OPW(file, td, fop->release(filp->f_vnode, filp)); funsetown(&filp->f_sigio); if (filp->f_vnode != NULL) vdrop(filp->f_vnode); linux_drop_fop(ldev); if (filp->f_cdev != NULL) linux_cdev_deref(filp->f_cdev); kfree(filp); return (error); } static int linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; int error; error = 0; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; linux_get_fop(filp, &fop, &ldev); linux_set_current(td); switch (cmd) { case FIONBIO: break; case FIOASYNC: if (fop->fasync == NULL) break; error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); break; case FIOSETOWN: error = fsetown(*(int *)data, &filp->f_sigio); if (error == 0) { if (fop->fasync == NULL) break; error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); } break; case FIOGETOWN: *(int *)data = fgetown(&filp->f_sigio); break; default: error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); break; } linux_drop_fop(ldev); return (error); } static int linux_file_mmap_sub(struct thread *td, vm_size_t 
objsize, vm_prot_t prot, vm_prot_t *maxprotp, int *flagsp, struct file *fp, vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) { /* * Character devices do not provide private mappings * of any kind: */ if ((*maxprotp & VM_PROT_WRITE) == 0 && (prot & VM_PROT_WRITE) != 0) return (EACCES); if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0) return (EINVAL); return (linux_file_mmap_single(fp, fop, foff, objsize, objp, (int)prot, td)); } static int linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; struct mount *mp; struct vnode *vp; vm_object_t object; vm_prot_t maxprot; int error; filp = (struct linux_file *)fp->f_data; vp = filp->f_vnode; if (vp == NULL) return (EOPNOTSUPP); /* * Ensure that file and memory protections are * compatible. */ mp = vp->v_mount; if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { maxprot = VM_PROT_NONE; if ((prot & VM_PROT_EXECUTE) != 0) return (EACCES); } else maxprot = VM_PROT_EXECUTE; if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_READ; else if ((prot & VM_PROT_READ) != 0) return (EACCES); /* * If we are sharing potential changes via MAP_SHARED and we * are trying to get write permission although we opened it * without asking for it, bail out. * * Note that most character devices always share mappings. * * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE * requests rather than doing it here. */ if ((flags & MAP_SHARED) != 0) { if ((fp->f_flag & FWRITE) != 0) maxprot |= VM_PROT_WRITE; else if ((prot & VM_PROT_WRITE) != 0) return (EACCES); } maxprot &= cap_maxprot; linux_get_fop(filp, &fop, &ldev); error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, &foff, fop, &object); if (error != 0) goto out; error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, foff, FALSE, td); if (error != 0) vm_object_deallocate(object); out: linux_drop_fop(ldev); return (error); } static int linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; struct vnode *vp; int error; filp = (struct linux_file *)fp->f_data; if (filp->f_vnode == NULL) return (EOPNOTSUPP); vp = filp->f_vnode; vn_lock(vp, LK_SHARED | LK_RETRY); error = vn_stat(vp, sb, td->td_ucred, NOCRED, td); VOP_UNLOCK(vp, 0); return (error); } static int linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { struct linux_file *filp; struct vnode *vp; int error; filp = fp->f_data; vp = filp->f_vnode; if (vp == NULL) { error = 0; kif->kf_type = KF_TYPE_DEV; } else { vref(vp); FILEDESC_SUNLOCK(fdp); error = vn_fill_kinfo_vnode(vp, kif); vrele(vp); kif->kf_type = KF_TYPE_VNODE; FILEDESC_SLOCK(fdp); } return (error); } unsigned int linux_iminor(struct inode *inode) { struct linux_cdev *ldev; if (inode == NULL || inode->v_rdev == NULL || inode->v_rdev->si_devsw != &linuxcdevsw) return (-1U); ldev = inode->v_rdev->si_drv1; if (ldev == NULL) return (-1U); return (minor(ldev->dev)); } struct fileops linuxfileops = { .fo_read = linux_file_read, .fo_write = linux_file_write, .fo_truncate = invfo_truncate, .fo_kqfilter = linux_file_kqfilter, .fo_stat = linux_file_stat, .fo_fill_kinfo = linux_file_fill_kinfo, .fo_poll = linux_file_poll, .fo_close = linux_file_close, .fo_ioctl = linux_file_ioctl, .fo_mmap = linux_file_mmap, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, 
.fo_sendfile = invfo_sendfile, .fo_flags = DFLAG_PASSABLE, }; /* * Hash of vmmap addresses. This is infrequently accessed and does not * need to be particularly large. This is done because we must store the * caller's idea of the map size to properly unmap. */ struct vmmap { LIST_ENTRY(vmmap) vm_next; void *vm_addr; unsigned long vm_size; }; struct vmmaphd { struct vmmap *lh_first; }; #define VMMAP_HASH_SIZE 64 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; static struct mtx vmmaplock; static void vmmap_add(void *addr, unsigned long size) { struct vmmap *vmmap; vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); mtx_lock(&vmmaplock); vmmap->vm_size = size; vmmap->vm_addr = addr; LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); mtx_unlock(&vmmaplock); } static struct vmmap * vmmap_remove(void *addr) { struct vmmap *vmmap; mtx_lock(&vmmaplock); LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) if (vmmap->vm_addr == addr) break; if (vmmap) LIST_REMOVE(vmmap, vm_next); mtx_unlock(&vmmaplock); return (vmmap); } #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) void * _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) { void *addr; addr = pmap_mapdev_attr(phys_addr, size, attr); if (addr == NULL) return (NULL); vmmap_add(addr, size); return (addr); } #endif void iounmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); #endif kfree(vmmap); } void * vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) { vm_offset_t off; size_t size; size = count * PAGE_SIZE; off = kva_alloc(size); if (off == 0) return (NULL); vmmap_add((void *)off, size); pmap_qenter(off, pages, count); return ((void *)off); } void vunmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); kva_free((vm_offset_t)addr, vmmap->vm_size); kfree(vmmap); } char * kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); p = kmalloc(len + 1, gfp); if (p != NULL) vsnprintf(p, len + 1, fmt, ap); return (p); } char * kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; char *p; va_start(ap, fmt); p = kvasprintf(gfp, fmt, ap); va_end(ap); return (p); } static void linux_timer_callback_wrapper(void *context) { struct timer_list *timer; linux_set_current(curthread); timer = context; timer->function(timer->data); } void mod_timer(struct timer_list *timer, int expires) { timer->expires = expires; callout_reset(&timer->callout, linux_timer_jiffies_until(expires), &linux_timer_callback_wrapper, timer); } void add_timer(struct timer_list *timer) { callout_reset(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer); } void add_timer_on(struct timer_list *timer, int cpu) { callout_reset_on(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer, cpu); } int del_timer(struct timer_list *timer) { if (callout_stop(&(timer)->callout) == -1) return (0); return (1); } static void linux_timer_init(void *arg) { /* * Compute an internal HZ value which can divide 2**32 to * avoid timer rounding problems when the tick value wraps * around 2**32: */ linux_timer_hz_mask = 1; while (linux_timer_hz_mask < (unsigned long)hz) linux_timer_hz_mask *= 2; linux_timer_hz_mask--; } SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); void linux_complete_common(struct completion *c, int all) { int wakeup_swapper; sleepq_lock(c); if (all) { c->done = UINT_MAX; wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); } else { if (c->done != UINT_MAX) c->done++; wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); } sleepq_release(c); if (wakeup_swapper) kick_proc0(); } /* * Indefinite wait for done != 0 with or without signals. */ int linux_wait_for_common(struct completion *c, int flags) { struct task_struct *task; int error; if (SCHEDULER_STOPPED()) return (0); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; error = 0; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); if (flags & SLEEPQ_INTERRUPTIBLE) { DROP_GIANT(); error = -sleepq_wait_sig(c, 0); PICKUP_GIANT(); if (error != 0) { linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; goto intr; } } else { DROP_GIANT(); sleepq_wait(c, 0); PICKUP_GIANT(); } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); intr: return (error); } /* * Time limited wait for done != 0 with or without signals. 
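 *
 * Returns the number of jiffies remaining when the completion fired,
 * 0 on timeout, or -ERESTARTSYS if the wait was interrupted by a
 * signal.  Hypothetical usage sketch (illustration only, not part of
 * this change; handle_timeout() is a made-up callback):
 *
 *	left = linux_wait_for_timeout_common(&c, hz / 10, 1);
 *	if (left == 0)
 *		handle_timeout();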
*/ int linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) { struct task_struct *task; int end = jiffies + timeout; int error; if (SCHEDULER_STOPPED()) return (0); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); sleepq_set_timeout(c, linux_timer_jiffies_until(end)); DROP_GIANT(); if (flags & SLEEPQ_INTERRUPTIBLE) error = -sleepq_timedwait_sig(c, 0); else error = -sleepq_timedwait(c, 0); PICKUP_GIANT(); if (error != 0) { /* check for timeout */ if (error == -EWOULDBLOCK) { error = 0; /* timeout */ } else { /* signal happened */ linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; } goto done; } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); /* return how many jiffies are left */ error = linux_timer_jiffies_until(end); done: return (error); } int linux_try_wait_for_completion(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); if (c->done != 0 && c->done != UINT_MAX) c->done--; sleepq_release(c); return (isdone); } int linux_completion_done(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); sleepq_release(c); return (isdone); } static void linux_cdev_deref(struct linux_cdev *ldev) { if (refcount_release(&ldev->refs)) kfree(ldev); } static void linux_cdev_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; linux_destroy_dev(cdev); linux_cdev_deref(cdev); kobject_put(parent); } static void linux_cdev_static_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; linux_destroy_dev(cdev); kobject_put(parent); } void linux_destroy_dev(struct linux_cdev *ldev) { if (ldev->cdev == NULL) return; MPASS((ldev->siref & LDEV_SI_DTR) == 0); atomic_set_int(&ldev->siref, LDEV_SI_DTR); while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) pause("ldevdtr", hz / 4); destroy_dev(ldev->cdev); ldev->cdev = NULL; } const struct kobj_type linux_cdev_ktype = { .release = linux_cdev_release, }; const struct kobj_type linux_cdev_static_ktype = { .release = linux_cdev_static_release, }; static void linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) { struct notifier_block *nb; nb = arg; if (linkstate == LINK_STATE_UP) nb->notifier_call(nb, NETDEV_UP, ifp); else nb->notifier_call(nb, NETDEV_DOWN, ifp); } static void linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_REGISTER, ifp); } static void linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); } static void linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); } static void linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); } int register_netdevice_notifier(struct notifier_block *nb) { nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); nb->tags[NETDEV_UNREGISTER] = 
EVENTHANDLER_REGISTER( ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( iflladdr_event, linux_handle_iflladdr_event, nb, 0); return (0); } int register_inetaddr_notifier(struct notifier_block *nb) { nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( ifaddr_event, linux_handle_ifaddr_event, nb, 0); return (0); } int unregister_netdevice_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifnet_link_event, nb->tags[NETDEV_UP]); EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nb->tags[NETDEV_REGISTER]); EVENTHANDLER_DEREGISTER(ifnet_departure_event, nb->tags[NETDEV_UNREGISTER]); EVENTHANDLER_DEREGISTER(iflladdr_event, nb->tags[NETDEV_CHANGEADDR]); return (0); } int unregister_inetaddr_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifaddr_event, nb->tags[NETDEV_CHANGEIFADDR]); return (0); } struct list_sort_thunk { int (*cmp)(void *, struct list_head *, struct list_head *); void *priv; }; static inline int linux_le_cmp(void *priv, const void *d1, const void *d2) { struct list_head *le1, *le2; struct list_sort_thunk *thunk; thunk = priv; le1 = *(__DECONST(struct list_head **, d1)); le2 = *(__DECONST(struct list_head **, d2)); return ((thunk->cmp)(thunk->priv, le1, le2)); } void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)) { struct list_sort_thunk thunk; struct list_head **ar, *le; size_t count, i; count = 0; list_for_each(le, head) count++; ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); i = 0; list_for_each(le, head) ar[i++] = le; thunk.cmp = cmp; thunk.priv = priv; qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); INIT_LIST_HEAD(head); for (i = 0; i < count; i++) list_add_tail(ar[i], head); free(ar, M_KMALLOC); } void linux_irq_handler(void *ent) { struct irq_ent *irqe; linux_set_current(curthread); irqe = ent; irqe->handler(irqe->irq, irqe->arg); } #if defined(__i386__) || defined(__amd64__) int linux_wbinvd_on_all_cpus(void) { pmap_invalidate_cache(); return (0); } #endif int linux_on_each_cpu(void callback(void *), void *data) { smp_rendezvous(smp_no_rendezvous_barrier, callback, smp_no_rendezvous_barrier, data); return (0); } int linux_in_atomic(void) { return ((curthread->td_pflags & TDP_NOFAULTING) != 0); } struct linux_cdev * linux_find_cdev(const char *name, unsigned major, unsigned minor) { dev_t dev = MKDEV(major, minor); struct cdev *cdev; dev_lock(); LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { struct linux_cdev *ldev = cdev->si_drv1; if (ldev->dev == dev && strcmp(kobject_name(&ldev->kobj), name) == 0) { break; } } dev_unlock(); return (cdev != NULL ? 
cdev->si_drv1 : NULL); } int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add(cdev, makedev(major, i), 1); if (ret != 0) break; } return (ret); } int __register_chrdev_p(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops, uid_t uid, gid_t gid, int mode) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); if (ret != 0) break; } return (ret); } void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name) { struct linux_cdev *cdevp; int i; for (i = baseminor; i < baseminor + count; i++) { cdevp = linux_find_cdev(name, major, i); if (cdevp != NULL) cdev_del(cdevp); } } void linux_dump_stack(void) { #ifdef STACK struct stack st; stack_zero(&st); stack_save(&st); stack_print(&st); #endif } #if defined(__i386__) || defined(__amd64__) bool linux_cpu_has_clflush; #endif static void linux_compat_init(void *arg) { struct sysctl_oid *rootoid; int i; #if defined(__i386__) || defined(__amd64__) linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); #endif rw_init(&linux_vma_lock, "lkpi-vma-lock"); rootoid = SYSCTL_ADD_ROOT_NODE(NULL, OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); kobject_init(&linux_class_root, &linux_class_ktype); kobject_set_name(&linux_class_root, "class"); linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); kobject_init(&linux_root_device.kobj, &linux_dev_ktype); kobject_set_name(&linux_root_device.kobj, "device"); linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, "device"); linux_root_device.bsddev = root_bus; linux_class_misc.name = "misc"; class_register(&linux_class_misc); INIT_LIST_HEAD(&pci_drivers); INIT_LIST_HEAD(&pci_devices); spin_lock_init(&pci_lock); mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); for (i = 0; i < VMMAP_HASH_SIZE; i++) LIST_INIT(&vmmaphead[i]); } SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); static void linux_compat_uninit(void *arg) { linux_kobject_kfree_name(&linux_class_root); linux_kobject_kfree_name(&linux_root_device.kobj); linux_kobject_kfree_name(&linux_class_misc.kobj); mtx_destroy(&vmmaplock); spin_lock_destroy(&pci_lock); rw_destroy(&linux_vma_lock); } SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); /* * NOTE: Linux frequently uses "unsigned long" for pointer to integer * conversion and vice versa, where in FreeBSD "uintptr_t" would be * used. Assert these types have the same size, else some parts of the * LinuxKPI may not work like expected: */ CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); Index: user/ngie/bug-237403/sys/dev/drm2/ttm/ttm_page_alloc.c =================================================================== --- user/ngie/bug-237403/sys/dev/drm2/ttm/ttm_page_alloc.c (revision 348028) +++ user/ngie/bug-237403/sys/dev/drm2/ttm/ttm_page_alloc.c (revision 348029) @@ -1,925 +1,926 @@ /* * Copyright (c) Red Hat Inc. 
* Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Jerome Glisse * Pauli Nieminen */ /* * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. */ /* simple list based uncached page pool * - Pool collects recently freed pages for reuse * - Use page->lru to keep a free list * - doesn't track currently in use pages */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(vm_page_t)) #define SMALL_ALLOCATION 16 #define FREE_ALL_PAGES (~0U) /* times are in msecs */ #define PAGE_FREE_INTERVAL 1000 /** * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. * * @lock: Protects the shared pool from concurrent access. Must be used with * irqsave/irqrestore variants because the pool allocator may be called from * delayed work. * @fill_lock: Prevent concurrent calls to fill. * @list: Pool of free uc/wc pages for fast reuse. * @gfp_flags: Flags to pass for alloc_page. * @npages: Number of pages in pool. */ struct ttm_page_pool { struct mtx lock; bool fill_lock; bool dma32; struct pglist list; int ttm_page_alloc_flags; unsigned npages; char *name; unsigned long nfrees; unsigned long nrefills; }; /** * Limits for the pool. They are handled without locks because the only place * where they may change is the sysfs store. They won't have an immediate * effect anyway so forcing serialization to access them is pointless. */ struct ttm_pool_opts { unsigned alloc_size; unsigned max_size; unsigned small; }; #define NUM_POOLS 4 /** * struct ttm_pool_manager - Holds memory pools for fast allocation * * Manager is a read-only object for pool code so it doesn't need locking. * * @free_interval: minimum number of jiffies between freeing pages from pool. * @page_alloc_inited: reference counting for pool allocation. * @work: Work that is used to shrink the pool. Work is only run when there are * pages to free. * @small_allocation: Limit, in number of pages, of what counts as a small * allocation. * * @pools: All pool objects in use. 
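 * @kobj_ref: reference count on the manager, released by
 * ttm_page_alloc_fini().
 * @lowmem_handler: vm_lowmem eventhandler tag used to shrink the
 * pools under memory pressure.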
**/ struct ttm_pool_manager { unsigned int kobj_ref; eventhandler_tag lowmem_handler; struct ttm_pool_opts options; union { struct ttm_page_pool u_pools[NUM_POOLS]; struct _utag { struct ttm_page_pool u_wc_pool; struct ttm_page_pool u_uc_pool; struct ttm_page_pool u_wc_pool_dma32; struct ttm_page_pool u_uc_pool_dma32; } _ut; } _u; }; #define pools _u.u_pools #define wc_pool _u._ut.u_wc_pool #define uc_pool _u._ut.u_uc_pool #define wc_pool_dma32 _u._ut.u_wc_pool_dma32 #define uc_pool_dma32 _u._ut.u_uc_pool_dma32 MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager"); static void ttm_vm_page_free(vm_page_t m) { KASSERT(m->object == NULL, ("ttm page %p is owned", m)); KASSERT(m->wire_count == 1, ("ttm lost wire %p", m)); KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m)); KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m)); m->flags &= ~PG_FICTITIOUS; m->oflags |= VPO_UNMANAGED; vm_page_unwire(m, PQ_NONE); vm_page_free(m); } static vm_memattr_t ttm_caching_state_to_vm(enum ttm_caching_state cstate) { switch (cstate) { case tt_uncached: return (VM_MEMATTR_UNCACHEABLE); case tt_wc: return (VM_MEMATTR_WRITE_COMBINING); case tt_cached: return (VM_MEMATTR_WRITE_BACK); } panic("caching state %d\n", cstate); } static vm_page_t ttm_vm_page_alloc_dma32(int req, vm_memattr_t memattr) { vm_page_t p; int tries; for (tries = 0; ; tries++) { p = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff, PAGE_SIZE, 0, memattr); if (p != NULL || tries > 2) return (p); if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff, PAGE_SIZE, 0)) vm_wait(NULL); } } static vm_page_t ttm_vm_page_alloc_any(int req, vm_memattr_t memattr) { vm_page_t p; while (1) { p = vm_page_alloc(NULL, 0, req); if (p != NULL) break; vm_wait(NULL); } pmap_page_set_memattr(p, memattr); return (p); } static vm_page_t ttm_vm_page_alloc(int flags, enum ttm_caching_state cstate) { vm_page_t p; vm_memattr_t memattr; int req; memattr = ttm_caching_state_to_vm(cstate); req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ; if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0) req |= VM_ALLOC_ZERO; if ((flags & TTM_PAGE_FLAG_DMA32) != 0) p = ttm_vm_page_alloc_dma32(req, memattr); else p = ttm_vm_page_alloc_any(req, memattr); if (p != NULL) { p->oflags &= ~VPO_UNMANAGED; p->flags |= PG_FICTITIOUS; } return (p); } static void ttm_pool_kobj_release(struct ttm_pool_manager *m) { free(m, M_TTM_POOLMGR); } #if 0 /* XXXKIB sysctl */ static ssize_t ttm_pool_store(struct ttm_pool_manager *m, struct attribute *attr, const char *buffer, size_t size) { int chars; unsigned val; chars = sscanf(buffer, "%u", &val); if (chars == 0) return size; /* Convert kb to number of pages */ val = val / (PAGE_SIZE >> 10); if (attr == &ttm_page_pool_max) m->options.max_size = val; else if (attr == &ttm_page_pool_small) m->options.small = val; else if (attr == &ttm_page_pool_alloc_size) { if (val > NUM_PAGES_TO_ALLOC*8) { pr_err("Setting allocation size to %lu is not allowed. 
Recommended size is %lu\n", NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); return size; } else if (val > NUM_PAGES_TO_ALLOC) { pr_warn("Setting allocation size to larger than %lu is not recommended\n", NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); } m->options.alloc_size = val; } return size; } static ssize_t ttm_pool_show(struct ttm_pool_manager *m, struct attribute *attr, char *buffer) { unsigned val = 0; if (attr == &ttm_page_pool_max) val = m->options.max_size; else if (attr == &ttm_page_pool_small) val = m->options.small; else if (attr == &ttm_page_pool_alloc_size) val = m->options.alloc_size; val = val * (PAGE_SIZE >> 10); return snprintf(buffer, PAGE_SIZE, "%u\n", val); } #endif static struct ttm_pool_manager *_manager; static int set_pages_array_wb(vm_page_t *pages, int addrinarray) { #ifdef TTM_HAS_AGP int i; for (i = 0; i < addrinarray; i++) pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_BACK); #endif return 0; } static int set_pages_array_wc(vm_page_t *pages, int addrinarray) { #ifdef TTM_HAS_AGP int i; for (i = 0; i < addrinarray; i++) pmap_page_set_memattr(pages[i], VM_MEMATTR_WRITE_COMBINING); #endif return 0; } static int set_pages_array_uc(vm_page_t *pages, int addrinarray) { #ifdef TTM_HAS_AGP int i; for (i = 0; i < addrinarray; i++) pmap_page_set_memattr(pages[i], VM_MEMATTR_UNCACHEABLE); #endif return 0; } /** * Select the right pool or requested caching state and ttm flags. */ static struct ttm_page_pool *ttm_get_pool(int flags, enum ttm_caching_state cstate) { int pool_index; if (cstate == tt_cached) return NULL; if (cstate == tt_wc) pool_index = 0x0; else pool_index = 0x1; if (flags & TTM_PAGE_FLAG_DMA32) pool_index |= 0x2; return &_manager->pools[pool_index]; } /* set memory back to wb and free the pages. */ static void ttm_pages_put(vm_page_t *pages, unsigned npages) { unsigned i; /* Our VM handles vm memattr automatically on the page free. */ if (set_pages_array_wb(pages, npages)) printf("[TTM] Failed to set %d pages to wb!\n", npages); for (i = 0; i < npages; ++i) ttm_vm_page_free(pages[i]); } static void ttm_pool_update_free_locked(struct ttm_page_pool *pool, unsigned freed_pages) { pool->npages -= freed_pages; pool->nfrees += freed_pages; } /** * Free pages from pool. * * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC * number of pages in one go. * * @pool: to free the pages from * @free_all: If set to true will free all pages in pool **/ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) { vm_page_t p, p1; vm_page_t *pages_to_free; unsigned freed_pages = 0, npages_to_free = nr_free; unsigned i; if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; pages_to_free = malloc(npages_to_free * sizeof(vm_page_t), M_TEMP, M_WAITOK | M_ZERO); restart: mtx_lock(&pool->lock); TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) { if (freed_pages >= npages_to_free) break; pages_to_free[freed_pages++] = p; /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ if (freed_pages >= NUM_PAGES_TO_ALLOC) { /* remove range of pages from the pool */ for (i = 0; i < freed_pages; i++) TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q); ttm_pool_update_free_locked(pool, freed_pages); /** * Because changing page caching is costly * we unlock the pool to prevent stalling. 
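 *
 * The pages being freed were already unlinked from the pool list
 * above, so no concurrent allocation can observe them while the lock
 * is dropped.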
*/ mtx_unlock(&pool->lock); ttm_pages_put(pages_to_free, freed_pages); if (likely(nr_free != FREE_ALL_PAGES)) nr_free -= freed_pages; if (NUM_PAGES_TO_ALLOC >= nr_free) npages_to_free = nr_free; else npages_to_free = NUM_PAGES_TO_ALLOC; freed_pages = 0; /* free all so restart the processing */ if (nr_free) goto restart; /* Not allowed to fall through or break because the * following context is inside the spinlock while we are * outside here. */ goto out; } } /* remove range of pages from the pool */ if (freed_pages) { for (i = 0; i < freed_pages; i++) TAILQ_REMOVE(&pool->list, pages_to_free[i], plinks.q); ttm_pool_update_free_locked(pool, freed_pages); nr_free -= freed_pages; } mtx_unlock(&pool->lock); if (freed_pages) ttm_pages_put(pages_to_free, freed_pages); out: free(pages_to_free, M_TEMP); return nr_free; } /* Get a good estimate of how many pages are free in the pools */ static int ttm_pool_get_num_unused_pages(void) { unsigned i; int total = 0; for (i = 0; i < NUM_POOLS; ++i) total += _manager->pools[i].npages; return total; } /** * Callback for the VM to request that the pools reduce the number of pages * held. */ static int ttm_pool_mm_shrink(void *arg) { static unsigned int start_pool = 0; unsigned i; unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1); struct ttm_page_pool *pool; int shrink_pages = 100; /* XXXKIB */ pool_offset = pool_offset % NUM_POOLS; /* select start pool in round-robin fashion */ for (i = 0; i < NUM_POOLS; ++i) { unsigned nr_free = shrink_pages; if (shrink_pages == 0) break; pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; shrink_pages = ttm_page_pool_free(pool, nr_free); } /* return estimated number of unused pages in the pools */ return ttm_pool_get_num_unused_pages(); } static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) { manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem, ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY); } static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) { EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler); } static int ttm_set_pages_caching(vm_page_t *pages, enum ttm_caching_state cstate, unsigned cpages) { int r = 0; /* Set page caching */ switch (cstate) { case tt_uncached: r = set_pages_array_uc(pages, cpages); if (r) printf("[TTM] Failed to set %d pages to uc!\n", cpages); break; case tt_wc: r = set_pages_array_wc(pages, cpages); if (r) printf("[TTM] Failed to set %d pages to wc!\n", cpages); break; default: break; } return r; } /** * Free the pages that failed to change the caching state. If there are * any pages that have already changed their caching state, put them back * into the pool. */ static void ttm_handle_caching_state_failure(struct pglist *pages, int ttm_flags, enum ttm_caching_state cstate, vm_page_t *failed_pages, unsigned cpages) { unsigned i; /* Failed pages have to be freed */ for (i = 0; i < cpages; ++i) { TAILQ_REMOVE(pages, failed_pages[i], plinks.q); ttm_vm_page_free(failed_pages[i]); } } /** * Allocate new pages with correct caching. * * This function is reentrant if the caller updates count depending on the * number of pages returned in the pages array. 
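 *
 * Pages that were allocated before a failure stay linked on @pages;
 * callers such as ttm_get_pages() count the entries they received
 * and release them again via ttm_put_pages() when the request cannot
 * be completed.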
*/ static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags, int ttm_flags, enum ttm_caching_state cstate, unsigned count) { vm_page_t *caching_array; vm_page_t p; int r = 0; unsigned i, cpages; unsigned max_cpages = min(count, (unsigned)(PAGE_SIZE/sizeof(vm_page_t))); /* allocate array for page caching change */ caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP, M_WAITOK | M_ZERO); for (i = 0, cpages = 0; i < count; ++i) { p = ttm_vm_page_alloc(ttm_alloc_flags, cstate); if (!p) { printf("[TTM] Unable to get page %u\n", i); /* store already allocated pages in the pool after * setting the caching state */ if (cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); } r = -ENOMEM; goto out; } #ifdef CONFIG_HIGHMEM /* KIB: nop */ /* gfp flags of a highmem page should never be dma32 so we * should be fine in such a case */ if (!PageHighMem(p)) #endif { caching_array[cpages++] = p; if (cpages == max_cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) { ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); goto out; } cpages = 0; } } TAILQ_INSERT_HEAD(pages, p, plinks.q); } if (cpages) { r = ttm_set_pages_caching(caching_array, cstate, cpages); if (r) ttm_handle_caching_state_failure(pages, ttm_flags, cstate, caching_array, cpages); } out: free(caching_array, M_TEMP); return r; } /** * Fill the given pool if there aren't enough pages and the requested number of * pages is small. */ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, enum ttm_caching_state cstate, unsigned count) { vm_page_t p; int r; unsigned cpages = 0; /** * Only allow one pool fill operation at a time. * If the pool doesn't have enough pages for the allocation, new pages are * allocated from outside of the pool. */ if (pool->fill_lock) return; pool->fill_lock = true; /* If the allocation request is small and there are not enough * pages in the pool, we fill the pool up first. */ if (count < _manager->options.small && count > pool->npages) { struct pglist new_pages; unsigned alloc_size = _manager->options.alloc_size; /** * Can't change page caching if in irqsave context. We have to * drop the pool->lock. */ mtx_unlock(&pool->lock); TAILQ_INIT(&new_pages); r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags, ttm_flags, cstate, alloc_size); mtx_lock(&pool->lock); if (!r) { TAILQ_CONCAT(&pool->list, &new_pages, plinks.q); ++pool->nrefills; pool->npages += alloc_size; } else { printf("[TTM] Failed to fill pool (%p)\n", pool); /* If we have any pages left put them to the pool; count * the new pages, not the ones already in the pool. */ TAILQ_FOREACH(p, &new_pages, plinks.q) { ++cpages; } TAILQ_CONCAT(&pool->list, &new_pages, plinks.q); pool->npages += cpages; } } pool->fill_lock = false; } /** * Cut 'count' number of pages from the pool and put them on the return list. * * @return count of pages still required to fulfill the request. 
*/ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, struct pglist *pages, int ttm_flags, enum ttm_caching_state cstate, unsigned count) { vm_page_t p; unsigned i; mtx_lock(&pool->lock); ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count); if (count >= pool->npages) { /* take all pages from the pool */ TAILQ_CONCAT(pages, &pool->list, plinks.q); count -= pool->npages; pool->npages = 0; goto out; } for (i = 0; i < count; i++) { p = TAILQ_FIRST(&pool->list); TAILQ_REMOVE(&pool->list, p, plinks.q); TAILQ_INSERT_TAIL(pages, p, plinks.q); } pool->npages -= count; count = 0; out: mtx_unlock(&pool->lock); return count; } /* Put all pages in the pages list into the correct pool to wait for reuse */ static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); unsigned i; if (pool == NULL) { /* No pool for this memory type so free the pages */ for (i = 0; i < npages; i++) { if (pages[i]) { ttm_vm_page_free(pages[i]); pages[i] = NULL; } } return; } mtx_lock(&pool->lock); for (i = 0; i < npages; i++) { if (pages[i]) { TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q); pages[i] = NULL; pool->npages++; } } /* Check that we don't go over the pool limit */ npages = 0; if (pool->npages > _manager->options.max_size) { npages = pool->npages - _manager->options.max_size; /* free at least NUM_PAGES_TO_ALLOC pages * to reduce calls to set_memory_wb */ if (npages < NUM_PAGES_TO_ALLOC) npages = NUM_PAGES_TO_ALLOC; } mtx_unlock(&pool->lock); if (npages) ttm_page_pool_free(pool, npages); } /* * On success the pages list will hold count correctly * cached pages. */ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags, enum ttm_caching_state cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); struct pglist plist; vm_page_t p = NULL; int gfp_flags; unsigned count; int r; /* No pool for cached pages */ if (pool == NULL) { for (r = 0; r < npages; ++r) { p = ttm_vm_page_alloc(flags, cstate); if (!p) { printf("[TTM] Unable to allocate page\n"); return -ENOMEM; } pages[r] = p; } return 0; } /* combine the zero flag with the pool flags */ gfp_flags = flags | pool->ttm_page_alloc_flags; /* First we take pages from the pool */ TAILQ_INIT(&plist); npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages); count = 0; TAILQ_FOREACH(p, &plist, plinks.q) { pages[count++] = p; } /* clear the pages coming from the pool if requested */ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { TAILQ_FOREACH(p, &plist, plinks.q) { pmap_zero_page(p); } } /* If the pool didn't have enough pages, allocate new ones. */ if (npages > 0) { /* ttm_alloc_new_pages doesn't reference the pool so we can run * multiple requests in parallel. */ TAILQ_INIT(&plist); r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages); TAILQ_FOREACH(p, &plist, plinks.q) { pages[count++] = p; } if (r) { /* If there are any pages in the list, put them back into * the pool. 
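 * ttm_put_pages() re-inserts them into the matching pool so nothing
 * is leaked when the larger request fails.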
*/ printf("[TTM] Failed to allocate extra pages for large request\n"); ttm_put_pages(pages, count, flags, cstate); return r; } } return 0; } static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, char *name) { mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF); pool->fill_lock = false; TAILQ_INIT(&pool->list); pool->npages = pool->nfrees = 0; pool->ttm_page_alloc_flags = flags; pool->name = name; } int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) { if (_manager != NULL) printf("[TTM] manager != NULL\n"); printf("[TTM] Initializing pool allocator\n"); _manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO); ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc"); ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc"); ttm_page_pool_init_locked(&_manager->wc_pool_dma32, TTM_PAGE_FLAG_DMA32, "wc dma"); ttm_page_pool_init_locked(&_manager->uc_pool_dma32, TTM_PAGE_FLAG_DMA32, "uc dma"); _manager->options.max_size = max_pages; _manager->options.small = SMALL_ALLOCATION; _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; refcount_init(&_manager->kobj_ref, 1); ttm_pool_mm_shrink_init(_manager); return 0; } void ttm_page_alloc_fini(void) { int i; printf("[TTM] Finalizing pool allocator\n"); ttm_pool_mm_shrink_fini(_manager); for (i = 0; i < NUM_POOLS; ++i) ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); if (refcount_release(&_manager->kobj_ref)) ttm_pool_kobj_release(_manager); _manager = NULL; } int ttm_pool_populate(struct ttm_tt *ttm) { struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; unsigned i; int ret; if (ttm->state != tt_unpopulated) return 0; for (i = 0; i < ttm->num_pages; ++i) { ret = ttm_get_pages(&ttm->pages[i], 1, ttm->page_flags, ttm->caching_state); if (ret != 0) { ttm_pool_unpopulate(ttm); return -ENOMEM; } ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], false, false); if (unlikely(ret != 0)) { ttm_pool_unpopulate(ttm); return -ENOMEM; } } if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { ret = ttm_tt_swapin(ttm); if (unlikely(ret != 0)) { ttm_pool_unpopulate(ttm); return ret; } } ttm->state = tt_unbound; return 0; } void ttm_pool_unpopulate(struct ttm_tt *ttm) { unsigned i; for (i = 0; i < ttm->num_pages; ++i) { if (ttm->pages[i]) { ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i]); ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags, ttm->caching_state); } } ttm->state = tt_unpopulated; } #if 0 /* XXXKIB sysctl */ int ttm_page_alloc_debugfs(struct seq_file *m, void *data) { struct ttm_page_pool *p; unsigned i; char *h[] = {"pool", "refills", "pages freed", "size"}; if (!_manager) { seq_printf(m, "No pool allocator running.\n"); return 0; } seq_printf(m, "%6s %12s %13s %8s\n", h[0], h[1], h[2], h[3]); for (i = 0; i < NUM_POOLS; ++i) { p = &_manager->pools[i]; seq_printf(m, "%6s %12ld %13ld %8d\n", p->name, p->nrefills, p->nfrees, p->npages); } return 0; } #endif Index: user/ngie/bug-237403/sys/dev/fb/creator.c =================================================================== --- user/ngie/bug-237403/sys/dev/fb/creator.c (revision 348028) +++ user/ngie/bug-237403/sys/dev/fb/creator.c (revision 348029) @@ -1,1129 +1,1130 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Jake Burkholder. * Copyright (c) 2005 - 2006 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define CREATOR_DRIVER_NAME "creator" struct creator_softc { video_adapter_t sc_va; /* XXX must be first */ phandle_t sc_node; struct cdev *sc_si; struct resource *sc_reg[FFB_NREG]; bus_space_tag_t sc_bt[FFB_NREG]; bus_space_handle_t sc_bh[FFB_NREG]; u_long sc_reg_size; u_int sc_height; u_int sc_width; u_int sc_xmargin; u_int sc_ymargin; const u_char *sc_font; int sc_bg_cache; int sc_fg_cache; int sc_fifo_cache; int sc_fontinc_cache; int sc_fontw_cache; int sc_pmask_cache; u_int sc_flags; #define CREATOR_AFB (1 << 0) #define CREATOR_CONSOLE (1 << 1) #define CREATOR_CUREN (1 << 2) #define CREATOR_CURINV (1 << 3) #define CREATOR_PAC1 (1 << 4) }; #define FFB_READ(sc, reg, off) \ bus_space_read_4((sc)->sc_bt[(reg)], (sc)->sc_bh[(reg)], (off)) #define FFB_WRITE(sc, reg, off, val) \ bus_space_write_4((sc)->sc_bt[(reg)], (sc)->sc_bh[(reg)], (off), (val)) #define C(r, g, b) ((b << 16) | (g << 8) | (r)) static const uint32_t creator_cmap[] = { C(0x00, 0x00, 0x00), /* black */ C(0x00, 0x00, 0xff), /* blue */ C(0x00, 0xff, 0x00), /* green */ C(0x00, 0xc0, 0xc0), /* cyan */ C(0xff, 0x00, 0x00), /* red */ C(0xc0, 0x00, 0xc0), /* magenta */ C(0xc0, 0xc0, 0x00), /* brown */ C(0xc0, 0xc0, 0xc0), /* light grey */ C(0x80, 0x80, 0x80), /* dark grey */ C(0x80, 0x80, 0xff), /* light blue */ C(0x80, 0xff, 0x80), /* light green */ C(0x80, 0xff, 0xff), /* light cyan */ C(0xff, 0x80, 0x80), /* light red */ C(0xff, 0x80, 0xff), /* light magenta */ C(0xff, 0xff, 0x80), /* yellow */ C(0xff, 0xff, 0xff), /* white */ }; #undef C static const struct { vm_offset_t virt; vm_paddr_t phys; vm_size_t size; } creator_fb_map[] = { { FFB_VIRT_SFB8R, FFB_PHYS_SFB8R, FFB_SIZE_SFB8R }, { FFB_VIRT_SFB8G, FFB_PHYS_SFB8G, FFB_SIZE_SFB8G }, { FFB_VIRT_SFB8B, FFB_PHYS_SFB8B, FFB_SIZE_SFB8B }, { FFB_VIRT_SFB8X, FFB_PHYS_SFB8X, FFB_SIZE_SFB8X }, { FFB_VIRT_SFB32, FFB_PHYS_SFB32, FFB_SIZE_SFB32 }, { FFB_VIRT_SFB64, FFB_PHYS_SFB64, FFB_SIZE_SFB64 }, { FFB_VIRT_FBC, FFB_PHYS_FBC, FFB_SIZE_FBC }, { FFB_VIRT_FBC_BM, FFB_PHYS_FBC_BM, FFB_SIZE_FBC_BM }, { FFB_VIRT_DFB8R, FFB_PHYS_DFB8R, FFB_SIZE_DFB8R }, { FFB_VIRT_DFB8G, FFB_PHYS_DFB8G, FFB_SIZE_DFB8G }, { FFB_VIRT_DFB8B, FFB_PHYS_DFB8B, FFB_SIZE_DFB8B }, { FFB_VIRT_DFB8X, 
FFB_PHYS_DFB8X, FFB_SIZE_DFB8X }, { FFB_VIRT_DFB24, FFB_PHYS_DFB24, FFB_SIZE_DFB24 }, { FFB_VIRT_DFB32, FFB_PHYS_DFB32, FFB_SIZE_DFB32 }, { FFB_VIRT_DFB422A, FFB_PHYS_DFB422A, FFB_SIZE_DFB422A }, { FFB_VIRT_DFB422AD, FFB_PHYS_DFB422AD, FFB_SIZE_DFB422AD }, { FFB_VIRT_DFB24B, FFB_PHYS_DFB24B, FFB_SIZE_DFB24B }, { FFB_VIRT_DFB422B, FFB_PHYS_DFB422B, FFB_SIZE_DFB422B }, { FFB_VIRT_DFB422BD, FFB_PHYS_DFB422BD, FFB_SIZE_DFB422BD }, { FFB_VIRT_SFB16Z, FFB_PHYS_SFB16Z, FFB_SIZE_SFB16Z }, { FFB_VIRT_SFB8Z, FFB_PHYS_SFB8Z, FFB_SIZE_SFB8Z }, { FFB_VIRT_SFB422, FFB_PHYS_SFB422, FFB_SIZE_SFB422 }, { FFB_VIRT_SFB422D, FFB_PHYS_SFB422D, FFB_SIZE_SFB422D }, { FFB_VIRT_FBC_KREG, FFB_PHYS_FBC_KREG, FFB_SIZE_FBC_KREG }, { FFB_VIRT_DAC, FFB_PHYS_DAC, FFB_SIZE_DAC }, { FFB_VIRT_PROM, FFB_PHYS_PROM, FFB_SIZE_PROM }, { FFB_VIRT_EXP, FFB_PHYS_EXP, FFB_SIZE_EXP }, }; #define CREATOR_FB_MAP_SIZE nitems(creator_fb_map) extern const struct gfb_font gallant12x22; static struct creator_softc creator_softc; static struct bus_space_tag creator_bst_store[FFB_FBC]; static device_probe_t creator_bus_probe; static device_attach_t creator_bus_attach; static device_method_t creator_bus_methods[] = { DEVMETHOD(device_probe, creator_bus_probe), DEVMETHOD(device_attach, creator_bus_attach), { 0, 0 } }; static devclass_t creator_devclass; DEFINE_CLASS_0(creator, creator_bus_driver, creator_bus_methods, sizeof(struct creator_softc)); DRIVER_MODULE(creator, nexus, creator_bus_driver, creator_devclass, 0, 0); DRIVER_MODULE(creator, upa, creator_bus_driver, creator_devclass, 0, 0); static d_open_t creator_fb_open; static d_close_t creator_fb_close; static d_ioctl_t creator_fb_ioctl; static d_mmap_t creator_fb_mmap; static struct cdevsw creator_fb_devsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = creator_fb_open, .d_close = creator_fb_close, .d_ioctl = creator_fb_ioctl, .d_mmap = creator_fb_mmap, .d_name = "fb", }; static void creator_cursor_enable(struct creator_softc *sc, int onoff); static void creator_cursor_install(struct creator_softc *sc); static void creator_shutdown(void *xsc); static int creator_configure(int flags); static vi_probe_t creator_probe; static vi_init_t creator_init; static vi_get_info_t creator_get_info; static vi_query_mode_t creator_query_mode; static vi_set_mode_t creator_set_mode; static vi_save_font_t creator_save_font; static vi_load_font_t creator_load_font; static vi_show_font_t creator_show_font; static vi_save_palette_t creator_save_palette; static vi_load_palette_t creator_load_palette; static vi_set_border_t creator_set_border; static vi_save_state_t creator_save_state; static vi_load_state_t creator_load_state; static vi_set_win_org_t creator_set_win_org; static vi_read_hw_cursor_t creator_read_hw_cursor; static vi_set_hw_cursor_t creator_set_hw_cursor; static vi_set_hw_cursor_shape_t creator_set_hw_cursor_shape; static vi_blank_display_t creator_blank_display; static vi_mmap_t creator_mmap; static vi_ioctl_t creator_ioctl; static vi_clear_t creator_clear; static vi_fill_rect_t creator_fill_rect; static vi_bitblt_t creator_bitblt; static vi_diag_t creator_diag; static vi_save_cursor_palette_t creator_save_cursor_palette; static vi_load_cursor_palette_t creator_load_cursor_palette; static vi_copy_t creator_copy; static vi_putp_t creator_putp; static vi_putc_t creator_putc; static vi_puts_t creator_puts; static vi_putm_t creator_putm; static video_switch_t creatorvidsw = { .probe = creator_probe, .init = creator_init, .get_info = creator_get_info, .query_mode = creator_query_mode, 
.set_mode = creator_set_mode, .save_font = creator_save_font, .load_font = creator_load_font, .show_font = creator_show_font, .save_palette = creator_save_palette, .load_palette = creator_load_palette, .set_border = creator_set_border, .save_state = creator_save_state, .load_state = creator_load_state, .set_win_org = creator_set_win_org, .read_hw_cursor = creator_read_hw_cursor, .set_hw_cursor = creator_set_hw_cursor, .set_hw_cursor_shape = creator_set_hw_cursor_shape, .blank_display = creator_blank_display, .mmap = creator_mmap, .ioctl = creator_ioctl, .clear = creator_clear, .fill_rect = creator_fill_rect, .bitblt = creator_bitblt, .diag = creator_diag, .save_cursor_palette = creator_save_cursor_palette, .load_cursor_palette = creator_load_cursor_palette, .copy = creator_copy, .putp = creator_putp, .putc = creator_putc, .puts = creator_puts, .putm = creator_putm }; VIDEO_DRIVER(creator, creatorvidsw, creator_configure); extern sc_rndr_sw_t txtrndrsw; RENDERER(creator, 0, txtrndrsw, gfb_set); RENDERER_MODULE(creator, gfb_set); static const u_char creator_mouse_pointer[64][8] __aligned(8) = { { 0x00, 0x00, }, /* ............ */ { 0x80, 0x00, }, /* *........... */ { 0xc0, 0x00, }, /* **.......... */ { 0xe0, 0x00, }, /* ***......... */ { 0xf0, 0x00, }, /* ****........ */ { 0xf8, 0x00, }, /* *****....... */ { 0xfc, 0x00, }, /* ******...... */ { 0xfe, 0x00, }, /* *******..... */ { 0xff, 0x00, }, /* ********.... */ { 0xff, 0x80, }, /* *********... */ { 0xfc, 0xc0, }, /* ******..**.. */ { 0xdc, 0x00, }, /* **.***...... */ { 0x8e, 0x00, }, /* *...***..... */ { 0x0e, 0x00, }, /* ....***..... */ { 0x07, 0x00, }, /* .....***.... */ { 0x04, 0x00, }, /* .....*...... */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ 
*/ }; static inline void creator_ras_fifo_wait(struct creator_softc *sc, int n); static inline void creator_ras_setfontinc(struct creator_softc *sc, int fontinc); static inline void creator_ras_setfontw(struct creator_softc *sc, int fontw); static inline void creator_ras_setbg(struct creator_softc *sc, int bg); static inline void creator_ras_setfg(struct creator_softc *sc, int fg); static inline void creator_ras_setpmask(struct creator_softc *sc, int pmask); static inline void creator_ras_wait(struct creator_softc *sc); static inline void creator_ras_wait(struct creator_softc *sc) { int ucsr; int r; for (;;) { ucsr = FFB_READ(sc, FFB_FBC, FFB_FBC_UCSR); if ((ucsr & (FBC_UCSR_FB_BUSY | FBC_UCSR_RP_BUSY)) == 0) break; r = ucsr & (FBC_UCSR_READ_ERR | FBC_UCSR_FIFO_OVFL); if (r != 0) FFB_WRITE(sc, FFB_FBC, FFB_FBC_UCSR, r); } } static inline void creator_ras_fifo_wait(struct creator_softc *sc, int n) { int cache; cache = sc->sc_fifo_cache; while (cache < n) cache = (FFB_READ(sc, FFB_FBC, FFB_FBC_UCSR) & FBC_UCSR_FIFO_MASK) - 8; sc->sc_fifo_cache = cache - n; } static inline void creator_ras_setfontinc(struct creator_softc *sc, int fontinc) { if (fontinc == sc->sc_fontinc_cache) return; sc->sc_fontinc_cache = fontinc; creator_ras_fifo_wait(sc, 1); FFB_WRITE(sc, FFB_FBC, FFB_FBC_FONTINC, fontinc); creator_ras_wait(sc); } static inline void creator_ras_setfontw(struct creator_softc *sc, int fontw) { if (fontw == sc->sc_fontw_cache) return; sc->sc_fontw_cache = fontw; creator_ras_fifo_wait(sc, 1); FFB_WRITE(sc, FFB_FBC, FFB_FBC_FONTW, fontw); creator_ras_wait(sc); } static inline void creator_ras_setbg(struct creator_softc *sc, int bg) { if (bg == sc->sc_bg_cache) return; sc->sc_bg_cache = bg; creator_ras_fifo_wait(sc, 1); FFB_WRITE(sc, FFB_FBC, FFB_FBC_BG, bg); creator_ras_wait(sc); } static inline void creator_ras_setfg(struct creator_softc *sc, int fg) { if (fg == sc->sc_fg_cache) return; sc->sc_fg_cache = fg; creator_ras_fifo_wait(sc, 1); FFB_WRITE(sc, FFB_FBC, FFB_FBC_FG, fg); creator_ras_wait(sc); } static inline void creator_ras_setpmask(struct creator_softc *sc, int pmask) { if (pmask == sc->sc_pmask_cache) return; sc->sc_pmask_cache = pmask; creator_ras_fifo_wait(sc, 1); FFB_WRITE(sc, FFB_FBC, FFB_FBC_PMASK, pmask); creator_ras_wait(sc); } /* * video driver interface */ static int creator_configure(int flags) { struct creator_softc *sc; phandle_t chosen; phandle_t output; ihandle_t stdout; bus_addr_t addr; char buf[sizeof("SUNW,ffb")]; int i; int space; /* * For the high-level console probing return the number of * registered adapters. */ if (!(flags & VIO_PROBE_ONLY)) { for (i = 0; vid_find_adapter(CREATOR_DRIVER_NAME, i) >= 0; i++) ; return (i); } /* Low-level console probing and initialization. 
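/*
 * Sketch of the shadow-register trick used by the creator_ras_set*()
 * helpers above: remember the last value pushed to the rasterizer and
 * elide the write (and the FIFO slot it would consume) when nothing
 * changed.  reg_write() and the register offset are illustrative
 * stand-ins for FFB_WRITE()/FFB_FBC_FG, not the real interface.
 */
#include <stdint.h>
#include <stdio.h>

static void
reg_write(uint32_t reg, uint32_t val)
{
	printf("write %#x = %#x\n", reg, val);	/* stands in for MMIO */
}

struct ras_cache {
	int fg_cache;	/* set to -1 on mode switch to force a write */
};

static void
ras_setfg(struct ras_cache *c, int fg)
{
	if (fg == c->fg_cache)
		return;			/* hardware already holds it */
	c->fg_cache = fg;
	reg_write(0x228, fg);
}

int
main(void)
{
	struct ras_cache c = { .fg_cache = -1 };

	ras_setfg(&c, 7);	/* performs the write */
	ras_setfg(&c, 7);	/* elided */
	return (0);
}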
*/ sc = &creator_softc; if (sc->sc_va.va_flags & V_ADP_REGISTERED) goto found; if ((chosen = OF_finddevice("/chosen")) == -1) return (0); if (OF_getprop(chosen, "stdout", &stdout, sizeof(stdout)) == -1) return (0); if ((output = OF_instance_to_package(stdout)) == -1) return (0); if (OF_getprop(output, "name", buf, sizeof(buf)) == -1) return (0); if (strcmp(buf, "SUNW,ffb") == 0 || strcmp(buf, "SUNW,afb") == 0) { sc->sc_flags = CREATOR_CONSOLE; if (strcmp(buf, "SUNW,afb") == 0) sc->sc_flags |= CREATOR_AFB; sc->sc_node = output; } else return (0); for (i = FFB_DAC; i <= FFB_FBC; i++) { if (OF_decode_addr(output, i, &space, &addr) != 0) return (0); sc->sc_bt[i] = &creator_bst_store[i - FFB_DAC]; sc->sc_bh[i] = sparc64_fake_bustag(space, addr, sc->sc_bt[i]); } if (creator_init(0, &sc->sc_va, 0) < 0) return (0); found: /* Return number of found adapters. */ return (1); } static int creator_probe(int unit, video_adapter_t **adpp, void *arg, int flags) { return (0); } static int creator_init(int unit, video_adapter_t *adp, int flags) { struct creator_softc *sc; phandle_t options; video_info_t *vi; char buf[sizeof("screen-#columns")]; sc = (struct creator_softc *)adp; vi = &adp->va_info; vid_init_struct(adp, CREATOR_DRIVER_NAME, -1, unit); if (OF_getprop(sc->sc_node, "height", &sc->sc_height, sizeof(sc->sc_height)) == -1) return (ENXIO); if (OF_getprop(sc->sc_node, "width", &sc->sc_width, sizeof(sc->sc_width)) == -1) return (ENXIO); if ((options = OF_finddevice("/options")) == -1) return (ENXIO); if (OF_getprop(options, "screen-#rows", buf, sizeof(buf)) == -1) return (ENXIO); vi->vi_height = strtol(buf, NULL, 10); if (OF_getprop(options, "screen-#columns", buf, sizeof(buf)) == -1) return (ENXIO); vi->vi_width = strtol(buf, NULL, 10); vi->vi_cwidth = gallant12x22.width; vi->vi_cheight = gallant12x22.height; vi->vi_flags = V_INFO_COLOR; vi->vi_mem_model = V_INFO_MM_OTHER; sc->sc_font = gallant12x22.data; sc->sc_xmargin = (sc->sc_width - (vi->vi_width * vi->vi_cwidth)) / 2; sc->sc_ymargin = (sc->sc_height - (vi->vi_height * vi->vi_cheight)) / 2; creator_set_mode(adp, 0); if (!(sc->sc_flags & CREATOR_AFB)) { FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE, FFB_DAC_CFG_DID); if (((FFB_READ(sc, FFB_DAC, FFB_DAC_VALUE) & FFB_DAC_CFG_DID_PNUM) >> 12) != 0x236e) { sc->sc_flags |= CREATOR_PAC1; FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE, FFB_DAC_CFG_UCTRL); if (((FFB_READ(sc, FFB_DAC, FFB_DAC_VALUE) & FFB_DAC_UCTRL_MANREV) >> 8) <= 2) sc->sc_flags |= CREATOR_CURINV; } } creator_blank_display(adp, V_DISPLAY_ON); creator_clear(adp); /* * Setting V_ADP_MODECHANGE serves as hack so creator_set_mode() * (which will invalidate our caches and restore our settings) is * called when the X server shuts down. Otherwise screen corruption * happens most of the time. 
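/*
 * The centering math from creator_init() above, on illustrative
 * numbers: a 1280x1024 mode drawing an 80x34 grid of 12x22 Gallant
 * glyphs leaves symmetric pixel borders on each side (the geometry
 * values are examples, not fixed hardware properties).
 */
#include <stdio.h>

int
main(void)
{
	unsigned width = 1280, height = 1024;	/* OFW "width"/"height" */
	unsigned cols = 80, rows = 34;		/* screen-#columns/#rows */
	unsigned cwidth = 12, cheight = 22;	/* gallant12x22 */

	printf("xmargin=%u ymargin=%u\n",
	    (width - cols * cwidth) / 2,	/* 160 */
	    (height - rows * cheight) / 2);	/* 138 */
	return (0);
}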
*/ adp->va_flags |= V_ADP_COLOR | V_ADP_MODECHANGE | V_ADP_BORDER | V_ADP_INITIALIZED; if (vid_register(adp) < 0) return (ENXIO); adp->va_flags |= V_ADP_REGISTERED; return (0); } static int creator_get_info(video_adapter_t *adp, int mode, video_info_t *info) { bcopy(&adp->va_info, info, sizeof(*info)); return (0); } static int creator_query_mode(video_adapter_t *adp, video_info_t *info) { return (ENODEV); } static int creator_set_mode(video_adapter_t *adp, int mode) { struct creator_softc *sc; sc = (struct creator_softc *)adp; sc->sc_bg_cache = -1; sc->sc_fg_cache = -1; sc->sc_fontinc_cache = -1; sc->sc_fontw_cache = -1; sc->sc_pmask_cache = -1; creator_ras_wait(sc); sc->sc_fifo_cache = 0; creator_ras_fifo_wait(sc, 2); FFB_WRITE(sc, FFB_FBC, FFB_FBC_PPC, FBC_PPC_VCE_DIS | FBC_PPC_TBE_OPAQUE | FBC_PPC_APE_DIS | FBC_PPC_CS_CONST); FFB_WRITE(sc, FFB_FBC, FFB_FBC_FBC, FFB_FBC_WB_A | FFB_FBC_RB_A | FFB_FBC_SB_BOTH | FFB_FBC_XE_OFF | FFB_FBC_RGBE_MASK); return (0); } static int creator_save_font(video_adapter_t *adp, int page, int size, int width, u_char *data, int c, int count) { return (ENODEV); } static int creator_load_font(video_adapter_t *adp, int page, int size, int width, u_char *data, int c, int count) { return (ENODEV); } static int creator_show_font(video_adapter_t *adp, int page) { return (ENODEV); } static int creator_save_palette(video_adapter_t *adp, u_char *palette) { return (ENODEV); } static int creator_load_palette(video_adapter_t *adp, u_char *palette) { return (ENODEV); } static int creator_set_border(video_adapter_t *adp, int border) { struct creator_softc *sc; sc = (struct creator_softc *)adp; creator_fill_rect(adp, border, 0, 0, sc->sc_width, sc->sc_ymargin); creator_fill_rect(adp, border, 0, sc->sc_height - sc->sc_ymargin, sc->sc_width, sc->sc_ymargin); creator_fill_rect(adp, border, 0, 0, sc->sc_xmargin, sc->sc_height); creator_fill_rect(adp, border, sc->sc_width - sc->sc_xmargin, 0, sc->sc_xmargin, sc->sc_height); return (0); } static int creator_save_state(video_adapter_t *adp, void *p, size_t size) { return (ENODEV); } static int creator_load_state(video_adapter_t *adp, void *p) { return (ENODEV); } static int creator_set_win_org(video_adapter_t *adp, off_t offset) { return (ENODEV); } static int creator_read_hw_cursor(video_adapter_t *adp, int *col, int *row) { *col = 0; *row = 0; return (0); } static int creator_set_hw_cursor(video_adapter_t *adp, int col, int row) { return (ENODEV); } static int creator_set_hw_cursor_shape(video_adapter_t *adp, int base, int height, int celsize, int blink) { return (ENODEV); } static int creator_blank_display(video_adapter_t *adp, int mode) { struct creator_softc *sc; uint32_t v; int i; sc = (struct creator_softc *)adp; FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE, FFB_DAC_CFG_TGEN); v = FFB_READ(sc, FFB_DAC, FFB_DAC_VALUE); switch (mode) { case V_DISPLAY_ON: v |= FFB_DAC_CFG_TGEN_VIDE; break; case V_DISPLAY_BLANK: case V_DISPLAY_STAND_BY: case V_DISPLAY_SUSPEND: v &= ~FFB_DAC_CFG_TGEN_VIDE; break; } FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE, FFB_DAC_CFG_TGEN); FFB_WRITE(sc, FFB_DAC, FFB_DAC_VALUE, v); for (i = 0; i < 10; i++) { FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE, FFB_DAC_CFG_TGEN); (void)FFB_READ(sc, FFB_DAC, FFB_DAC_VALUE); } return (0); } static int creator_mmap(video_adapter_t *adp, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { return (EINVAL); } static int creator_ioctl(video_adapter_t *adp, u_long cmd, caddr_t data) { struct creator_softc *sc; struct fbcursor *fbc; struct fbtype *fb; sc = (struct creator_softc 
*)adp; switch (cmd) { case FBIOGTYPE: fb = (struct fbtype *)data; fb->fb_type = FBTYPE_CREATOR; fb->fb_height = sc->sc_height; fb->fb_width = sc->sc_width; fb->fb_depth = fb->fb_cmsize = fb->fb_size = 0; break; case FBIOSCURSOR: fbc = (struct fbcursor *)data; if (fbc->set & FB_CUR_SETCUR && fbc->enable == 0) { creator_cursor_enable(sc, 0); sc->sc_flags &= ~CREATOR_CUREN; } else return (ENODEV); break; default: return (fb_commonioctl(adp, cmd, data)); } return (0); } static int creator_clear(video_adapter_t *adp) { struct creator_softc *sc; sc = (struct creator_softc *)adp; creator_fill_rect(adp, (SC_NORM_ATTR >> 4) & 0xf, 0, 0, sc->sc_width, sc->sc_height); return (0); } static int creator_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { struct creator_softc *sc; sc = (struct creator_softc *)adp; creator_ras_setpmask(sc, 0xffffffff); creator_ras_fifo_wait(sc, 2); FFB_WRITE(sc, FFB_FBC, FFB_FBC_ROP, FBC_ROP_NEW); FFB_WRITE(sc, FFB_FBC, FFB_FBC_DRAWOP, FBC_DRAWOP_RECTANGLE); creator_ras_setfg(sc, creator_cmap[val & 0xf]); /* * Note that at least the Elite3D cards are sensitive to the order * of operations here. */ creator_ras_fifo_wait(sc, 4); FFB_WRITE(sc, FFB_FBC, FFB_FBC_BY, y); FFB_WRITE(sc, FFB_FBC, FFB_FBC_BX, x); FFB_WRITE(sc, FFB_FBC, FFB_FBC_BH, cy); FFB_WRITE(sc, FFB_FBC, FFB_FBC_BW, cx); creator_ras_wait(sc); return (0); } static int creator_bitblt(video_adapter_t *adp, ...) { return (ENODEV); } static int creator_diag(video_adapter_t *adp, int level) { video_info_t info; fb_dump_adp_info(adp->va_name, adp, level); creator_get_info(adp, 0, &info); fb_dump_mode_info(adp->va_name, adp, &info, level); return (0); } static int creator_save_cursor_palette(video_adapter_t *adp, u_char *palette) { return (ENODEV); } static int creator_load_cursor_palette(video_adapter_t *adp, u_char *palette) { return (ENODEV); } static int creator_copy(video_adapter_t *adp, vm_offset_t src, vm_offset_t dst, int n) { return (ENODEV); } static int creator_putp(video_adapter_t *adp, vm_offset_t off, u_int32_t p, u_int32_t a, int size, int bpp, int bit_ltor, int byte_ltor) { return (ENODEV); } static int creator_putc(video_adapter_t *adp, vm_offset_t off, u_int8_t c, u_int8_t a) { struct creator_softc *sc; const uint16_t *p; int row; int col; int i; sc = (struct creator_softc *)adp; row = (off / adp->va_info.vi_width) * adp->va_info.vi_cheight; col = (off % adp->va_info.vi_width) * adp->va_info.vi_cwidth; p = (const uint16_t *)sc->sc_font + (c * adp->va_info.vi_cheight); creator_ras_setfg(sc, creator_cmap[a & 0xf]); creator_ras_setbg(sc, creator_cmap[(a >> 4) & 0xf]); creator_ras_fifo_wait(sc, 1 + adp->va_info.vi_cheight); FFB_WRITE(sc, FFB_FBC, FFB_FBC_FONTXY, ((row + sc->sc_ymargin) << 16) | (col + sc->sc_xmargin)); creator_ras_setfontw(sc, adp->va_info.vi_cwidth); creator_ras_setfontinc(sc, 0x10000); for (i = 0; i < adp->va_info.vi_cheight; i++) { FFB_WRITE(sc, FFB_FBC, FFB_FBC_FONT, *p++ << 16); } return (0); } static int creator_puts(video_adapter_t *adp, vm_offset_t off, u_int16_t *s, int len) { int i; for (i = 0; i < len; i++) { vidd_putc(adp, off + i, s[i] & 0xff, (s[i] & 0xff00) >> 8); } return (0); } static int creator_putm(video_adapter_t *adp, int x, int y, u_int8_t *pixel_image, u_int32_t pixel_mask, int size, int width) { struct creator_softc *sc; sc = (struct creator_softc *)adp; if (!(sc->sc_flags & CREATOR_CUREN)) { creator_cursor_install(sc); creator_cursor_enable(sc, 1); sc->sc_flags |= CREATOR_CUREN; } FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE2, FFB_DAC_CUR_POS); 
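/*
 * creator_putc() above maps syscons' linear character-cell offset to
 * the pixel position handed to FONTXY; the same arithmetic in
 * isolation, with example geometry:
 */
#include <stdio.h>

int
main(void)
{
	unsigned vi_width = 80;			/* cells per text row */
	unsigned cwidth = 12, cheight = 22;	/* glyph size */
	unsigned off = 165;			/* linear cell index */

	printf("cell %u -> pixel x=%u y=%u\n", off,
	    (off % vi_width) * cwidth,		/* 60 */
	    (off / vi_width) * cheight);	/* 44 */
	return (0);
}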
FFB_WRITE(sc, FFB_DAC, FFB_DAC_VALUE2, ((y + sc->sc_ymargin) << 16) | (x + sc->sc_xmargin)); return (0); } /* * bus interface */ static int creator_bus_probe(device_t dev) { const char *name; phandle_t node; int type; name = ofw_bus_get_name(dev); node = ofw_bus_get_node(dev); if (strcmp(name, "SUNW,ffb") == 0) { if (OF_getprop(node, "board_type", &type, sizeof(type)) == -1) return (ENXIO); switch (type & 7) { case 0x0: device_set_desc(dev, "Creator"); break; case 0x3: device_set_desc(dev, "Creator3D"); break; default: return (ENXIO); } } else if (strcmp(name, "SUNW,afb") == 0) device_set_desc(dev, "Elite3D"); else return (ENXIO); return (BUS_PROBE_DEFAULT); } static int creator_bus_attach(device_t dev) { struct creator_softc *sc; video_adapter_t *adp; video_switch_t *sw; phandle_t node; int error; int rid; int unit; int i; node = ofw_bus_get_node(dev); if ((sc = (struct creator_softc *)vid_get_adapter(vid_find_adapter( CREATOR_DRIVER_NAME, 0))) != NULL && sc->sc_node == node) { device_printf(dev, "console\n"); device_set_softc(dev, sc); } else { sc = device_get_softc(dev); sc->sc_node = node; } adp = &sc->sc_va; /* * Allocate resources regardless of whether we are the console * and already obtained the bus tags and handles for the FFB_DAC * and FFB_FBC register banks in creator_configure() or not so * the resources are marked as taken in the respective RMAN. * The supported cards use either 15 (Creator, Elite3D?) or 24 * (Creator3D?) register banks. We make sure that we can also * allocate the resources for at least the FFB_DAC and FFB_FBC * banks here. We try but don't actually care whether we can * allocate more than these two resources and just limit the * range accessible via creator_fb_mmap() accordingly. */ for (i = 0; i < FFB_NREG; i++) { rid = i; sc->sc_reg[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_reg[i] == NULL) { if (i <= FFB_FBC) { device_printf(dev, "cannot allocate resources\n"); error = ENXIO; goto fail; } break; } sc->sc_bt[i] = rman_get_bustag(sc->sc_reg[i]); sc->sc_bh[i] = rman_get_bushandle(sc->sc_reg[i]); } /* * The XFree86/X.Org sunffb(4) expects to be able to access the * memory spanned by the first and the last resource as one chunk * via creator_fb_mmap(), using offsets from the first resource, * even though the backing resources are actually non-continuous. * So make sure that the memory we provide is at least backed by * increasing resources. */ for (i = 1; i < FFB_NREG && sc->sc_reg[i] != NULL && rman_get_start(sc->sc_reg[i]) > rman_get_start(sc->sc_reg[i - 1]); i++) ; sc->sc_reg_size = rman_get_end(sc->sc_reg[i - 1]) - rman_get_start(sc->sc_reg[0]) + 1; if (!(sc->sc_flags & CREATOR_CONSOLE)) { if ((sw = vid_get_switch(CREATOR_DRIVER_NAME)) == NULL) { device_printf(dev, "cannot get video switch\n"); error = ENODEV; goto fail; } /* * During device configuration we don't necessarily probe * the adapter which is the console first so we can't use * the device unit number for the video adapter unit. The * worst case would be that we use the video adapter unit * 0 twice. As it doesn't really matter which unit number * the corresponding video adapter has just use the next * unused one. 
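/*
 * The window-sizing scan from creator_bus_attach() above: walk the
 * register banks while their start addresses keep increasing and
 * expose only the span from the first bank to the last one in that
 * increasing prefix.  The addresses below are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t start[] = { 0x1000, 0x2000, 0x8000, 0x3000 };
	uint64_t end[]   = { 0x1fff, 0x2fff, 0x8fff, 0x3fff };
	size_t i, n = 4;

	for (i = 1; i < n && start[i] > start[i - 1]; i++)
		;
	/* Banks 0..2 increase; 0x3000 breaks the order and is dropped. */
	printf("mappable span: %#llx bytes\n",
	    (unsigned long long)(end[i - 1] - start[0] + 1));
	return (0);
}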
*/ for (i = 0; i < devclass_get_maxunit(creator_devclass); i++) if (vid_find_adapter(CREATOR_DRIVER_NAME, i) < 0) break; if (strcmp(ofw_bus_get_name(dev), "SUNW,afb") == 0) sc->sc_flags |= CREATOR_AFB; if ((error = sw->init(i, adp, 0)) != 0) { device_printf(dev, "cannot initialize adapter\n"); goto fail; } } if (bootverbose) { if (sc->sc_flags & CREATOR_PAC1) device_printf(dev, "BT9068/PAC1 RAMDAC (%s cursor control)\n", sc->sc_flags & CREATOR_CURINV ? "inverted" : "normal"); else device_printf(dev, "BT498/PAC2 RAMDAC\n"); } device_printf(dev, "resolution %dx%d\n", sc->sc_width, sc->sc_height); unit = device_get_unit(dev); sc->sc_si = make_dev(&creator_fb_devsw, unit, UID_ROOT, GID_WHEEL, 0600, "fb%d", unit); sc->sc_si->si_drv1 = sc; EVENTHANDLER_REGISTER(shutdown_final, creator_shutdown, sc, SHUTDOWN_PRI_DEFAULT); return (0); fail: for (i = 0; i < FFB_NREG && sc->sc_reg[i] != NULL; i++) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_reg[i]), sc->sc_reg[i]); return (error); } /* * /dev/fb interface */ static int creator_fb_open(struct cdev *dev, int flags, int mode, struct thread *td) { return (0); } static int creator_fb_close(struct cdev *dev, int flags, int mode, struct thread *td) { return (0); } static int creator_fb_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) { struct creator_softc *sc; sc = dev->si_drv1; return (creator_ioctl(&sc->sc_va, cmd, data)); } static int creator_fb_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { struct creator_softc *sc; int i; /* * NB: This is a special implementation based on the /dev/fb * requirements of the XFree86/X.Org sunffb(4). */ sc = dev->si_drv1; for (i = 0; i < CREATOR_FB_MAP_SIZE; i++) { if (offset >= creator_fb_map[i].virt && offset < creator_fb_map[i].virt + creator_fb_map[i].size) { offset += creator_fb_map[i].phys - creator_fb_map[i].virt; if (offset >= sc->sc_reg_size) return (EINVAL); *paddr = sc->sc_bh[0] + offset; return (0); } } return (EINVAL); } /* * internal functions */ static void creator_cursor_enable(struct creator_softc *sc, int onoff) { int v; FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE2, FFB_DAC_CUR_CTRL); if (sc->sc_flags & CREATOR_CURINV) v = onoff ? FFB_DAC_CUR_CTRL_P0 | FFB_DAC_CUR_CTRL_P1 : 0; else v = onoff ? 0 : FFB_DAC_CUR_CTRL_P0 | FFB_DAC_CUR_CTRL_P1; FFB_WRITE(sc, FFB_DAC, FFB_DAC_VALUE2, v); } static void creator_cursor_install(struct creator_softc *sc) { int i, j; creator_cursor_enable(sc, 0); FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE2, FFB_DAC_CUR_COLOR1); FFB_WRITE(sc, FFB_DAC, FFB_DAC_VALUE2, 0xffffff); FFB_WRITE(sc, FFB_DAC, FFB_DAC_VALUE2, 0x0); for (i = 0; i < 2; i++) { FFB_WRITE(sc, FFB_DAC, FFB_DAC_TYPE2, i ? FFB_DAC_CUR_BITMAP_P0 : FFB_DAC_CUR_BITMAP_P1); for (j = 0; j < 64; j++) { FFB_WRITE(sc, FFB_DAC, FFB_DAC_VALUE2, *(const uint32_t *)(&creator_mouse_pointer[j][0])); FFB_WRITE(sc, FFB_DAC, FFB_DAC_VALUE2, *(const uint32_t *)(&creator_mouse_pointer[j][4])); } } } static void creator_shutdown(void *xsc) { struct creator_softc *sc = xsc; creator_cursor_enable(sc, 0); /* * In case this is the console set the cursor of the stdout * instance to the start of the last line so OFW output ends * up beneath what FreeBSD left on the screen. 
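/*
 * The table-driven offset translation at the heart of creator_fb_mmap()
 * above: locate the pseudo window containing the file offset, rebase it
 * into the physical bank layout, and refuse anything past the allocated
 * span.  Window addresses here are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

struct fbwin {
	uint64_t virt, phys, size;
};

static const struct fbwin map[] = {
	{ 0x00000000, 0x04000000, 0x00400000 },
	{ 0x00400000, 0x08000000, 0x00400000 },
};

static int
translate(uint64_t off, uint64_t limit, uint64_t *paddr)
{
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (off >= map[i].virt && off < map[i].virt + map[i].size) {
			off += map[i].phys - map[i].virt;
			if (off >= limit)
				return (-1);	/* beyond allocated banks */
			*paddr = off;		/* caller adds the bus base */
			return (0);
		}
	}
	return (-1);
}

int
main(void)
{
	uint64_t pa;

	if (translate(0x00400010, 0x10000000, &pa) == 0)
		printf("translated to %#llx\n", (unsigned long long)pa);
	return (0);
}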
*/ if (sc->sc_flags & CREATOR_CONSOLE) { OF_interpret("stdout @ is my-self 0 to column#", 0); OF_interpret("stdout @ is my-self #lines 1 - to line#", 0); } } Index: user/ngie/bug-237403/sys/dev/fb/fbd.c =================================================================== --- user/ngie/bug-237403/sys/dev/fb/fbd.c (revision 348028) +++ user/ngie/bug-237403/sys/dev/fb/fbd.c (revision 348029) @@ -1,371 +1,372 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Aleksandr Rybalko under sponsorship from the * FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* Generic framebuffer */ /* TODO unlink from VT(9) */ /* TODO done normal /dev/fb methods */ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include "fb_if.h" LIST_HEAD(fb_list_head_t, fb_list_entry) fb_list_head = LIST_HEAD_INITIALIZER(fb_list_head); struct fb_list_entry { struct fb_info *fb_info; struct cdev *fb_si; LIST_ENTRY(fb_list_entry) fb_list; }; struct fbd_softc { device_t sc_dev; struct fb_info *sc_info; }; static void fbd_evh_init(void *); /* SI_ORDER_SECOND, just after EVENTHANDLERs initialized. 
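/*
 * Minimal model of the fbd registry declared above and used by
 * fbd_find()/fbd_register() below: a LIST keyed on the fb_info pointer
 * where re-registering a known info is a no-op.  devfs node creation,
 * vt(4) attach and locking are deliberately left out of the sketch.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	void *info;
	LIST_ENTRY(entry) link;
};
static LIST_HEAD(, entry) head = LIST_HEAD_INITIALIZER(head);

static struct entry *
find(void *info)
{
	struct entry *e;

	LIST_FOREACH(e, &head, link)
		if (e->info == info)
			return (e);
	return (NULL);
}

static int
reg(void *info)
{
	struct entry *e;

	if (find(info) != NULL)
		return (0);		/* already known: just update */
	if ((e = calloc(1, sizeof(*e))) == NULL)
		return (-1);
	e->info = info;
	LIST_INSERT_HEAD(&head, e, link);
	return (0);
}

int
main(void)
{
	int fb;

	reg(&fb);
	reg(&fb);			/* duplicate is a no-op */
	printf("registered: %s\n", find(&fb) ? "yes" : "no");
	return (0);
}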
*/ SYSINIT(fbd_evh_init, SI_SUB_CONFIGURE, SI_ORDER_SECOND, fbd_evh_init, NULL); static d_open_t fb_open; static d_close_t fb_close; static d_read_t fb_read; static d_write_t fb_write; static d_ioctl_t fb_ioctl; static d_mmap_t fb_mmap; static struct cdevsw fb_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = fb_open, .d_close = fb_close, .d_read = fb_read, .d_write = fb_write, .d_ioctl = fb_ioctl, .d_mmap = fb_mmap, .d_name = "fb", }; static int framebuffer_dev_unit = 0; static int fb_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { return (0); } static int fb_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { return (0); } static int fb_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct fb_info *info; int error; error = 0; info = dev->si_drv1; switch (cmd) { case FBIOGTYPE: bcopy(info, (struct fbtype *)data, sizeof(struct fbtype)); break; case FBIO_GETWINORG: /* get frame buffer window origin */ *(u_int *)data = 0; break; case FBIO_GETDISPSTART: /* get display start address */ ((video_display_start_t *)data)->x = 0; ((video_display_start_t *)data)->y = 0; break; case FBIO_GETLINEWIDTH: /* get scan line width in bytes */ *(u_int *)data = info->fb_stride; break; case FBIO_BLANK: /* blank display */ if (info->setblankmode != NULL) error = info->setblankmode(info->fb_priv, *(int *)data); break; default: error = ENOIOCTL; break; } return (error); } static int fb_read(struct cdev *dev, struct uio *uio, int ioflag) { return (0); /* XXX nothing to read, yet */ } static int fb_write(struct cdev *dev, struct uio *uio, int ioflag) { return (0); /* XXX nothing written */ } static int fb_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct fb_info *info; info = dev->si_drv1; if (info->fb_flags & FB_FLAG_NOMMAP) return (ENODEV); if (offset >= 0 && offset < info->fb_size) { if (info->fb_pbase == 0) *paddr = vtophys((uint8_t *)info->fb_vbase + offset); else *paddr = info->fb_pbase + offset; if (info->fb_flags & FB_FLAG_MEMATTR) *memattr = info->fb_memattr; return (0); } return (EINVAL); } static int fb_init(struct fb_list_entry *entry, int unit) { struct fb_info *info; info = entry->fb_info; entry->fb_si = make_dev(&fb_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, "fb%d", unit); entry->fb_si->si_drv1 = info; info->fb_cdev = entry->fb_si; return (0); } int fbd_list() { struct fb_list_entry *entry; if (LIST_EMPTY(&fb_list_head)) return (ENOENT); LIST_FOREACH(entry, &fb_list_head, fb_list) { printf("FB %s @%p\n", entry->fb_info->fb_name, (void *)entry->fb_info->fb_pbase); } return (0); } static struct fb_list_entry * fbd_find(struct fb_info* info) { struct fb_list_entry *entry, *tmp; LIST_FOREACH_SAFE(entry, &fb_list_head, fb_list, tmp) { if (entry->fb_info == info) { return (entry); } } return (NULL); } int fbd_register(struct fb_info* info) { struct fb_list_entry *entry; int err, first; first = 0; if (LIST_EMPTY(&fb_list_head)) first++; entry = fbd_find(info); if (entry != NULL) { /* XXX Update framebuffer params */ return (0); } entry = malloc(sizeof(struct fb_list_entry), M_DEVBUF, M_WAITOK|M_ZERO); entry->fb_info = info; LIST_INSERT_HEAD(&fb_list_head, entry, fb_list); err = fb_init(entry, framebuffer_dev_unit++); if (err) return (err); if (first) { err = vt_fb_attach(info); if (err) return (err); } return (0); } int fbd_unregister(struct fb_info* info) { struct fb_list_entry *entry, *tmp; LIST_FOREACH_SAFE(entry, &fb_list_head, fb_list, tmp) { if 
(entry->fb_info == info) { LIST_REMOVE(entry, fb_list); if (LIST_EMPTY(&fb_list_head)) vt_fb_detach(info); free(entry, M_DEVBUF); return (0); } } return (ENOENT); } static void register_fb_wrap(void *arg, void *ptr) { fbd_register((struct fb_info *)ptr); } static void unregister_fb_wrap(void *arg, void *ptr) { fbd_unregister((struct fb_info *)ptr); } static void fbd_evh_init(void *ctx) { EVENTHANDLER_REGISTER(register_framebuffer, register_fb_wrap, NULL, EVENTHANDLER_PRI_ANY); EVENTHANDLER_REGISTER(unregister_framebuffer, unregister_fb_wrap, NULL, EVENTHANDLER_PRI_ANY); } /* Newbus methods. */ static int fbd_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int fbd_attach(device_t dev) { struct fbd_softc *sc; int err; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_info = FB_GETINFO(device_get_parent(dev)); if (sc->sc_info == NULL) return (ENXIO); err = fbd_register(sc->sc_info); return (err); } static int fbd_detach(device_t dev) { struct fbd_softc *sc; int err; sc = device_get_softc(dev); err = fbd_unregister(sc->sc_info); return (err); } static device_method_t fbd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fbd_probe), DEVMETHOD(device_attach, fbd_attach), DEVMETHOD(device_detach, fbd_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), { 0, 0 } }; driver_t fbd_driver = { "fbd", fbd_methods, sizeof(struct fbd_softc) }; devclass_t fbd_devclass; DRIVER_MODULE(fbd, fb, fbd_driver, fbd_devclass, 0, 0); DRIVER_MODULE(fbd, drmn, fbd_driver, fbd_devclass, 0, 0); DRIVER_MODULE(fbd, udl, fbd_driver, fbd_devclass, 0, 0); MODULE_VERSION(fbd, 1); Index: user/ngie/bug-237403/sys/dev/hdmi/dwc_hdmi.c =================================================================== --- user/ngie/bug-237403/sys/dev/hdmi/dwc_hdmi.c (revision 348028) +++ user/ngie/bug-237403/sys/dev/hdmi/dwc_hdmi.c (revision 348029) @@ -1,850 +1,851 @@ /*- * Copyright (c) 2015 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
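/*
 * dwc_hdmi_av_composer() below derives the frame-composer blanking
 * registers from the mode geometry.  The same arithmetic on the
 * standard 1080p60 CEA timing (1920x1080, 148.5 MHz dot clock), shown
 * here purely for illustration:
 */
#include <stdio.h>

int
main(void)
{
	unsigned hdisplay = 1920, hsync_start = 2008, hsync_end = 2052,
	    htotal = 2200;
	unsigned vdisplay = 1080, vsync_start = 1084, vsync_end = 1089,
	    vtotal = 1125;

	printf("hblank=%u hfp=%u hsync=%u\n",
	    htotal - hdisplay,		/* 280 -> HDMI_FC_INHBLANK* */
	    hsync_start - hdisplay,	/* 88  -> HDMI_FC_HSYNCINDELAY* */
	    hsync_end - hsync_start);	/* 44  -> HDMI_FC_HSYNCINWIDTH* */
	printf("vblank=%u vfp=%u vsync=%u\n",
	    vtotal - vdisplay,		/* 45  -> HDMI_FC_INVBLANK */
	    vsync_start - vdisplay,	/* 4   -> HDMI_FC_VSYNCINDELAY */
	    vsync_end - vsync_start);	/* 5   -> HDMI_FC_VSYNCINWIDTH */
	return (0);
}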
*/ #include __FBSDID("$FreeBSD$"); /* * HDMI core module */ #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include "hdmi_if.h" #define I2C_DDC_ADDR (0x50 << 1) #define I2C_DDC_SEGADDR (0x30 << 1) #define EDID_LENGTH 0x80 #define EXT_TAG 0x00 #define CEA_TAG_ID 0x02 #define CEA_DTD 0x03 #define DTD_BASIC_AUDIO (1 << 6) #define CEA_REV 0x02 #define CEA_DATA_OFF 0x03 #define CEA_DATA_START 4 #define BLOCK_TAG(x) (((x) >> 5) & 0x7) #define BLOCK_TAG_VSDB 3 #define BLOCK_LEN(x) ((x) & 0x1f) #define HDMI_VSDB_MINLEN 5 #define HDMI_OUI "\x03\x0c\x00" #define HDMI_OUI_LEN 3 static void dwc_hdmi_phy_wait_i2c_done(struct dwc_hdmi_softc *sc, int msec) { uint8_t val; val = RD1(sc, HDMI_IH_I2CMPHY_STAT0) & (HDMI_IH_I2CMPHY_STAT0_DONE | HDMI_IH_I2CMPHY_STAT0_ERROR); while (val == 0) { pause("HDMI_PHY", hz/100); msec -= 10; if (msec <= 0) return; val = RD1(sc, HDMI_IH_I2CMPHY_STAT0) & (HDMI_IH_I2CMPHY_STAT0_DONE | HDMI_IH_I2CMPHY_STAT0_ERROR); } } static void dwc_hdmi_phy_i2c_write(struct dwc_hdmi_softc *sc, unsigned short data, unsigned char addr) { /* clear DONE and ERROR flags */ WR1(sc, HDMI_IH_I2CMPHY_STAT0, HDMI_IH_I2CMPHY_STAT0_DONE | HDMI_IH_I2CMPHY_STAT0_ERROR); WR1(sc, HDMI_PHY_I2CM_ADDRESS_ADDR, addr); WR1(sc, HDMI_PHY_I2CM_DATAO_1_ADDR, ((data >> 8) & 0xff)); WR1(sc, HDMI_PHY_I2CM_DATAO_0_ADDR, ((data >> 0) & 0xff)); WR1(sc, HDMI_PHY_I2CM_OPERATION_ADDR, HDMI_PHY_I2CM_OPERATION_ADDR_WRITE); dwc_hdmi_phy_wait_i2c_done(sc, 1000); } static void dwc_hdmi_disable_overflow_interrupts(struct dwc_hdmi_softc *sc) { WR1(sc, HDMI_IH_MUTE_FC_STAT2, HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK); WR1(sc, HDMI_FC_MASK2, HDMI_FC_MASK2_LOW_PRI | HDMI_FC_MASK2_HIGH_PRI); } static void dwc_hdmi_av_composer(struct dwc_hdmi_softc *sc) { uint8_t inv_val; int is_dvi; int hblank, vblank, hsync_len, hfp, vfp; /* Set up HDMI_FC_INVIDCONF */ inv_val = ((sc->sc_mode.flags & VID_PVSYNC) ? HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH : HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW); inv_val |= ((sc->sc_mode.flags & VID_PHSYNC) ? HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH : HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW); inv_val |= HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH; inv_val |= ((sc->sc_mode.flags & VID_INTERLACE) ? HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH : HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW); inv_val |= ((sc->sc_mode.flags & VID_INTERLACE) ? HDMI_FC_INVIDCONF_IN_I_P_INTERLACED : HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE); /* TODO: implement HDMI part */ is_dvi = sc->sc_has_audio == 0; inv_val |= (is_dvi ? 
HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE : HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE); WR1(sc, HDMI_FC_INVIDCONF, inv_val); /* Set up horizontal active pixel region width */ WR1(sc, HDMI_FC_INHACTV1, sc->sc_mode.hdisplay >> 8); WR1(sc, HDMI_FC_INHACTV0, sc->sc_mode.hdisplay); /* Set up vertical blanking pixel region width */ WR1(sc, HDMI_FC_INVACTV1, sc->sc_mode.vdisplay >> 8); WR1(sc, HDMI_FC_INVACTV0, sc->sc_mode.vdisplay); /* Set up horizontal blanking pixel region width */ hblank = sc->sc_mode.htotal - sc->sc_mode.hdisplay; WR1(sc, HDMI_FC_INHBLANK1, hblank >> 8); WR1(sc, HDMI_FC_INHBLANK0, hblank); /* Set up vertical blanking pixel region width */ vblank = sc->sc_mode.vtotal - sc->sc_mode.vdisplay; WR1(sc, HDMI_FC_INVBLANK, vblank); /* Set up HSYNC active edge delay width (in pixel clks) */ hfp = sc->sc_mode.hsync_start - sc->sc_mode.hdisplay; WR1(sc, HDMI_FC_HSYNCINDELAY1, hfp >> 8); WR1(sc, HDMI_FC_HSYNCINDELAY0, hfp); /* Set up VSYNC active edge delay (in pixel clks) */ vfp = sc->sc_mode.vsync_start - sc->sc_mode.vdisplay; WR1(sc, HDMI_FC_VSYNCINDELAY, vfp); hsync_len = (sc->sc_mode.hsync_end - sc->sc_mode.hsync_start); /* Set up HSYNC active pulse width (in pixel clks) */ WR1(sc, HDMI_FC_HSYNCINWIDTH1, hsync_len >> 8); WR1(sc, HDMI_FC_HSYNCINWIDTH0, hsync_len); /* Set up VSYNC active edge delay (in pixel clks) */ WR1(sc, HDMI_FC_VSYNCINWIDTH, (sc->sc_mode.vsync_end - sc->sc_mode.vsync_start)); } static void dwc_hdmi_phy_enable_power(struct dwc_hdmi_softc *sc, uint8_t enable) { uint8_t reg; reg = RD1(sc, HDMI_PHY_CONF0); reg &= ~HDMI_PHY_CONF0_PDZ_MASK; reg |= (enable << HDMI_PHY_CONF0_PDZ_OFFSET); WR1(sc, HDMI_PHY_CONF0, reg); } static void dwc_hdmi_phy_enable_tmds(struct dwc_hdmi_softc *sc, uint8_t enable) { uint8_t reg; reg = RD1(sc, HDMI_PHY_CONF0); reg &= ~HDMI_PHY_CONF0_ENTMDS_MASK; reg |= (enable << HDMI_PHY_CONF0_ENTMDS_OFFSET); WR1(sc, HDMI_PHY_CONF0, reg); } static void dwc_hdmi_phy_gen2_pddq(struct dwc_hdmi_softc *sc, uint8_t enable) { uint8_t reg; reg = RD1(sc, HDMI_PHY_CONF0); reg &= ~HDMI_PHY_CONF0_GEN2_PDDQ_MASK; reg |= (enable << HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET); WR1(sc, HDMI_PHY_CONF0, reg); } static void dwc_hdmi_phy_gen2_txpwron(struct dwc_hdmi_softc *sc, uint8_t enable) { uint8_t reg; reg = RD1(sc, HDMI_PHY_CONF0); reg &= ~HDMI_PHY_CONF0_GEN2_TXPWRON_MASK; reg |= (enable << HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET); WR1(sc, HDMI_PHY_CONF0, reg); } static void dwc_hdmi_phy_sel_data_en_pol(struct dwc_hdmi_softc *sc, uint8_t enable) { uint8_t reg; reg = RD1(sc, HDMI_PHY_CONF0); reg &= ~HDMI_PHY_CONF0_SELDATAENPOL_MASK; reg |= (enable << HDMI_PHY_CONF0_SELDATAENPOL_OFFSET); WR1(sc, HDMI_PHY_CONF0, reg); } static void dwc_hdmi_phy_sel_interface_control(struct dwc_hdmi_softc *sc, uint8_t enable) { uint8_t reg; reg = RD1(sc, HDMI_PHY_CONF0); reg &= ~HDMI_PHY_CONF0_SELDIPIF_MASK; reg |= (enable << HDMI_PHY_CONF0_SELDIPIF_OFFSET); WR1(sc, HDMI_PHY_CONF0, reg); } static inline void dwc_hdmi_phy_test_clear(struct dwc_hdmi_softc *sc, unsigned char bit) { uint8_t val; val = RD1(sc, HDMI_PHY_TST0); val &= ~HDMI_PHY_TST0_TSTCLR_MASK; val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) & HDMI_PHY_TST0_TSTCLR_MASK; WR1(sc, HDMI_PHY_TST0, val); } static void dwc_hdmi_clear_overflow(struct dwc_hdmi_softc *sc) { int count; uint8_t val; /* TMDS software reset */ WR1(sc, HDMI_MC_SWRSTZ, (uint8_t)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ); val = RD1(sc, HDMI_FC_INVIDCONF); for (count = 0 ; count < 4 ; count++) WR1(sc, HDMI_FC_INVIDCONF, val); } static int dwc_hdmi_phy_configure(struct dwc_hdmi_softc *sc) { uint8_t 
val; uint8_t msec; WR1(sc, HDMI_MC_FLOWCTRL, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS); /* gen2 tx power off */ dwc_hdmi_phy_gen2_txpwron(sc, 0); /* gen2 pddq */ dwc_hdmi_phy_gen2_pddq(sc, 1); /* PHY reset */ WR1(sc, HDMI_MC_PHYRSTZ, HDMI_MC_PHYRSTZ_DEASSERT); WR1(sc, HDMI_MC_PHYRSTZ, HDMI_MC_PHYRSTZ_ASSERT); WR1(sc, HDMI_MC_HEACPHY_RST, HDMI_MC_HEACPHY_RST_ASSERT); dwc_hdmi_phy_test_clear(sc, 1); WR1(sc, HDMI_PHY_I2CM_SLAVE_ADDR, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2); dwc_hdmi_phy_test_clear(sc, 0); /* * Following initialization are for 8bit per color case */ /* * PLL/MPLL config, see section 24.7.22 in TRM * config, see section 24.7.22 */ if (sc->sc_mode.dot_clock*1000 <= 45250000) { dwc_hdmi_phy_i2c_write(sc, CPCE_CTRL_45_25, HDMI_PHY_I2C_CPCE_CTRL); dwc_hdmi_phy_i2c_write(sc, GMPCTRL_45_25, HDMI_PHY_I2C_GMPCTRL); } else if (sc->sc_mode.dot_clock*1000 <= 92500000) { dwc_hdmi_phy_i2c_write(sc, CPCE_CTRL_92_50, HDMI_PHY_I2C_CPCE_CTRL); dwc_hdmi_phy_i2c_write(sc, GMPCTRL_92_50, HDMI_PHY_I2C_GMPCTRL); } else if (sc->sc_mode.dot_clock*1000 <= 185000000) { dwc_hdmi_phy_i2c_write(sc, CPCE_CTRL_185, HDMI_PHY_I2C_CPCE_CTRL); dwc_hdmi_phy_i2c_write(sc, GMPCTRL_185, HDMI_PHY_I2C_GMPCTRL); } else { dwc_hdmi_phy_i2c_write(sc, CPCE_CTRL_370, HDMI_PHY_I2C_CPCE_CTRL); dwc_hdmi_phy_i2c_write(sc, GMPCTRL_370, HDMI_PHY_I2C_GMPCTRL); } /* * Values described in TRM section 34.9.2 PLL/MPLL Generic * Configuration Settings. Table 34-23. */ if (sc->sc_mode.dot_clock*1000 <= 54000000) { dwc_hdmi_phy_i2c_write(sc, 0x091c, HDMI_PHY_I2C_CURRCTRL); } else if (sc->sc_mode.dot_clock*1000 <= 58400000) { dwc_hdmi_phy_i2c_write(sc, 0x091c, HDMI_PHY_I2C_CURRCTRL); } else if (sc->sc_mode.dot_clock*1000 <= 72000000) { dwc_hdmi_phy_i2c_write(sc, 0x06dc, HDMI_PHY_I2C_CURRCTRL); } else if (sc->sc_mode.dot_clock*1000 <= 74250000) { dwc_hdmi_phy_i2c_write(sc, 0x06dc, HDMI_PHY_I2C_CURRCTRL); } else if (sc->sc_mode.dot_clock*1000 <= 118800000) { dwc_hdmi_phy_i2c_write(sc, 0x091c, HDMI_PHY_I2C_CURRCTRL); } else if (sc->sc_mode.dot_clock*1000 <= 216000000) { dwc_hdmi_phy_i2c_write(sc, 0x06dc, HDMI_PHY_I2C_CURRCTRL); } else { panic("Unsupported mode\n"); } dwc_hdmi_phy_i2c_write(sc, 0x0000, HDMI_PHY_I2C_PLLPHBYCTRL); dwc_hdmi_phy_i2c_write(sc, MSM_CTRL_FB_CLK, HDMI_PHY_I2C_MSM_CTRL); /* RESISTANCE TERM 133 Ohm */ dwc_hdmi_phy_i2c_write(sc, TXTERM_133, HDMI_PHY_I2C_TXTERM); /* REMOVE CLK TERM */ dwc_hdmi_phy_i2c_write(sc, CKCALCTRL_OVERRIDE, HDMI_PHY_I2C_CKCALCTRL); if (sc->sc_mode.dot_clock*1000 > 148500000) { dwc_hdmi_phy_i2c_write(sc,CKSYMTXCTRL_OVERRIDE | CKSYMTXCTRL_TX_SYMON | CKSYMTXCTRL_TX_TRBON | CKSYMTXCTRL_TX_CK_SYMON, HDMI_PHY_I2C_CKSYMTXCTRL); dwc_hdmi_phy_i2c_write(sc, VLEVCTRL_TX_LVL(9) | VLEVCTRL_CK_LVL(9), HDMI_PHY_I2C_VLEVCTRL); } else { dwc_hdmi_phy_i2c_write(sc,CKSYMTXCTRL_OVERRIDE | CKSYMTXCTRL_TX_SYMON | CKSYMTXCTRL_TX_TRAON | CKSYMTXCTRL_TX_CK_SYMON, HDMI_PHY_I2C_CKSYMTXCTRL); dwc_hdmi_phy_i2c_write(sc, VLEVCTRL_TX_LVL(13) | VLEVCTRL_CK_LVL(13), HDMI_PHY_I2C_VLEVCTRL); } dwc_hdmi_phy_enable_power(sc, 1); /* toggle TMDS enable */ dwc_hdmi_phy_enable_tmds(sc, 0); dwc_hdmi_phy_enable_tmds(sc, 1); /* gen2 tx power on */ dwc_hdmi_phy_gen2_txpwron(sc, 1); dwc_hdmi_phy_gen2_pddq(sc, 0); /*Wait for PHY PLL lock */ msec = 4; val = RD1(sc, HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK; while (val == 0) { DELAY(1000); if (msec-- == 0) { device_printf(sc->sc_dev, "PHY PLL not locked\n"); return (-1); } val = RD1(sc, HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK; } return true; } static void dwc_hdmi_phy_init(struct dwc_hdmi_softc *sc) { 
int i; /* HDMI Phy spec says to do the phy initialization sequence twice */ for (i = 0 ; i < 2 ; i++) { dwc_hdmi_phy_sel_data_en_pol(sc, 1); dwc_hdmi_phy_sel_interface_control(sc, 0); dwc_hdmi_phy_enable_tmds(sc, 0); dwc_hdmi_phy_enable_power(sc, 0); /* Enable CSC */ dwc_hdmi_phy_configure(sc); } } static void dwc_hdmi_enable_video_path(struct dwc_hdmi_softc *sc) { uint8_t clkdis; /* * Control period timing * Values are minimal according to HDMI spec 1.4a */ WR1(sc, HDMI_FC_CTRLDUR, 12); WR1(sc, HDMI_FC_EXCTRLDUR, 32); WR1(sc, HDMI_FC_EXCTRLSPAC, 1); /* * Bits to fill data lines not used to transmit preamble * for channels 0, 1, and 2 respectively */ WR1(sc, HDMI_FC_CH0PREAM, 0x0B); WR1(sc, HDMI_FC_CH1PREAM, 0x16); WR1(sc, HDMI_FC_CH2PREAM, 0x21); /* Save CEC clock */ clkdis = RD1(sc, HDMI_MC_CLKDIS) & HDMI_MC_CLKDIS_CECCLK_DISABLE; clkdis |= ~HDMI_MC_CLKDIS_CECCLK_DISABLE; /* Enable pixel clock and tmds data path */ clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE; WR1(sc, HDMI_MC_CLKDIS, clkdis); clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE; WR1(sc, HDMI_MC_CLKDIS, clkdis); } static void dwc_hdmi_configure_audio(struct dwc_hdmi_softc *sc) { unsigned int n; uint8_t val; if (sc->sc_has_audio == 0) return; /* The following values are for 48 kHz */ switch (sc->sc_mode.dot_clock) { case 25170: n = 6864; break; case 27020: n = 6144; break; case 74170: n = 11648; break; case 148350: n = 5824; break; default: n = 6144; break; } WR1(sc, HDMI_AUD_N1, (n >> 0) & 0xff); WR1(sc, HDMI_AUD_N2, (n >> 8) & 0xff); WR1(sc, HDMI_AUD_N3, (n >> 16) & 0xff); val = RD1(sc, HDMI_AUD_CTS3); val &= ~(HDMI_AUD_CTS3_N_SHIFT_MASK | HDMI_AUD_CTS3_CTS_MANUAL); WR1(sc, HDMI_AUD_CTS3, val); val = RD1(sc, HDMI_AUD_CONF0); val &= ~HDMI_AUD_CONF0_INTERFACE_MASK; val |= HDMI_AUD_CONF0_INTERFACE_IIS; val &= ~HDMI_AUD_CONF0_I2SINEN_MASK; val |= HDMI_AUD_CONF0_I2SINEN_CH2; WR1(sc, HDMI_AUD_CONF0, val); val = RD1(sc, HDMI_AUD_CONF1); val &= ~HDMI_AUD_CONF1_DATAMODE_MASK; val |= HDMI_AUD_CONF1_DATAMODE_IIS; val &= ~HDMI_AUD_CONF1_DATWIDTH_MASK; val |= HDMI_AUD_CONF1_DATWIDTH_16BIT; WR1(sc, HDMI_AUD_CONF1, val); WR1(sc, HDMI_AUD_INPUTCLKFS, HDMI_AUD_INPUTCLKFS_64); WR1(sc, HDMI_FC_AUDICONF0, 1 << 4); /* CC=1 */ WR1(sc, HDMI_FC_AUDICONF1, 0); WR1(sc, HDMI_FC_AUDICONF2, 0); /* CA=0 */ WR1(sc, HDMI_FC_AUDICONF3, 0); WR1(sc, HDMI_FC_AUDSV, 0xee); /* channels valid */ /* Enable audio clock */ val = RD1(sc, HDMI_MC_CLKDIS); val &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE; WR1(sc, HDMI_MC_CLKDIS, val); } static void dwc_hdmi_video_packetize(struct dwc_hdmi_softc *sc) { unsigned int color_depth = 0; unsigned int remap_size = HDMI_VP_REMAP_YCC422_16BIT; unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP; uint8_t val; output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; color_depth = 4; /* set the packetizer registers */ val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) & HDMI_VP_PR_CD_COLOR_DEPTH_MASK); WR1(sc, HDMI_VP_PR_CD, val); val = RD1(sc, HDMI_VP_STUFF); val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK; val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE; WR1(sc, HDMI_VP_STUFF, val); val = RD1(sc, HDMI_VP_CONF); val &= ~(HDMI_VP_CONF_PR_EN_MASK | HDMI_VP_CONF_BYPASS_SELECT_MASK); val |= HDMI_VP_CONF_PR_EN_DISABLE | HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER; WR1(sc, HDMI_VP_CONF, val); val = RD1(sc, HDMI_VP_STUFF); val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK; val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET; WR1(sc, HDMI_VP_STUFF, val); WR1(sc, HDMI_VP_REMAP, remap_size); if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) { val = RD1(sc, 
HDMI_VP_CONF); val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK | HDMI_VP_CONF_PP_EN_ENMASK | HDMI_VP_CONF_YCC422_EN_MASK); val |= HDMI_VP_CONF_BYPASS_EN_DISABLE | HDMI_VP_CONF_PP_EN_ENABLE | HDMI_VP_CONF_YCC422_EN_DISABLE; WR1(sc, HDMI_VP_CONF, val); } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) { val = RD1(sc, HDMI_VP_CONF); val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK | HDMI_VP_CONF_PP_EN_ENMASK | HDMI_VP_CONF_YCC422_EN_MASK); val |= HDMI_VP_CONF_BYPASS_EN_DISABLE | HDMI_VP_CONF_PP_EN_DISABLE | HDMI_VP_CONF_YCC422_EN_ENABLE; WR1(sc, HDMI_VP_CONF, val); } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) { val = RD1(sc, HDMI_VP_CONF); val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK | HDMI_VP_CONF_PP_EN_ENMASK | HDMI_VP_CONF_YCC422_EN_MASK); val |= HDMI_VP_CONF_BYPASS_EN_ENABLE | HDMI_VP_CONF_PP_EN_DISABLE | HDMI_VP_CONF_YCC422_EN_DISABLE; WR1(sc, HDMI_VP_CONF, val); } else { return; } val = RD1(sc, HDMI_VP_STUFF); val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK | HDMI_VP_STUFF_YCC422_STUFFING_MASK); val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE | HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE; WR1(sc, HDMI_VP_STUFF, val); val = RD1(sc, HDMI_VP_CONF); val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK; val |= output_select; WR1(sc, HDMI_VP_CONF, val); } static void dwc_hdmi_video_sample(struct dwc_hdmi_softc *sc) { int color_format; uint8_t val; color_format = 0x01; val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE | ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) & HDMI_TX_INVID0_VIDEO_MAPPING_MASK); WR1(sc, HDMI_TX_INVID0, val); /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */ val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE | HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE | HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE; WR1(sc, HDMI_TX_INSTUFFING, val); WR1(sc, HDMI_TX_GYDATA0, 0x0); WR1(sc, HDMI_TX_GYDATA1, 0x0); WR1(sc, HDMI_TX_RCRDATA0, 0x0); WR1(sc, HDMI_TX_RCRDATA1, 0x0); WR1(sc, HDMI_TX_BCBDATA0, 0x0); WR1(sc, HDMI_TX_BCBDATA1, 0x0); } static void dwc_hdmi_tx_hdcp_config(struct dwc_hdmi_softc *sc) { uint8_t de, val; de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH; /* Disable RX detect */ val = RD1(sc, HDMI_A_HDCPCFG0); val &= ~HDMI_A_HDCPCFG0_RXDETECT_MASK; val |= HDMI_A_HDCPCFG0_RXDETECT_DISABLE; WR1(sc, HDMI_A_HDCPCFG0, val); /* Set polarity */ val = RD1(sc, HDMI_A_VIDPOLCFG); val &= ~HDMI_A_VIDPOLCFG_DATAENPOL_MASK; val |= de; WR1(sc, HDMI_A_VIDPOLCFG, val); /* Disable encryption */ val = RD1(sc, HDMI_A_HDCPCFG1); val &= ~HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK; val |= HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE; WR1(sc, HDMI_A_HDCPCFG1, val); } static int dwc_hdmi_set_mode(struct dwc_hdmi_softc *sc) { /* XXX */ sc->sc_has_audio = 1; dwc_hdmi_disable_overflow_interrupts(sc); dwc_hdmi_av_composer(sc); dwc_hdmi_phy_init(sc); dwc_hdmi_enable_video_path(sc); dwc_hdmi_configure_audio(sc); /* TODO: dwc_hdmi_config_avi(sc); */ dwc_hdmi_video_packetize(sc); /* TODO: dwc_hdmi_video_csc(sc); */ dwc_hdmi_video_sample(sc); dwc_hdmi_tx_hdcp_config(sc); dwc_hdmi_clear_overflow(sc); return (0); } static int hdmi_edid_read(struct dwc_hdmi_softc *sc, int block, uint8_t **edid, uint32_t *edid_len) { device_t i2c_dev; int result; uint8_t addr = block & 1 ? 
EDID_LENGTH : 0; uint8_t segment = block >> 1; struct iic_msg msg[] = { { I2C_DDC_SEGADDR, IIC_M_WR, 1, &segment }, { I2C_DDC_ADDR, IIC_M_WR, 1, &addr }, { I2C_DDC_ADDR, IIC_M_RD, EDID_LENGTH, sc->sc_edid } }; *edid = NULL; *edid_len = 0; i2c_dev = NULL; if (sc->sc_get_i2c_dev != NULL) i2c_dev = sc->sc_get_i2c_dev(sc->sc_dev); if (!i2c_dev) { device_printf(sc->sc_dev, "no DDC device found\n"); return (ENXIO); } if (bootverbose) device_printf(sc->sc_dev, "reading EDID from %s, block %d, addr %02x\n", device_get_nameunit(i2c_dev), block, I2C_DDC_ADDR/2); result = iicbus_request_bus(i2c_dev, sc->sc_dev, IIC_INTRWAIT); if (result) { device_printf(sc->sc_dev, "failed to request i2c bus: %d\n", result); return (result); } result = iicbus_transfer(i2c_dev, msg, 3); iicbus_release_bus(i2c_dev, sc->sc_dev); if (result) { device_printf(sc->sc_dev, "i2c transfer failed: %d\n", result); return (result); } else { *edid_len = sc->sc_edid_len; *edid = sc->sc_edid; } return (result); } static void dwc_hdmi_detect_cable(void *arg) { struct dwc_hdmi_softc *sc; uint32_t stat; sc = arg; stat = RD1(sc, HDMI_IH_PHY_STAT0); if ((stat & HDMI_IH_PHY_STAT0_HPD) != 0) { EVENTHANDLER_INVOKE(hdmi_event, sc->sc_dev, HDMI_EVENT_CONNECTED); } /* Finished with the interrupt hook */ config_intrhook_disestablish(&sc->sc_mode_hook); } int dwc_hdmi_init(device_t dev) { struct dwc_hdmi_softc *sc; int err; sc = device_get_softc(dev); err = 0; sc->sc_edid = malloc(EDID_LENGTH, M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_edid_len = EDID_LENGTH; device_printf(sc->sc_dev, "HDMI controller %02x:%02x:%02x:%02x\n", RD1(sc, HDMI_DESIGN_ID), RD1(sc, HDMI_REVISION_ID), RD1(sc, HDMI_PRODUCT_ID0), RD1(sc, HDMI_PRODUCT_ID1)); WR1(sc, HDMI_PHY_POL0, HDMI_PHY_POL0_HPD); WR1(sc, HDMI_IH_PHY_STAT0, HDMI_IH_PHY_STAT0_HPD); sc->sc_mode_hook.ich_func = dwc_hdmi_detect_cable; sc->sc_mode_hook.ich_arg = sc; if (config_intrhook_establish(&sc->sc_mode_hook) != 0) { err = ENOMEM; goto out; } out: if (err != 0) { free(sc->sc_edid, M_DEVBUF); sc->sc_edid = NULL; } return (err); } static int dwc_hdmi_detect_hdmi_vsdb(uint8_t *edid) { int off, p, btag, blen; if (edid[EXT_TAG] != CEA_TAG_ID) return (0); off = edid[CEA_DATA_OFF]; /* CEA data block collection starts at byte 4 */ if (off <= CEA_DATA_START) return (0); /* Parse the CEA data blocks */ for (p = CEA_DATA_START; p < off;) { btag = BLOCK_TAG(edid[p]); blen = BLOCK_LEN(edid[p]); /* Make sure the length is sane */ if (p + blen + 1 > off) break; /* Look for a VSDB with the HDMI 24-bit IEEE registration ID */ if (btag == BLOCK_TAG_VSDB && blen >= HDMI_VSDB_MINLEN && memcmp(&edid[p + 1], HDMI_OUI, HDMI_OUI_LEN) == 0) return (1); /* Next data block */ p += (1 + blen); } /* Not found */ return (0); } static void dwc_hdmi_detect_hdmi(struct dwc_hdmi_softc *sc) { uint8_t *edid; uint32_t edid_len; int block; sc->sc_has_audio = 0; /* Scan through extension blocks, looking for a CEA-861 block */ for (block = 1; block <= sc->sc_edid_info.edid_ext_block_count; block++) { if (hdmi_edid_read(sc, block, &edid, &edid_len) != 0) return; if (dwc_hdmi_detect_hdmi_vsdb(edid) != 0) { if (bootverbose) device_printf(sc->sc_dev, "enabling audio support\n"); sc->sc_has_audio = (edid[CEA_DTD] & DTD_BASIC_AUDIO) != 0; return; } } } int dwc_hdmi_get_edid(device_t dev, uint8_t **edid, uint32_t *edid_len) { struct dwc_hdmi_softc *sc; int error; sc = device_get_softc(dev); memset(&sc->sc_edid_info, 0, sizeof(sc->sc_edid_info)); error = hdmi_edid_read(sc, 0, edid, edid_len); if (error != 0) return (error); edid_parse(*edid, 
&sc->sc_edid_info); return (0); } int dwc_hdmi_set_videomode(device_t dev, const struct videomode *mode) { struct dwc_hdmi_softc *sc; sc = device_get_softc(dev); memcpy(&sc->sc_mode, mode, sizeof(*mode)); dwc_hdmi_detect_hdmi(sc); dwc_hdmi_set_mode(sc); return (0); } Index: user/ngie/bug-237403/sys/dev/iscsi_initiator/iscsi.c =================================================================== --- user/ngie/bug-237403/sys/dev/iscsi_initiator/iscsi.c (revision 348028) +++ user/ngie/bug-237403/sys/dev/iscsi_initiator/iscsi.c (revision 348029) @@ -1,878 +1,881 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005-2011 Daniel Braniss * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ /* | $Id: iscsi.c 752 2009-08-20 11:23:28Z danny $ */ #include __FBSDID("$FreeBSD$"); #include "opt_iscsi_initiator.h" #include #include +#ifdef DO_EVENTHANDLER +#include +#endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static char *iscsi_driver_version = "2.3.1"; static struct isc_softc *isc; MALLOC_DEFINE(M_ISCSI, "iSCSI", "iSCSI driver"); MALLOC_DEFINE(M_ISCSIBUF, "iSCbuf", "iSCSI buffers"); static MALLOC_DEFINE(M_TMP, "iSCtmp", "iSCSI tmp"); #ifdef ISCSI_INITIATOR_DEBUG int iscsi_debug = ISCSI_INITIATOR_DEBUG; SYSCTL_INT(_debug, OID_AUTO, iscsi_initiator, CTLFLAG_RW, &iscsi_debug, 0, "iSCSI driver debug flag"); struct mtx iscsi_dbg_mtx; #endif static int max_sessions = MAX_SESSIONS; SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_sessions, CTLFLAG_RDTUN, &max_sessions, 0, "Max sessions allowed"); static int max_pdus = MAX_PDUS; SYSCTL_INT(_net, OID_AUTO, iscsi_initiator_max_pdus, CTLFLAG_RDTUN, &max_pdus, 0, "Max PDU pool"); static char isid[6+1] = { 0x80, 'D', 'I', 'B', '0', '0', 0 }; static int i_create_session(struct cdev *dev, int *ndev); static int i_ping(struct cdev *dev); static int i_send(struct cdev *dev, caddr_t arg, struct thread *td); static int i_recv(struct cdev *dev, caddr_t arg, struct thread *td); static int i_setsoc(isc_session_t *sp, int fd, struct thread *td); static int i_fullfeature(struct cdev *dev, int flag); static d_open_t iscsi_open; static d_close_t iscsi_close; static d_ioctl_t iscsi_ioctl; #ifdef ISCSI_INITIATOR_DEBUG static d_read_t iscsi_read; #endif static struct cdevsw iscsi_cdevsw = { .d_version = D_VERSION, .d_open = iscsi_open, .d_close = iscsi_close, .d_ioctl = iscsi_ioctl, #ifdef ISCSI_INITIATOR_DEBUG .d_read = iscsi_read, #endif .d_name = "iSCSI", }; static int iscsi_open(struct cdev *dev, int flags, int otype, struct thread *td) { debug_called(8); debug(7, "dev=%d", dev2unit(dev)); if(dev2unit(dev) > max_sessions) { // should not happen return ENODEV; } return 0; } static int iscsi_close(struct cdev *dev, int flag, int otyp, struct thread *td) { isc_session_t *sp; debug_called(8); debug(3, "session=%d flag=%x", dev2unit(dev), flag); if(dev2unit(dev) == max_sessions) { return 0; } sp = dev->si_drv2; if(sp != NULL) { sdebug(3, "sp->flags=%x", sp->flags ); /* | if still in full phase, this probably means | that something went really bad. | it could be a result from 'shutdown', in which case | we will ignore it (so buffers can be flushed). | the problem is that there is no way of differentiating | between a shutdown procedure and 'iscontrol' dying. */ if(sp->flags & ISC_FFPHASE) // delay in case this is a shutdown. 
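/*
 | The delay below is a bounded grace period, not a handshake: tsleep()
 | parks this thread for at most 60*hz ticks (one minute at any hz)
 | unless an explicit wakeup(sp) on the same wait channel fires first.
 */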
tsleep(sp, PRIBIO, "isc-cls", 60*hz); ism_stop(sp); } debug(2, "done"); return 0; } static int iscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int mode, struct thread *td) { struct isc_softc *sc; isc_session_t *sp; isc_opt_t *opt; int error; debug_called(8); error = 0; if(dev2unit(dev) == max_sessions) { /* | non Session commands */ sc = dev->si_drv1; if(sc == NULL) return ENXIO; switch(cmd) { case ISCSISETSES: error = i_create_session(dev, (int *)arg); if(error == 0) break; default: error = ENXIO; } return error; } /* | session commands */ sp = dev->si_drv2; if(sp == NULL) return ENXIO; sdebug(6, "dev=%d cmd=%d", dev2unit(dev), (int)(cmd & 0xff)); switch(cmd) { case ISCSISETSOC: error = i_setsoc(sp, *(u_int *)arg, td); break; case ISCSISETOPT: opt = (isc_opt_t *)arg; error = i_setopt(sp, opt); break; case ISCSISEND: error = i_send(dev, arg, td); break; case ISCSIRECV: error = i_recv(dev, arg, td); break; case ISCSIPING: error = i_ping(dev); break; case ISCSISTART: error = sp->soc == NULL? ENOTCONN: i_fullfeature(dev, 1); if(error == 0) { sp->proc = td->td_proc; SYSCTL_ADD_INT(&sp->clist, SYSCTL_CHILDREN(sp->oid), OID_AUTO, "pid", CTLFLAG_RD, &sp->proc->p_pid, sizeof(pid_t), "control process id"); } break; case ISCSIRESTART: error = sp->soc == NULL? ENOTCONN: i_fullfeature(dev, 2); break; case ISCSISTOP: error = i_fullfeature(dev, 0); break; case ISCSISIGNAL: { int sig = *(int *)arg; if(sig < 0 || sig > _SIG_MAXSIG) error = EINVAL; else sp->signal = sig; break; } case ISCSIGETCAM: { iscsi_cam_t *cp = (iscsi_cam_t *)arg; error = ic_getCamVals(sp, cp); break; } default: error = ENOIOCTL; } return error; } static int iscsi_read(struct cdev *dev, struct uio *uio, int ioflag) { #ifdef ISCSI_INITIATOR_DEBUG struct isc_softc *sc; isc_session_t *sp; pduq_t *pq; char buf[1024]; sc = dev->si_drv1; sp = dev->si_drv2; if(dev2unit(dev) == max_sessions) { sprintf(buf, "/----- Session ------/\n"); uiomove(buf, strlen(buf), uio); int i = 0; TAILQ_FOREACH(sp, &sc->isc_sess, sp_link) { if(uio->uio_resid == 0) return 0; sprintf(buf, "%03d] '%s' '%s'\n", i++, sp->opt.targetAddress, sp->opt.targetName); uiomove(buf, strlen(buf), uio); } sprintf(buf, "free npdu_alloc=%d, npdu_max=%d\n", sc->npdu_alloc, sc->npdu_max); uiomove(buf, strlen(buf), uio); } else { int i = 0; struct socket *so = sp->soc; #define pukeit(i, pq) do {\ sprintf(buf, "%03d] %06x %02x %06x %06x %jd\n",\ i, ntohl(pq->pdu.ipdu.bhs.CmdSN),\ pq->pdu.ipdu.bhs.opcode, ntohl(pq->pdu.ipdu.bhs.itt),\ ntohl(pq->pdu.ipdu.bhs.ExpStSN),\ (intmax_t)pq->ts.sec);\ } while(0) sprintf(buf, "%d/%d /---- hld -----/\n", sp->stats.nhld, sp->stats.max_hld); uiomove(buf, strlen(buf), uio); TAILQ_FOREACH(pq, &sp->hld, pq_link) { if(uio->uio_resid == 0) return 0; pukeit(i, pq); i++; uiomove(buf, strlen(buf), uio); } sprintf(buf, "%d/%d /---- rsp -----/\n", sp->stats.nrsp, sp->stats.max_rsp); uiomove(buf, strlen(buf), uio); i = 0; TAILQ_FOREACH(pq, &sp->rsp, pq_link) { if(uio->uio_resid == 0) return 0; pukeit(i, pq); i++; uiomove(buf, strlen(buf), uio); } sprintf(buf, "%d/%d /---- csnd -----/\n", sp->stats.ncsnd, sp->stats.max_csnd); i = 0; uiomove(buf, strlen(buf), uio); TAILQ_FOREACH(pq, &sp->csnd, pq_link) { if(uio->uio_resid == 0) return 0; pukeit(i, pq); i++; uiomove(buf, strlen(buf), uio); } sprintf(buf, "%d/%d /---- wsnd -----/\n", sp->stats.nwsnd, sp->stats.max_wsnd); i = 0; uiomove(buf, strlen(buf), uio); TAILQ_FOREACH(pq, &sp->wsnd, pq_link) { if(uio->uio_resid == 0) return 0; pukeit(i, pq); i++; uiomove(buf, strlen(buf), uio); } sprintf(buf, "%d/%d 
/---- isnd -----/\n", sp->stats.nisnd, sp->stats.max_isnd); i = 0; uiomove(buf, strlen(buf), uio); TAILQ_FOREACH(pq, &sp->isnd, pq_link) { if(uio->uio_resid == 0) return 0; pukeit(i, pq); i++; uiomove(buf, strlen(buf), uio); } sprintf(buf, "/---- Stats ---/\n"); uiomove(buf, strlen(buf), uio); sprintf(buf, "recv=%d sent=%d\n", sp->stats.nrecv, sp->stats.nsent); uiomove(buf, strlen(buf), uio); sprintf(buf, "flags=%x pdus: alloc=%d max=%d\n", sp->flags, sc->npdu_alloc, sc->npdu_max); uiomove(buf, strlen(buf), uio); sprintf(buf, "cws=%d last cmd=%x exp=%x max=%x stat=%x itt=%x\n", sp->cws, sp->sn.cmd, sp->sn.expCmd, sp->sn.maxCmd, sp->sn.stat, sp->sn.itt); uiomove(buf, strlen(buf), uio); sprintf(buf, "/---- socket -----/\nso_count=%d so_state=%x\n", so->so_count, so->so_state); uiomove(buf, strlen(buf), uio); } #endif return 0; } static int i_ping(struct cdev *dev) { return 0; } /* | low level I/O */ static int i_setsoc(isc_session_t *sp, int fd, struct thread *td) { cap_rights_t rights; int error = 0; if(sp->soc != NULL) isc_stop_receiver(sp); error = getsock_cap(td, fd, cap_rights_init(&rights, CAP_SOCK_CLIENT), &sp->fp, NULL, NULL); if(error) return error; sp->soc = sp->fp->f_data; sp->td = td; isc_start_receiver(sp); return error; } static int i_send(struct cdev *dev, caddr_t arg, struct thread *td) { isc_session_t *sp = dev->si_drv2; caddr_t bp; pduq_t *pq; pdu_t *pp; int n, error; debug_called(8); if(sp->soc == NULL) return ENOTCONN; if((pq = pdu_alloc(sp->isc, M_NOWAIT)) == NULL) return EAGAIN; pp = &pq->pdu; pq->pdu = *(pdu_t *)arg; if((error = i_prepPDU(sp, pq)) != 0) goto out; bp = NULL; if((pq->len - sizeof(union ipdu_u)) > 0) { pq->buf = bp = malloc(pq->len - sizeof(union ipdu_u), M_ISCSIBUF, M_NOWAIT); if(pq->buf == NULL) { error = EAGAIN; goto out; } } else pq->buf = NULL; // just in case? sdebug(2, "len=%d ahs_len=%d ds_len=%d buf=%zu@%p", pq->len, pp->ahs_len, pp->ds_len, pq->len - sizeof(union ipdu_u), bp); if(pp->ahs_len) { // XXX: never tested, looks suspicious n = pp->ahs_len; error = copyin(pp->ahs_addr, bp, n); if(error != 0) { sdebug(3, "copyin ahs: error=%d", error); goto out; } pp->ahs_addr = (ahs_t *)bp; bp += n; } if(pp->ds_len) { n = pp->ds_len; error = copyin(pp->ds_addr, bp, n); if(error != 0) { sdebug(3, "copyin ds: error=%d", error); goto out; } pp->ds_addr = bp; bp += n; while(n & 03) { n++; *bp++ = 0; } } error = isc_qout(sp, pq); if(error == 0) wakeup(&sp->flags); // XXX: to 'push' proc_out ... out: if(error) pdu_free(sp->isc, pq); return error; } static int i_recv(struct cdev *dev, caddr_t arg, struct thread *td) { isc_session_t *sp = dev->si_drv2; pduq_t *pq; pdu_t *pp, *up; caddr_t bp; int error, mustfree, cnt; size_t need, have, n; debug_called(8); if(sp == NULL) return EIO; if(sp->soc == NULL) return ENOTCONN; cnt = 6; // XXX: maybe the user can request a time out? mtx_lock(&sp->rsp_mtx); while((pq = TAILQ_FIRST(&sp->rsp)) == NULL) { msleep(&sp->rsp, &sp->rsp_mtx, PRIBIO, "isc_rsp", hz*10); if(cnt-- == 0) break; // XXX: for now, needs work } if(pq != NULL) { sp->stats.nrsp--; TAILQ_REMOVE(&sp->rsp, pq, pq_link); } mtx_unlock(&sp->rsp_mtx); sdebug(6, "cnt=%d", cnt); if(pq == NULL) { error = ENOTCONN; sdebug(3, "error=%d sp->flags=%x ", error, sp->flags); return error; } up = (pdu_t *)arg; pp = &pq->pdu; up->ipdu = pp->ipdu; n = 0; up->ds_len = 0; up->ahs_len = 0; error = 0; if(pq->mp) { u_int len; // Grr... 
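/*
 | Copyout strategy for the reply below, in outline: len is the total
 | AHS plus data-segment payload; when the head mbuf cannot supply it
 | contiguously (len > pq->mp->m_len), the chain is first linearized
 | into a temporary M_TMP buffer via i_mbufcopy(), so that each region
 | can be handed to copyout() in one shot, then the buffer is freed.
 */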
len = 0; if(pp->ahs_len) { len += pp->ahs_len; } if(pp->ds_len) { len += pp->ds_len; } mustfree = 0; if(len > pq->mp->m_len) { mustfree++; bp = malloc(len, M_TMP, M_WAITOK); sdebug(4, "need mbufcopy: %d", len); i_mbufcopy(pq->mp, bp, len); } else bp = mtod(pq->mp, caddr_t); if(pp->ahs_len) { need = pp->ahs_len; n = MIN(up->ahs_size, need); error = copyout(bp, (caddr_t)up->ahs_addr, n); up->ahs_len = n; bp += need; } if(!error && pp->ds_len) { need = pp->ds_len; if((have = up->ds_size) == 0) { have = up->ahs_size - n; up->ds_addr = (caddr_t)up->ahs_addr + n; } n = MIN(have, need); error = copyout(bp, (caddr_t)up->ds_addr, n); up->ds_len = n; } if(mustfree) free(bp, M_TMP); } sdebug(6, "len=%d ahs_len=%d ds_len=%d", pq->len, pp->ahs_len, pp->ds_len); pdu_free(sp->isc, pq); return error; } static int i_fullfeature(struct cdev *dev, int flag) { isc_session_t *sp = dev->si_drv2; int error; sdebug(2, "flag=%d", flag); error = 0; switch(flag) { case 0: // stop sp->flags &= ~ISC_FFPHASE; break; case 1: // start sp->flags |= ISC_FFPHASE; error = ic_init(sp); break; case 2: // restart sp->flags |= ISC_FFPHASE; ism_restart(sp); break; } return error; } static int i_create_session(struct cdev *dev, int *ndev) { struct isc_softc *sc = dev->si_drv1; isc_session_t *sp; int error, n; debug_called(8); sp = malloc(sizeof(isc_session_t), M_ISCSI, M_WAITOK | M_ZERO); if(sp == NULL) return ENOMEM; sx_xlock(&sc->unit_sx); if((n = alloc_unr(sc->unit)) < 0) { sx_unlock(&sc->unit_sx); free(sp, M_ISCSI); xdebug("too many sessions!"); return EPERM; } sx_unlock(&sc->unit_sx); mtx_lock(&sc->isc_mtx); TAILQ_INSERT_TAIL(&sc->isc_sess, sp, sp_link); isc->nsess++; mtx_unlock(&sc->isc_mtx); sp->dev = make_dev(&iscsi_cdevsw, n, UID_ROOT, GID_WHEEL, 0600, "iscsi%d", n); *ndev = sp->sid = n; sp->isc = sc; sp->dev->si_drv1 = sc; sp->dev->si_drv2 = sp; sp->opt.maxRecvDataSegmentLength = 8192; sp->opt.maxXmitDataSegmentLength = 8192; sp->opt.maxBurstLength = 65536; // 64k sp->opt.maxluns = ISCSI_MAX_LUNS; error = ism_start(sp); return error; } #ifdef notused static void iscsi_counters(isc_session_t *sp) { int h, r, s; pduq_t *pq; #define _puke(i, pq) do {\ debug(2, "%03d] %06x %02x %x %ld %jd %x\n",\ i, ntohl( pq->pdu.ipdu.bhs.CmdSN), \ pq->pdu.ipdu.bhs.opcode, ntohl(pq->pdu.ipdu.bhs.itt),\ (long)pq->ts.sec, pq->ts.frac, pq->flags);\ } while(0) h = r = s = 0; TAILQ_FOREACH(pq, &sp->hld, pq_link) { _puke(h, pq); h++; } TAILQ_FOREACH(pq, &sp->rsp, pq_link) r++; TAILQ_FOREACH(pq, &sp->csnd, pq_link) s++; TAILQ_FOREACH(pq, &sp->wsnd, pq_link) s++; TAILQ_FOREACH(pq, &sp->isnd, pq_link) s++; debug(2, "hld=%d rsp=%d snd=%d", h, r, s); } #endif static void iscsi_shutdown(void *v) { struct isc_softc *sc = v; isc_session_t *sp; int n; debug_called(8); if(sc == NULL) { xdebug("sc is NULL!"); return; } #ifdef DO_EVENTHANDLER if(sc->eh == NULL) debug(2, "sc->eh is NULL"); else { EVENTHANDLER_DEREGISTER(shutdown_pre_sync, sc->eh); debug(2, "done n=%d", sc->nsess); } #endif n = 0; TAILQ_FOREACH(sp, &sc->isc_sess, sp_link) { debug(2, "%2d] sp->flags=0x%08x", n, sp->flags); n++; } debug(2, "done"); } static void free_pdus(struct isc_softc *sc) { debug_called(8); if(sc->pdu_zone != NULL) { uma_zdestroy(sc->pdu_zone); sc->pdu_zone = NULL; } } static int iscsi_start(void) { debug_called(8); isc = malloc(sizeof(struct isc_softc), M_ISCSI, M_ZERO|M_WAITOK); mtx_init(&isc->isc_mtx, "iscsi-isc", NULL, MTX_DEF); TAILQ_INIT(&isc->isc_sess); /* | now init the free pdu list */ isc->pdu_zone = uma_zcreate("pdu", sizeof(pduq_t), NULL, NULL, NULL, NULL, 0, 
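/*
 | PDU pool sketch: this uma_zcreate() call passes NULL for the
 | constructor, destructor, init and fini hooks, and the zone is then
 | capped with uma_zone_set_max(isc->pdu_zone, max_pdus), i.e. by the
 | net.iscsi_initiator_max_pdus tunable declared above.
 */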
0); uma_zone_set_max(isc->pdu_zone, max_pdus); isc->unit = new_unrhdr(0, max_sessions-1, NULL); sx_init(&isc->unit_sx, "iscsi sx"); #ifdef DO_EVENTHANDLER if((isc->eh = EVENTHANDLER_REGISTER(shutdown_pre_sync, iscsi_shutdown, sc, SHUTDOWN_PRI_DEFAULT-1)) == NULL) xdebug("shutdown event registration failed\n"); #endif /* | sysctl stuff */ sysctl_ctx_init(&isc->clist); isc->oid = SYSCTL_ADD_NODE(&isc->clist, SYSCTL_STATIC_CHILDREN(_net), OID_AUTO, "iscsi_initiator", CTLFLAG_RD, 0, "iSCSI Subsystem"); SYSCTL_ADD_STRING(&isc->clist, SYSCTL_CHILDREN(isc->oid), OID_AUTO, "driver_version", CTLFLAG_RD, iscsi_driver_version, 0, "iscsi driver version"); SYSCTL_ADD_STRING(&isc->clist, SYSCTL_CHILDREN(isc->oid), OID_AUTO, "isid", CTLFLAG_RW, isid, 6+1, "initiator part of the Session Identifier"); SYSCTL_ADD_INT(&isc->clist, SYSCTL_CHILDREN(isc->oid), OID_AUTO, "sessions", CTLFLAG_RD, &isc->nsess, sizeof(isc->nsess), "number of active session"); #ifdef ISCSI_INITIATOR_DEBUG mtx_init(&iscsi_dbg_mtx, "iscsi_dbg", NULL, MTX_DEF); #endif isc->dev = make_dev_credf(MAKEDEV_CHECKNAME, &iscsi_cdevsw, max_sessions, NULL, UID_ROOT, GID_WHEEL, 0600, "iscsi"); if (isc->dev == NULL) { xdebug("iscsi_initiator: make_dev_credf failed"); return (EEXIST); } isc->dev->si_drv1 = isc; printf("iscsi: version %s\n", iscsi_driver_version); return (0); } /* | Notes: | unload SHOULD fail if there is activity | activity: there is/are active session/s */ static void iscsi_stop(void) { isc_session_t *sp, *sp_tmp; debug_called(8); /* | go through all the sessions | Note: close should have done this ... */ TAILQ_FOREACH_SAFE(sp, &isc->isc_sess, sp_link, sp_tmp) { //XXX: check for activity ... ism_stop(sp); } mtx_destroy(&isc->isc_mtx); sx_destroy(&isc->unit_sx); free_pdus(isc); if(isc->dev) destroy_dev(isc->dev); if(sysctl_ctx_free(&isc->clist)) xdebug("sysctl_ctx_free failed"); iscsi_shutdown(isc); // XXX: check EVENTHANDLER_ ... #ifdef ISCSI_INITIATOR_DEBUG mtx_destroy(&iscsi_dbg_mtx); #endif free(isc, M_ISCSI); } static int iscsi_modevent(module_t mod, int what, void *arg) { int error = 0; debug_called(8); switch(what) { case MOD_LOAD: error = iscsi_start(); break; case MOD_QUIESCE: if(isc->nsess) { xdebug("iscsi module busy(nsess=%d), cannot unload", isc->nsess); log(LOG_ERR, "iscsi module busy, cannot unload"); } return isc->nsess; case MOD_SHUTDOWN: break; case MOD_UNLOAD: iscsi_stop(); break; default: break; } return (error); } moduledata_t iscsi_mod = { "iscsi_initiator", (modeventhand_t) iscsi_modevent, 0 }; #ifdef ISCSI_ROOT static void iscsi_rootconf(void) { #if 0 nfs_setup_diskless(); if (nfs_diskless_valid) rootdevnames[0] = "nfs:"; #endif printf("** iscsi_rootconf **\n"); } SYSINIT(cpu_rootconf1, SI_SUB_ROOT_CONF, SI_ORDER_FIRST, iscsi_rootconf, NULL) #endif DECLARE_MODULE(iscsi_initiator, iscsi_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); MODULE_DEPEND(iscsi_initiator, cam, 1, 1, 1); Index: user/ngie/bug-237403/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c =================================================================== --- user/ngie/bug-237403/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c (revision 348028) +++ user/ngie/bug-237403/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c (revision 348029) @@ -1,2942 +1,2943 @@ /* * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include #include #include #include #ifdef CONFIG_NET_RX_BUSY_POLL #include #endif #include #include #include #include #include #include +#include #include #include #include "en.h" #include "en_port.h" NETDUMP_DEFINE(mlx4_en); static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv); static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv); #ifdef CONFIG_NET_RX_BUSY_POLL /* must be called with local_bh_disable()d */ static int mlx4_en_low_latency_recv(struct napi_struct *napi) { struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); struct net_device *dev = cq->dev; struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; int done; if (!priv->port_up) return LL_FLUSH_FAILED; if (!mlx4_en_cq_lock_poll(cq)) return LL_FLUSH_BUSY; done = mlx4_en_process_rx_cq(dev, cq, 4); #ifdef LL_EXTENDED_STATS if (likely(done)) rx_ring->cleaned += done; else rx_ring->misses++; #endif mlx4_en_cq_unlock_poll(cq); return done; } #endif /* CONFIG_NET_RX_BUSY_POLL */ #ifdef CONFIG_RFS_ACCEL struct mlx4_en_filter { struct list_head next; struct work_struct work; u8 ip_proto; __be32 src_ip; __be32 dst_ip; __be16 src_port; __be16 dst_port; int rxq_index; struct mlx4_en_priv *priv; u32 flow_id; /* RFS infrastructure id */ int id; /* mlx4_en driver id */ u64 reg_id; /* Flow steering API id */ u8 activated; /* Used to prevent expiry before filter * is attached */ struct hlist_node filter_chain; }; static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) { switch (ip_proto) { case IPPROTO_UDP: return MLX4_NET_TRANS_RULE_ID_UDP; case IPPROTO_TCP: return MLX4_NET_TRANS_RULE_ID_TCP; default: return MLX4_NET_TRANS_RULE_NUM; } }; static void mlx4_en_filter_work(struct work_struct *work) { struct mlx4_en_filter *filter = container_of(work, struct mlx4_en_filter, work); struct mlx4_en_priv *priv = filter->priv; struct mlx4_spec_list spec_tcp_udp = { .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), { .tcp_udp = { .dst_port = filter->dst_port, .dst_port_msk = (__force __be16)-1, .src_port = filter->src_port, .src_port_msk = (__force __be16)-1, }, }, }; struct mlx4_spec_list spec_ip = { .id = MLX4_NET_TRANS_RULE_ID_IPV4, { .ipv4 = { .dst_ip = filter->dst_ip, .dst_ip_msk = (__force __be32)-1, 
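/*
 * The RFS rule being assembled here is a list of match specs,
 * roughly ETH (dst MAC) -> IPV4 (src/dst address) -> TCP/UDP
 * (src/dst port), each appended to rule.list with list_add_tail()
 * and pushed to hardware in a single mlx4_flow_attach() call; the
 * all-ones masks make every field an exact match.
 */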
.src_ip = filter->src_ip, .src_ip_msk = (__force __be32)-1, }, }, }; struct mlx4_spec_list spec_eth = { .id = MLX4_NET_TRANS_RULE_ID_ETH, }; struct mlx4_net_trans_rule rule = { .list = LIST_HEAD_INIT(rule.list), .queue_mode = MLX4_NET_TRANS_Q_LIFO, .exclusive = 1, .allow_loopback = 1, .promisc_mode = MLX4_FS_REGULAR, .port = priv->port, .priority = MLX4_DOMAIN_RFS, }; int rc; __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", filter->ip_proto); goto ignore; } list_add_tail(&spec_eth.list, &rule.list); list_add_tail(&spec_ip.list, &rule.list); list_add_tail(&spec_tcp_udp.list, &rule.list); rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); filter->activated = 0; if (filter->reg_id) { rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); if (rc && rc != -ENOENT) en_err(priv, "Error detaching flow. rc = %d\n", rc); } rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); if (rc) en_err(priv, "Error attaching flow. err = %d\n", rc); ignore: mlx4_en_filter_rfs_expire(priv); filter->activated = 1; } static inline struct hlist_head * filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, __be16 src_port, __be16 dst_port) { unsigned long l; int bucket_idx; l = (__force unsigned long)src_port | ((__force unsigned long)dst_port << 2); l ^= (__force unsigned long)(src_ip ^ dst_ip); bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); return &priv->filter_hash[bucket_idx]; } static struct mlx4_en_filter * mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, __be32 dst_ip, u8 ip_proto, __be16 src_port, __be16 dst_port, u32 flow_id) { struct mlx4_en_filter *filter = NULL; filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); if (!filter) return NULL; filter->priv = priv; filter->rxq_index = rxq_index; INIT_WORK(&filter->work, mlx4_en_filter_work); filter->src_ip = src_ip; filter->dst_ip = dst_ip; filter->ip_proto = ip_proto; filter->src_port = src_port; filter->dst_port = dst_port; filter->flow_id = flow_id; filter->id = priv->last_filter_id++ % RPS_NO_FILTER; list_add_tail(&filter->next, &priv->filters); hlist_add_head(&filter->filter_chain, filter_hash_bucket(priv, src_ip, dst_ip, src_port, dst_port)); return filter; } static void mlx4_en_filter_free(struct mlx4_en_filter *filter) { struct mlx4_en_priv *priv = filter->priv; int rc; list_del(&filter->next); rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); if (rc && rc != -ENOENT) en_err(priv, "Error detaching flow. 
rc = %d\n", rc); kfree(filter); } static inline struct mlx4_en_filter * mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, u8 ip_proto, __be16 src_port, __be16 dst_port) { struct mlx4_en_filter *filter; struct mlx4_en_filter *ret = NULL; hlist_for_each_entry(filter, filter_hash_bucket(priv, src_ip, dst_ip, src_port, dst_port), filter_chain) { if (filter->src_ip == src_ip && filter->dst_ip == dst_ip && filter->ip_proto == ip_proto && filter->src_port == src_port && filter->dst_port == dst_port) { ret = filter; break; } } return ret; } static int mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id) { struct mlx4_en_priv *priv = netdev_priv(net_dev); struct mlx4_en_filter *filter; const struct iphdr *ip; const __be16 *ports; u8 ip_proto; __be32 src_ip; __be32 dst_ip; __be16 src_port; __be16 dst_port; int nhoff = skb_network_offset(skb); int ret = 0; if (skb->protocol != htons(ETH_P_IP)) return -EPROTONOSUPPORT; ip = (const struct iphdr *)(skb->data + nhoff); if (ip_is_fragment(ip)) return -EPROTONOSUPPORT; if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) return -EPROTONOSUPPORT; ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); ip_proto = ip->protocol; src_ip = ip->saddr; dst_ip = ip->daddr; src_port = ports[0]; dst_port = ports[1]; spin_lock_bh(&priv->filters_lock); filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, src_port, dst_port); if (filter) { if (filter->rxq_index == rxq_index) goto out; filter->rxq_index = rxq_index; } else { filter = mlx4_en_filter_alloc(priv, rxq_index, src_ip, dst_ip, ip_proto, src_port, dst_port, flow_id); if (!filter) { ret = -ENOMEM; goto err; } } queue_work(priv->mdev->workqueue, &filter->work); out: ret = filter->id; err: spin_unlock_bh(&priv->filters_lock); return ret; } void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) { struct mlx4_en_filter *filter, *tmp; LIST_HEAD(del_list); spin_lock_bh(&priv->filters_lock); list_for_each_entry_safe(filter, tmp, &priv->filters, next) { list_move(&filter->next, &del_list); hlist_del(&filter->filter_chain); } spin_unlock_bh(&priv->filters_lock); list_for_each_entry_safe(filter, tmp, &del_list, next) { cancel_work_sync(&filter->work); mlx4_en_filter_free(filter); } } static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) { struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; LIST_HEAD(del_list); int i = 0; spin_lock_bh(&priv->filters_lock); list_for_each_entry_safe(filter, tmp, &priv->filters, next) { if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) break; if (filter->activated && !work_pending(&filter->work) && rps_may_expire_flow(priv->dev, filter->rxq_index, filter->flow_id, filter->id)) { list_move(&filter->next, &del_list); hlist_del(&filter->filter_chain); } else last_filter = filter; i++; } if (last_filter && (&last_filter->next != priv->filters.next)) list_move(&priv->filters, &last_filter->next); spin_unlock_bh(&priv->filters_lock); list_for_each_entry_safe(filter, tmp, &del_list, next) mlx4_en_filter_free(filter); } #endif static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int err; int idx; if (arg != priv) return; en_dbg(HW, priv, "adding VLAN:%d\n", vid); set_bit(vid, priv->active_vlans); /* Add VID to port VLAN filter */ mutex_lock(&mdev->state_lock); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); if (err) en_err(priv, "Failed 
configuring VLAN filter\n"); } if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) en_dbg(HW, priv, "failed adding vlan %d\n", vid); mutex_unlock(&mdev->state_lock); } static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int err; if (arg != priv) return; en_dbg(HW, priv, "Killing VID:%d\n", vid); clear_bit(vid, priv->active_vlans); /* Remove VID from port VLAN filter */ mutex_lock(&mdev->state_lock); mlx4_unregister_vlan(mdev->dev, priv->port, vid); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); if (err) en_err(priv, "Failed configuring VLAN filter\n"); } mutex_unlock(&mdev->state_lock); } static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, int qpn, u64 *reg_id) { int err; if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) return 0; /* do nothing */ err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, MLX4_DOMAIN_NIC, reg_id); if (err) { en_err(priv, "failed to add vxlan steering rule, err %d\n", err); return err; } en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, (long long)*reg_id); return 0; } static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, unsigned char *mac, int *qpn, u64 *reg_id) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int err; switch (dev->caps.steering_mode) { case MLX4_STEERING_MODE_B0: { struct mlx4_qp qp; u8 gid[16] = {0}; qp.qpn = *qpn; memcpy(&gid[10], mac, ETH_ALEN); gid[5] = priv->port; err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); break; } case MLX4_STEERING_MODE_DEVICE_MANAGED: { struct mlx4_spec_list spec_eth = { {NULL} }; __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); struct mlx4_net_trans_rule rule = { .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, .allow_loopback = 1, .promisc_mode = MLX4_FS_REGULAR, .priority = MLX4_DOMAIN_NIC, }; rule.port = priv->port; rule.qpn = *qpn; INIT_LIST_HEAD(&rule.list); spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); list_add_tail(&spec_eth.list, &rule.list); err = mlx4_flow_attach(dev, &rule, reg_id); break; } default: return -EINVAL; } if (err) en_warn(priv, "Failed Attaching Unicast\n"); return err; } static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, unsigned char *mac, int qpn, u64 reg_id) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; switch (dev->caps.steering_mode) { case MLX4_STEERING_MODE_B0: { struct mlx4_qp qp; u8 gid[16] = {0}; qp.qpn = qpn; memcpy(&gid[10], mac, ETH_ALEN); gid[5] = priv->port; mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); break; } case MLX4_STEERING_MODE_DEVICE_MANAGED: { mlx4_flow_detach(dev, reg_id); break; } default: en_err(priv, "Invalid steering mode.\n"); } } static int mlx4_en_get_qp(struct mlx4_en_priv *priv) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int index = 0; int err = 0; int *qpn = &priv->base_qpn; u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev)); en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", IF_LLADDR(priv->dev)); index = mlx4_register_mac(dev, priv->port, mac); if (index < 0) { err = index; en_err(priv, "Failed adding MAC: %pM\n", IF_LLADDR(priv->dev)); return err; } if (dev->caps.steering_mode == 
MLX4_STEERING_MODE_A0) { int base_qpn = mlx4_get_base_qpn(dev, priv->port); *qpn = base_qpn + index; return 0; } err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP); en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); if (err) { en_err(priv, "Failed to reserve qp for mac registration\n"); mlx4_unregister_mac(dev, priv->port, mac); return err; } return 0; } static void mlx4_en_put_qp(struct mlx4_en_priv *priv) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int qpn = priv->base_qpn; if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev)); en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", IF_LLADDR(priv->dev)); mlx4_unregister_mac(dev, priv->port, mac); } else { en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", priv->port, qpn); mlx4_qp_release_range(dev, qpn, 1); priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; } } static void mlx4_en_clear_uclist(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_addr_list *tmp, *uc_to_del; list_for_each_entry_safe(uc_to_del, tmp, &priv->uc_list, list) { list_del(&uc_to_del->list); kfree(uc_to_del); } } static void mlx4_en_cache_uclist(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_addr_list *tmp; struct ifaddr *ifa; mlx4_en_clear_uclist(dev); if_addr_rlock(dev); CK_STAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_LINK) continue; if (((struct sockaddr_dl *)ifa->ifa_addr)->sdl_alen != ETHER_ADDR_LEN) continue; tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC); if (tmp == NULL) { en_err(priv, "Failed to allocate address list\n"); break; } memcpy(tmp->addr, LLADDR((struct sockaddr_dl *)ifa->ifa_addr), ETH_ALEN); list_add_tail(&tmp->list, &priv->uc_list); } if_addr_runlock(dev); } static void mlx4_en_clear_mclist(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_addr_list *tmp, *mc_to_del; list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { list_del(&mc_to_del->list); kfree(mc_to_del); } } static void mlx4_en_cache_mclist(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_addr_list *tmp; struct ifmultiaddr *ifma; mlx4_en_clear_mclist(dev); if_maddr_rlock(dev); CK_STAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen != ETHER_ADDR_LEN) continue; tmp = kzalloc(sizeof(struct mlx4_en_addr_list), GFP_ATOMIC); if (tmp == NULL) { en_err(priv, "Failed to allocate address list\n"); break; } memcpy(tmp->addr, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN); list_add_tail(&tmp->list, &priv->mc_list); } if_maddr_runlock(dev); } static void update_addr_list_flags(struct mlx4_en_priv *priv, struct list_head *dst, struct list_head *src) { struct mlx4_en_addr_list *dst_tmp, *src_tmp, *new_mc; bool found; /* Find all the entries that should be removed from dst, * These are the entries that are not found in src */ list_for_each_entry(dst_tmp, dst, list) { found = false; list_for_each_entry(src_tmp, src, list) { if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { found = true; break; } } if (!found) dst_tmp->action = MLX4_ADDR_LIST_REM; } /* Add entries that exist in src but not in dst * mark them as need to add */ list_for_each_entry(src_tmp, src, list) { found = false; list_for_each_entry(dst_tmp, dst, list) { if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { 
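/*
 * Second pass of a two-pass set diff: a dst entry also found in src
 * is kept and marked MLX4_ADDR_LIST_NONE below, while src-only
 * entries get fresh MLX4_ADDR_LIST_ADD nodes; the first pass above
 * already marked dst-only entries MLX4_ADDR_LIST_REM.  O(n*m), which
 * is acceptable for the short per-port address lists involved.
 */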
dst_tmp->action = MLX4_ADDR_LIST_NONE; found = true; break; } } if (!found) { new_mc = kmalloc(sizeof(struct mlx4_en_addr_list), GFP_KERNEL); if (!new_mc) { en_err(priv, "Failed to allocate current multicast list\n"); return; } memcpy(new_mc, src_tmp, sizeof(struct mlx4_en_addr_list)); new_mc->action = MLX4_ADDR_LIST_ADD; list_add_tail(&new_mc->list, dst); } } } static void mlx4_en_set_rx_mode(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); if (!priv->port_up) return; queue_work(priv->mdev->workqueue, &priv->rx_mode_task); } static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, struct mlx4_en_dev *mdev) { int err = 0; if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { priv->flags |= MLX4_EN_FLAG_PROMISC; /* Enable promiscouos mode */ switch (mdev->dev->caps.steering_mode) { case MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port, priv->base_qpn, MLX4_FS_ALL_DEFAULT); if (err) en_err(priv, "Failed enabling promiscuous mode\n"); priv->flags |= MLX4_EN_FLAG_MC_PROMISC; break; case MLX4_STEERING_MODE_B0: err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn, priv->port); if (err) en_err(priv, "Failed enabling unicast promiscuous mode\n"); /* Add the default qp number as multicast * promisc */ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, priv->port); if (err) en_err(priv, "Failed enabling multicast promiscuous mode\n"); priv->flags |= MLX4_EN_FLAG_MC_PROMISC; } break; case MLX4_STEERING_MODE_A0: err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 1); if (err) en_err(priv, "Failed enabling promiscuous mode\n"); break; } /* Disable port multicast filter (unconditionally) */ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 0, MLX4_MCAST_DISABLE); if (err) en_err(priv, "Failed disabling multicast filter\n"); } } static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, struct mlx4_en_dev *mdev) { int err = 0; priv->flags &= ~MLX4_EN_FLAG_PROMISC; /* Disable promiscouos mode */ switch (mdev->dev->caps.steering_mode) { case MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, MLX4_FS_ALL_DEFAULT); if (err) en_err(priv, "Failed disabling promiscuous mode\n"); priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; break; case MLX4_STEERING_MODE_B0: err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, priv->port); if (err) en_err(priv, "Failed disabling unicast promiscuous mode\n"); /* Disable Multicast promisc */ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, priv->port); if (err) en_err(priv, "Failed disabling multicast promiscuous mode\n"); priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; } break; case MLX4_STEERING_MODE_A0: err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); if (err) en_err(priv, "Failed disabling promiscuous mode\n"); break; } } static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, struct net_device *dev, struct mlx4_en_dev *mdev) { struct mlx4_en_addr_list *addr_list, *tmp; u8 mc_list[16] = {0}; int err = 0; u64 mcast_addr = 0; /* Enable/disable the multicast filter according to IFF_ALLMULTI */ if (dev->if_flags & IFF_ALLMULTI) { err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 0, MLX4_MCAST_DISABLE); if (err) en_err(priv, "Failed disabling multicast filter\n"); /* Add the default qp number as multicast promisc */ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { switch (mdev->dev->caps.steering_mode) { case 
MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port, priv->base_qpn, MLX4_FS_MC_DEFAULT); break; case MLX4_STEERING_MODE_B0: err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, priv->port); break; case MLX4_STEERING_MODE_A0: break; } if (err) en_err(priv, "Failed entering multicast promisc mode\n"); priv->flags |= MLX4_EN_FLAG_MC_PROMISC; } } else { /* Disable Multicast promisc */ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { switch (mdev->dev->caps.steering_mode) { case MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, MLX4_FS_MC_DEFAULT); break; case MLX4_STEERING_MODE_B0: err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, priv->port); break; case MLX4_STEERING_MODE_A0: break; } if (err) en_err(priv, "Failed disabling multicast promiscuous mode\n"); priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; } err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 0, MLX4_MCAST_DISABLE); if (err) en_err(priv, "Failed disabling multicast filter\n"); /* Flush mcast filter and init it with broadcast address */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 1, MLX4_MCAST_CONFIG); /* Update multicast list - we cache all addresses so they won't * change while HW is updated holding the command semaphor */ mlx4_en_cache_mclist(dev); list_for_each_entry(addr_list, &priv->mc_list, list) { mcast_addr = mlx4_mac_to_u64(addr_list->addr); mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, mcast_addr, 0, MLX4_MCAST_CONFIG); } err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 0, MLX4_MCAST_ENABLE); if (err) en_err(priv, "Failed enabling multicast filter\n"); update_addr_list_flags(priv, &priv->curr_mc_list, &priv->mc_list); list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) { if (addr_list->action == MLX4_ADDR_LIST_REM) { /* detach this address and delete from list */ memcpy(&mc_list[10], addr_list->addr, ETH_ALEN); mc_list[5] = priv->port; err = mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, addr_list->reg_id); if (err) en_err(priv, "Fail to detach multicast address\n"); if (addr_list->tunnel_reg_id) { err = mlx4_flow_detach(priv->mdev->dev, addr_list->tunnel_reg_id); if (err) en_err(priv, "Failed to detach multicast address\n"); } /* remove from list */ list_del(&addr_list->list); kfree(addr_list); } else if (addr_list->action == MLX4_ADDR_LIST_ADD) { /* attach the address */ memcpy(&mc_list[10], addr_list->addr, ETH_ALEN); /* needed for B0 steering support */ mc_list[5] = priv->port; err = mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, priv->port, 0, MLX4_PROT_ETH, &addr_list->reg_id); if (err) en_err(priv, "Fail to attach multicast address\n"); err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, &addr_list->tunnel_reg_id); if (err) en_err(priv, "Failed to attach multicast address\n"); } } } } static void mlx4_en_do_unicast(struct mlx4_en_priv *priv, struct net_device *dev, struct mlx4_en_dev *mdev) { struct mlx4_en_addr_list *addr_list, *tmp; int err; /* Update unicast list */ mlx4_en_cache_uclist(dev); update_addr_list_flags(priv, &priv->curr_uc_list, &priv->uc_list); list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) { if (addr_list->action == MLX4_ADDR_LIST_REM) { mlx4_en_uc_steer_release(priv, addr_list->addr, priv->rss_map.indir_qp.qpn, addr_list->reg_id); /* remove from list */ list_del(&addr_list->list); kfree(addr_list); } else if (addr_list->action == MLX4_ADDR_LIST_ADD) { err = mlx4_en_uc_steer_add(priv, 
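/*
 * The call completed below steers the new address to the RSS
 * indirection QP and records reg_id, so the rule can later be
 * detached symmetrically by the MLX4_ADDR_LIST_REM branch above.
 */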
addr_list->addr, &priv->rss_map.indir_qp.qpn, &addr_list->reg_id); if (err) en_err(priv, "Fail to add unicast address\n"); } } } static void mlx4_en_do_set_rx_mode(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, rx_mode_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; mutex_lock(&mdev->state_lock); if (!mdev->device_up) { en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); goto out; } if (!priv->port_up) { en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); goto out; } if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { if (priv->port_state.link_state) { priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; /* update netif baudrate */ priv->dev->if_baudrate = IF_Mbps(priv->port_state.link_speed); /* Important note: the following call for if_link_state_change * is needed for interface up scenario (start port, link state * change) */ if_link_state_change(priv->dev, LINK_STATE_UP); en_dbg(HW, priv, "Link Up\n"); } } /* Set unicast rules */ mlx4_en_do_unicast(priv, dev, mdev); /* Promsicuous mode: disable all filters */ if ((dev->if_flags & IFF_PROMISC) || (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { mlx4_en_set_promisc_mode(priv, mdev); } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { /* Not in promiscuous mode */ mlx4_en_clear_promisc_mode(priv, mdev); } /* Set multicast rules */ mlx4_en_do_multicast(priv, dev, mdev); out: mutex_unlock(&mdev->state_lock); } static void mlx4_en_watchdog_timeout(void *arg) { struct mlx4_en_priv *priv = arg; struct mlx4_en_dev *mdev = priv->mdev; en_dbg(DRV, priv, "Scheduling watchdog\n"); queue_work(mdev->workqueue, &priv->watchdog_task); if (priv->port_up) callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT, mlx4_en_watchdog_timeout, priv); } static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) { struct mlx4_en_cq *cq; int i; /* If we haven't received a specific coalescing setting * (module param), we set the moderation parameters as follows: * - moder_cnt is set to the number of mtu sized packets to * satisfy our coalescing target. * - moder_time is set to a fixed value. 
*/ priv->rx_frames = MLX4_EN_RX_COAL_TARGET; priv->rx_usecs = MLX4_EN_RX_COAL_TIME; priv->tx_frames = MLX4_EN_TX_COAL_PKTS; priv->tx_usecs = MLX4_EN_TX_COAL_TIME; en_dbg(INTR, priv, "Default coalescing params for mtu: %u - " "rx_frames:%d rx_usecs:%d\n", (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs); /* Setup cq moderation params */ for (i = 0; i < priv->rx_ring_num; i++) { cq = priv->rx_cq[i]; cq->moder_cnt = priv->rx_frames; cq->moder_time = priv->rx_usecs; priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; priv->last_moder_packets[i] = 0; priv->last_moder_bytes[i] = 0; } for (i = 0; i < priv->tx_ring_num; i++) { cq = priv->tx_cq[i]; cq->moder_cnt = priv->tx_frames; cq->moder_time = priv->tx_usecs; } /* Reset auto-moderation params */ priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; priv->adaptive_rx_coal = 1; priv->last_moder_jiffies = 0; priv->last_moder_tx_packets = 0; } static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) { unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); struct mlx4_en_cq *cq; unsigned long packets; unsigned long rate; unsigned long avg_pkt_size; unsigned long rx_packets; unsigned long rx_bytes; unsigned long rx_pkt_diff; int moder_time; int ring, err; if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) return; for (ring = 0; ring < priv->rx_ring_num; ring++) { spin_lock(&priv->stats_lock); rx_packets = priv->rx_ring[ring]->packets; rx_bytes = priv->rx_ring[ring]->bytes; spin_unlock(&priv->stats_lock); rx_pkt_diff = ((unsigned long) (rx_packets - priv->last_moder_packets[ring])); packets = rx_pkt_diff; rate = packets * HZ / period; avg_pkt_size = packets ?
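/*
 * Auto-moderation sketch: with rate in packets per second, the new
 * moderation time is clamped and linearly interpolated:
 *
 *	if (rate < pkt_rate_low)	t = rx_usecs_low;
 *	else if (rate > pkt_rate_high)	t = rx_usecs_high;
 *	else t = rx_usecs_low + (rate - pkt_rate_low) *
 *	    (rx_usecs_high - rx_usecs_low) /
 *	    (pkt_rate_high - pkt_rate_low);
 *
 * and it is only applied when both the rate and the average packet
 * size (whose computation finishes below) clear the thresholds that
 * make interrupt batching worthwhile.
 */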
((unsigned long) (rx_bytes - priv->last_moder_bytes[ring])) / packets : 0; /* Apply auto-moderation only when packet rate * exceeds a rate that it matters */ if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { if (rate < priv->pkt_rate_low) moder_time = priv->rx_usecs_low; else if (rate > priv->pkt_rate_high) moder_time = priv->rx_usecs_high; else moder_time = (rate - priv->pkt_rate_low) * (priv->rx_usecs_high - priv->rx_usecs_low) / (priv->pkt_rate_high - priv->pkt_rate_low) + priv->rx_usecs_low; } else { moder_time = priv->rx_usecs_low; } if (moder_time != priv->last_moder_time[ring]) { priv->last_moder_time[ring] = moder_time; cq = priv->rx_cq[ring]; cq->moder_time = moder_time; cq->moder_cnt = priv->rx_frames; err = mlx4_en_set_cq_moder(priv, cq); if (err) en_err(priv, "Failed modifying moderation for cq:%d\n", ring); } priv->last_moder_packets[ring] = rx_packets; priv->last_moder_bytes[ring] = rx_bytes; } priv->last_moder_jiffies = jiffies; } static void mlx4_en_do_get_stats(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, stats_task); struct mlx4_en_dev *mdev = priv->mdev; int err; mutex_lock(&mdev->state_lock); if (mdev->device_up) { if (priv->port_up) { if (mlx4_is_slave(mdev->dev)) err = mlx4_en_get_vport_stats(mdev, priv->port); else err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); if (err) en_dbg(HW, priv, "Could not update stats\n"); mlx4_en_auto_moderation(priv); } queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); } mutex_unlock(&mdev->state_lock); } /* mlx4_en_service_task - Run service task for tasks that needed to be done * periodically */ static void mlx4_en_service_task(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, service_task); struct mlx4_en_dev *mdev = priv->mdev; mutex_lock(&mdev->state_lock); if (mdev->device_up) { queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); } mutex_unlock(&mdev->state_lock); } static void mlx4_en_linkstate(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, linkstate_task); struct mlx4_en_dev *mdev = priv->mdev; int linkstate = priv->link_state; mutex_lock(&mdev->state_lock); /* If observable port state changed set carrier state and * report to system log */ if (priv->last_link_state != linkstate) { if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { en_info(priv, "Link Down\n"); if_link_state_change(priv->dev, LINK_STATE_DOWN); /* update netif baudrate */ priv->dev->if_baudrate = 0; /* make sure the port is up before notifying the OS. * This is tricky since we get here on INIT_PORT and * in such case we can't tell the OS the port is up. * To solve this there is a call to if_link_state_change * in set_rx_mode. 
* */ } else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) en_info(priv, "Query port failed\n"); priv->dev->if_baudrate = IF_Mbps(priv->port_state.link_speed); en_info(priv, "Link Up\n"); if_link_state_change(priv->dev, LINK_STATE_UP); } } priv->last_link_state = linkstate; mutex_unlock(&mdev->state_lock); } int mlx4_en_start_port(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_cq *cq; struct mlx4_en_tx_ring *tx_ring; int rx_index = 0; int tx_index = 0; int err = 0; int i; int j; u8 mc_list[16] = {0}; if (priv->port_up) { en_dbg(DRV, priv, "start port called while port already up\n"); return 0; } INIT_LIST_HEAD(&priv->mc_list); INIT_LIST_HEAD(&priv->uc_list); INIT_LIST_HEAD(&priv->curr_mc_list); INIT_LIST_HEAD(&priv->curr_uc_list); INIT_LIST_HEAD(&priv->ethtool_list); /* Calculate Rx buf size */ dev->if_mtu = min(dev->if_mtu, priv->max_mtu); mlx4_en_calc_rx_buf(dev); en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size); /* Configure rx cq's and rings */ err = mlx4_en_activate_rx_rings(priv); if (err) { en_err(priv, "Failed to activate RX rings\n"); return err; } for (i = 0; i < priv->rx_ring_num; i++) { cq = priv->rx_cq[i]; mlx4_en_cq_init_lock(cq); err = mlx4_en_activate_cq(priv, cq, i); if (err) { en_err(priv, "Failed activating Rx CQ\n"); goto cq_err; } for (j = 0; j < cq->size; j++) cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; err = mlx4_en_set_cq_moder(priv, cq); if (err) { en_err(priv, "Failed setting cq moderation parameters"); mlx4_en_deactivate_cq(priv, cq); goto cq_err; } mlx4_en_arm_cq(priv, cq); priv->rx_ring[i]->cqn = cq->mcq.cqn; ++rx_index; } /* Set qp number */ en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); err = mlx4_en_get_qp(priv); if (err) { en_err(priv, "Failed getting eth qp\n"); goto cq_err; } mdev->mac_removed[priv->port] = 0; priv->counter_index = mlx4_get_default_counter_index(mdev->dev, priv->port); err = mlx4_en_config_rss_steer(priv); if (err) { en_err(priv, "Failed configuring rss steering\n"); goto mac_err; } err = mlx4_en_create_drop_qp(priv); if (err) goto rss_err; /* Configure tx cq's and rings */ for (i = 0; i < priv->tx_ring_num; i++) { /* Configure cq */ cq = priv->tx_cq[i]; err = mlx4_en_activate_cq(priv, cq, i); if (err) { en_err(priv, "Failed activating Tx CQ\n"); goto tx_err; } err = mlx4_en_set_cq_moder(priv, cq); if (err) { en_err(priv, "Failed setting cq moderation parameters"); mlx4_en_deactivate_cq(priv, cq); goto tx_err; } en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); cq->buf->wqe_index = cpu_to_be16(0xffff); /* Configure ring */ tx_ring = priv->tx_ring[i]; err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, i / priv->num_tx_rings_p_up); if (err) { en_err(priv, "Failed activating Tx ring %d\n", i); mlx4_en_deactivate_cq(priv, cq); goto tx_err; } /* Arm CQ for TX completions */ mlx4_en_arm_cq(priv, cq); /* Set initial ownership of all Tx TXBBs to SW (1) */ for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) *((u32 *) (tx_ring->buf + j)) = INIT_OWNER_BIT; ++tx_index; } /* Configure port */ err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_mb_size, priv->prof->tx_pause, priv->prof->tx_ppp, priv->prof->rx_pause, priv->prof->rx_ppp); if (err) { en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", priv->port, err); goto tx_err; } /* Set default qp number */ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, 
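/*
 * Bring-up order so far: RX CQs and rings, the eth QP and RSS
 * steering, the drop QP, TX CQs and rings, then SET_PORT_general
 * (MTU and pause parameters).  The SET_PORT_qpn_calc() call being
 * completed below installs the default QP before INIT_PORT, the
 * broadcast attach and the rx_mode task run.
 */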
priv->base_qpn, 0); if (err) { en_err(priv, "Failed setting default qp numbers\n"); goto tx_err; } /* Init port */ en_dbg(HW, priv, "Initializing port\n"); err = mlx4_INIT_PORT(mdev->dev, priv->port); if (err) { en_err(priv, "Failed Initializing port\n"); goto tx_err; } /* Attach rx QP to bradcast address */ memset(&mc_list[10], 0xff, ETH_ALEN); mc_list[5] = priv->port; /* needed for B0 steering support */ if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, priv->port, 0, MLX4_PROT_ETH, &priv->broadcast_id)) mlx4_warn(mdev, "Failed Attaching Broadcast\n"); /* Must redo promiscuous mode setup. */ priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task); priv->port_up = true; /* Enable the queues. */ dev->if_drv_flags &= ~IFF_DRV_OACTIVE; dev->if_drv_flags |= IFF_DRV_RUNNING; #ifdef CONFIG_DEBUG_FS mlx4_en_create_debug_files(priv); #endif callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT, mlx4_en_watchdog_timeout, priv); return 0; tx_err: while (tx_index--) { mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); } mlx4_en_destroy_drop_qp(priv); rss_err: mlx4_en_release_rss_steer(priv); mac_err: mlx4_en_put_qp(priv); cq_err: while (rx_index--) mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); for (i = 0; i < priv->rx_ring_num; i++) mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); return err; /* need to close devices */ } void mlx4_en_stop_port(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_addr_list *addr_list, *tmp; int i; u8 mc_list[16] = {0}; if (!priv->port_up) { en_dbg(DRV, priv, "stop port called while port already down\n"); return; } #ifdef CONFIG_DEBUG_FS mlx4_en_delete_debug_files(priv); #endif /* close port*/ mlx4_CLOSE_PORT(mdev->dev, priv->port); /* Set port as not active */ priv->port_up = false; priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); /* Promsicuous mode */ if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, MLX4_FS_ALL_DEFAULT); mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, MLX4_FS_MC_DEFAULT); } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { priv->flags &= ~MLX4_EN_FLAG_PROMISC; /* Disable promiscouos mode */ mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, priv->port); /* Disable Multicast promisc */ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, priv->port); priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; } } /* Detach All unicasts */ list_for_each_entry(addr_list, &priv->curr_uc_list, list) { mlx4_en_uc_steer_release(priv, addr_list->addr, priv->rss_map.indir_qp.qpn, addr_list->reg_id); } mlx4_en_clear_uclist(dev); list_for_each_entry_safe(addr_list, tmp, &priv->curr_uc_list, list) { list_del(&addr_list->list); kfree(addr_list); } /* Detach All multicasts */ memset(&mc_list[10], 0xff, ETH_ALEN); mc_list[5] = priv->port; /* needed for B0 steering support */ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, priv->broadcast_id); list_for_each_entry(addr_list, &priv->curr_mc_list, list) { memcpy(&mc_list[10], addr_list->addr, ETH_ALEN); mc_list[5] = priv->port; mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, 
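/*
 * Symmetric teardown: each per-address detach completed below uses
 * the reg_id saved when the group was attached in the rx_mode path,
 * just as the broadcast detach above used priv->broadcast_id from
 * mlx4_en_start_port().
 */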
addr_list->reg_id); } mlx4_en_clear_mclist(dev); list_for_each_entry_safe(addr_list, tmp, &priv->curr_mc_list, list) { list_del(&addr_list->list); kfree(addr_list); } /* Flush multicast filter */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); mlx4_en_destroy_drop_qp(priv); /* Free TX Rings */ for (i = 0; i < priv->tx_ring_num; i++) { mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); } msleep(10); for (i = 0; i < priv->tx_ring_num; i++) mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); /* Free RSS qps */ mlx4_en_release_rss_steer(priv); /* Unregister Mac address for the port */ mlx4_en_put_qp(priv); mdev->mac_removed[priv->port] = 1; /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { struct mlx4_en_cq *cq = priv->rx_cq[i]; mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); mlx4_en_deactivate_cq(priv, cq); } callout_stop(&priv->watchdog_timer); dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } static void mlx4_en_restart(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, watchdog_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; struct mlx4_en_tx_ring *ring; int i; if (priv->blocked == 0 || priv->port_up == 0) return; for (i = 0; i < priv->tx_ring_num; i++) { int watchdog_time; ring = priv->tx_ring[i]; watchdog_time = READ_ONCE(ring->watchdog_time); if (watchdog_time != 0 && time_after(ticks, ring->watchdog_time)) goto reset; } return; reset: priv->port_stats.tx_timeout++; en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); mutex_lock(&mdev->state_lock); if (priv->port_up) { mlx4_en_stop_port(dev); //for (i = 0; i < priv->tx_ring_num; i++) // netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue); if (mlx4_en_start_port(dev)) en_err(priv, "Failed restarting port %d\n", priv->port); } mutex_unlock(&mdev->state_lock); } static void mlx4_en_clear_stats(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int i; if (!mlx4_is_slave(mdev->dev)) if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) en_dbg(HW, priv, "Failed dumping statistics\n"); memset(&priv->pstats, 0, sizeof(priv->pstats)); memset(&priv->pkstats, 0, sizeof(priv->pkstats)); memset(&priv->port_stats, 0, sizeof(priv->port_stats)); memset(&priv->vport_stats, 0, sizeof(priv->vport_stats)); for (i = 0; i < priv->tx_ring_num; i++) { priv->tx_ring[i]->bytes = 0; priv->tx_ring[i]->packets = 0; priv->tx_ring[i]->tx_csum = 0; priv->tx_ring[i]->oversized_packets = 0; } for (i = 0; i < priv->rx_ring_num; i++) { priv->rx_ring[i]->bytes = 0; priv->rx_ring[i]->packets = 0; priv->rx_ring[i]->csum_ok = 0; priv->rx_ring[i]->csum_none = 0; } } static void mlx4_en_open(void* arg) { struct mlx4_en_priv *priv; struct mlx4_en_dev *mdev; struct net_device *dev; int err = 0; priv = arg; mdev = priv->mdev; dev = priv->dev; mutex_lock(&mdev->state_lock); if (!mdev->device_up) { en_err(priv, "Cannot open - device down/disabled\n"); goto out; } /* Reset HW statistics and SW counters */ mlx4_en_clear_stats(dev); err = mlx4_en_start_port(dev); if (err) en_err(priv, "Failed starting port:%d\n", priv->port); out: mutex_unlock(&mdev->state_lock); return; } void mlx4_en_free_resources(struct mlx4_en_priv *priv) { int i; #ifdef CONFIG_RFS_ACCEL if (priv->dev->rx_cpu_rmap) { free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); priv->dev->rx_cpu_rmap = NULL; } #endif for (i = 0; i < priv->tx_ring_num; i++) { if (priv->tx_ring && priv->tx_ring[i]) 
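/*
 * Teardown mirrors mlx4_en_alloc_resources(): each TX ring is
 * destroyed before its completion queue, then the RX rings and CQs,
 * and finally the per-ring statistics sysctl context.
 */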
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); if (priv->tx_cq && priv->tx_cq[i]) mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); } for (i = 0; i < priv->rx_ring_num; i++) { if (priv->rx_ring[i]) mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], priv->prof->rx_ring_size); if (priv->rx_cq[i]) mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); } if (priv->stat_sysctl != NULL) sysctl_ctx_free(&priv->stat_ctx); } int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) { struct mlx4_en_port_profile *prof = priv->prof; int i; int node = 0; /* Create rx Rings */ for (i = 0; i < priv->rx_ring_num; i++) { if (mlx4_en_create_cq(priv, &priv->rx_cq[i], prof->rx_ring_size, i, RX, node)) goto err; if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], prof->rx_ring_size, node)) goto err; } /* Create tx Rings */ for (i = 0; i < priv->tx_ring_num; i++) { if (mlx4_en_create_cq(priv, &priv->tx_cq[i], prof->tx_ring_size, i, TX, node)) goto err; if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], prof->tx_ring_size, TXBB_SIZE, node, i)) goto err; } #ifdef CONFIG_RFS_ACCEL priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num); if (!priv->dev->rx_cpu_rmap) goto err; #endif /* Re-create stat sysctls in case the number of rings changed. */ mlx4_en_sysctl_stat(priv); return 0; err: en_err(priv, "Failed to allocate NIC resources\n"); for (i = 0; i < priv->rx_ring_num; i++) { if (priv->rx_ring[i]) mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], prof->rx_ring_size); if (priv->rx_cq[i]) mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); } for (i = 0; i < priv->tx_ring_num; i++) { if (priv->tx_ring[i]) mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); if (priv->tx_cq[i]) mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); } priv->port_up = false; return -ENOMEM; } struct en_port_attribute { struct attribute attr; ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf); ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count); }; #define PORT_ATTR_RO(_name) \ struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name) #define EN_PORT_ATTR(_name, _mode, _show, _store) \ struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store) void mlx4_en_destroy_netdev(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); /* don't allow more IOCTLs */ priv->gone = 1; /* XXX wait a bit to allow IOCTL handlers to complete */ pause("W", hz); if (priv->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); if (priv->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); mutex_lock(&mdev->state_lock); mlx4_en_stop_port(dev); mutex_unlock(&mdev->state_lock); /* Unregister device - this will close the port if it was up */ if (priv->registered) ether_ifdetach(dev); if (priv->allocated) mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); cancel_delayed_work(&priv->stats_task); cancel_delayed_work(&priv->service_task); /* flush any pending task for this netdev */ flush_workqueue(mdev->workqueue); callout_drain(&priv->watchdog_timer); /* Detach the netdev so tasks would not attempt to access it */ mutex_lock(&mdev->state_lock); mdev->pndev[priv->port] = NULL; mutex_unlock(&mdev->state_lock); mlx4_en_free_resources(priv); /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */ if (priv->conf_sysctl != NULL) sysctl_ctx_free(&priv->conf_ctx); kfree(priv->tx_ring); kfree(priv->tx_cq); 
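/* At this point all deferred work has been flushed, the watchdog callout drained and the ifnet detached, so the softc and the interface structure themselves can safely be freed last. */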
kfree(priv); if_free(dev); } static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int err = 0; en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n", (unsigned)dev->if_mtu, (unsigned)new_mtu); if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { en_err(priv, "Bad MTU size:%d, max %u.\n", new_mtu, priv->max_mtu); return -EPERM; } mutex_lock(&mdev->state_lock); dev->if_mtu = new_mtu; if (dev->if_drv_flags & IFF_DRV_RUNNING) { if (!mdev->device_up) { /* NIC is probably restarting - let watchdog task reset the port */ en_dbg(DRV, priv, "Change MTU called with card down!?\n"); } else { mlx4_en_stop_port(dev); err = mlx4_en_start_port(dev); if (err) { en_err(priv, "Failed restarting port:%d\n", priv->port); queue_work(mdev->workqueue, &priv->watchdog_task); } } } mutex_unlock(&mdev->state_lock); return 0; } static int mlx4_en_calc_media(struct mlx4_en_priv *priv) { int trans_type; int active; active = IFM_ETHER; if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN) return (active); active |= IFM_FDX; trans_type = priv->port_state.transceiver; /* XXX I don't know all of the transceiver values. */ switch (priv->port_state.link_speed) { case 100: active |= IFM_100_T; break; case 1000: active |= IFM_1000_T; break; case 10000: if (trans_type > 0 && trans_type <= 0xC) active |= IFM_10G_SR; else if (trans_type == 0x80 || trans_type == 0) active |= IFM_10G_CX4; break; case 40000: active |= IFM_40G_CR4; break; } if (priv->prof->tx_pause) active |= IFM_ETH_TXPAUSE; if (priv->prof->rx_pause) active |= IFM_ETH_RXPAUSE; return (active); } static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr) { struct mlx4_en_priv *priv; priv = dev->if_softc; ifmr->ifm_status = IFM_AVALID; if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN) ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active = mlx4_en_calc_media(priv); return; } static int mlx4_en_media_change(struct ifnet *dev) { struct mlx4_en_priv *priv; struct ifmedia *ifm; int rxpause; int txpause; int error; priv = dev->if_softc; ifm = &priv->media; rxpause = txpause = 0; error = 0; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; case IFM_10G_SR: case IFM_10G_CX4: case IFM_1000_T: case IFM_40G_CR4: if ((IFM_SUBTYPE(ifm->ifm_media) == IFM_SUBTYPE(mlx4_en_calc_media(priv))) && (ifm->ifm_media & IFM_FDX)) break; /* Fallthrough */ default: printf("%s: Only auto media type\n", if_name(dev)); return (EINVAL); } /* Allow user to set/clear pause */ if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) rxpause = 1; if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) txpause = 1; if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) { priv->prof->tx_pause = txpause; priv->prof->rx_pause = rxpause; error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause, priv->prof->tx_ppp, priv->prof->rx_pause, priv->prof->rx_ppp); } return (error); } static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data) { struct mlx4_en_priv *priv; struct mlx4_en_dev *mdev; struct ifreq *ifr; int error; int mask; struct ifrsskey *ifrk; const u32 *key; struct ifrsshash *ifrh; u8 rss_mask; error = 0; mask = 0; priv = dev->if_softc; /* check if detaching */ if (priv == NULL || priv->gone != 0) return (ENXIO); mdev = priv->mdev; ifr = (struct ifreq *) data; switch (command) { case SIOCSIFMTU: error =
-mlx4_en_change_mtu(dev, ifr->ifr_mtu); break; case SIOCSIFFLAGS: if (dev->if_flags & IFF_UP) { if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) { mutex_lock(&mdev->state_lock); mlx4_en_start_port(dev); mutex_unlock(&mdev->state_lock); } else { mlx4_en_set_rx_mode(dev); } } else { mutex_lock(&mdev->state_lock); if (dev->if_drv_flags & IFF_DRV_RUNNING) { mlx4_en_stop_port(dev); if_link_state_change(dev, LINK_STATE_DOWN); } mutex_unlock(&mdev->state_lock); } break; case SIOCADDMULTI: case SIOCDELMULTI: mlx4_en_set_rx_mode(dev); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(dev, ifr, &priv->media, command); break; case SIOCSIFCAP: mutex_lock(&mdev->state_lock); mask = ifr->ifr_reqcap ^ dev->if_capenable; if (mask & IFCAP_TXCSUM) { dev->if_capenable ^= IFCAP_TXCSUM; dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & dev->if_capenable && !(IFCAP_TXCSUM & dev->if_capenable)) { dev->if_capenable &= ~IFCAP_TSO4; dev->if_hwassist &= ~CSUM_IP_TSO; if_printf(dev, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { dev->if_capenable ^= IFCAP_TXCSUM_IPV6; dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & dev->if_capenable && !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { dev->if_capenable &= ~IFCAP_TSO6; dev->if_hwassist &= ~CSUM_IP6_TSO; if_printf(dev, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) dev->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) dev->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & dev->if_capenable) && !(IFCAP_TXCSUM & dev->if_capenable)) { if_printf(dev, "enable txcsum first.\n"); error = EAGAIN; goto out; } dev->if_capenable ^= IFCAP_TSO4; dev->if_hwassist ^= CSUM_IP_TSO; } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & dev->if_capenable) && !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { if_printf(dev, "enable txcsum6 first.\n"); error = EAGAIN; goto out; } dev->if_capenable ^= IFCAP_TSO6; dev->if_hwassist ^= CSUM_IP6_TSO; } if (mask & IFCAP_LRO) dev->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) dev->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWFILTER) dev->if_capenable ^= IFCAP_VLAN_HWFILTER; if (mask & IFCAP_WOL_MAGIC) dev->if_capenable ^= IFCAP_WOL_MAGIC; if (dev->if_drv_flags & IFF_DRV_RUNNING) mlx4_en_start_port(dev); out: mutex_unlock(&mdev->state_lock); VLAN_CAPABILITIES(dev); break; #if __FreeBSD_version >= 1100036 case SIOCGI2C: { struct ifi2creq i2c; error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (error) break; if (i2c.len > sizeof(i2c.data)) { error = EINVAL; break; } /* * Note that we ignore i2c.addr here. The driver hardcodes * the address to 0x50, while standard expects it to be 0xA0. 
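* Both values refer to the same device: 0xA0 is the conventional 8-bit SFF bus address, and 0x50 is its 7-bit I2C equivalent (0xA0 >> 1).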
*/ error = mlx4_get_module_info(mdev->dev, priv->port, i2c.offset, i2c.len, i2c.data); if (error < 0) { error = -error; break; } error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } #endif case SIOCGIFRSSKEY: ifrk = (struct ifrsskey *)data; ifrk->ifrk_func = RSS_FUNC_TOEPLITZ; mutex_lock(&mdev->state_lock); key = mlx4_en_get_rss_key(priv, &ifrk->ifrk_keylen); if (ifrk->ifrk_keylen > RSS_KEYLEN) error = EINVAL; else memcpy(ifrk->ifrk_key, key, ifrk->ifrk_keylen); mutex_unlock(&mdev->state_lock); break; case SIOCGIFRSSHASH: mutex_lock(&mdev->state_lock); rss_mask = mlx4_en_get_rss_mask(priv); mutex_unlock(&mdev->state_lock); ifrh = (struct ifrsshash *)data; ifrh->ifrh_func = RSS_FUNC_TOEPLITZ; ifrh->ifrh_types = 0; if (rss_mask & MLX4_RSS_IPV4) ifrh->ifrh_types |= RSS_TYPE_IPV4; if (rss_mask & MLX4_RSS_TCP_IPV4) ifrh->ifrh_types |= RSS_TYPE_TCP_IPV4; if (rss_mask & MLX4_RSS_IPV6) ifrh->ifrh_types |= RSS_TYPE_IPV6; if (rss_mask & MLX4_RSS_TCP_IPV6) ifrh->ifrh_types |= RSS_TYPE_TCP_IPV6; if (rss_mask & MLX4_RSS_UDP_IPV4) ifrh->ifrh_types |= RSS_TYPE_UDP_IPV4; if (rss_mask & MLX4_RSS_UDP_IPV6) ifrh->ifrh_types |= RSS_TYPE_UDP_IPV6; break; default: error = ether_ioctl(dev, command, data); break; } return (error); } int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_port_profile *prof) { struct net_device *dev; struct mlx4_en_priv *priv; uint8_t dev_addr[ETHER_ADDR_LEN]; int err; int i; priv = kzalloc(sizeof(*priv), GFP_KERNEL); dev = priv->dev = if_alloc(IFT_ETHER); if (dev == NULL) { en_err(priv, "Net device allocation failed\n"); kfree(priv); return -ENOMEM; } dev->if_softc = priv; if_initname(dev, "mlxen", (device_get_unit( mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1); dev->if_mtu = ETHERMTU; dev->if_init = mlx4_en_open; dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; dev->if_ioctl = mlx4_en_ioctl; dev->if_transmit = mlx4_en_transmit; dev->if_qflush = mlx4_en_qflush; dev->if_snd.ifq_maxlen = prof->tx_ring_size; /* * Initialize driver private data */ priv->counter_index = 0xff; spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); callout_init(&priv->watchdog_timer, 1); #ifdef CONFIG_RFS_ACCEL INIT_LIST_HEAD(&priv->filters); spin_lock_init(&priv->filters_lock); #endif priv->msg_enable = MLX4_EN_MSG_LEVEL; priv->dev = dev; priv->mdev = mdev; priv->ddev = &mdev->pdev->dev; priv->prof = prof; priv->port = port; priv->port_up = false; priv->flags = prof->flags; priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; priv->tx_ring_num = prof->tx_ring_num; priv->tx_ring = kcalloc(MAX_TX_RINGS, sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL); if (!priv->tx_ring) { err = -ENOMEM; goto out; } priv->tx_cq = kcalloc(sizeof(struct mlx4_en_cq *), MAX_TX_RINGS, GFP_KERNEL); if (!priv->tx_cq) { err = -ENOMEM; goto out; } priv->rx_ring_num = prof->rx_ring_num; priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 
1 : 0; priv->mac_index = -1; priv->last_ifq_jiffies = 0; priv->if_counters_rx_errors = 0; priv->if_counters_rx_no_buffer = 0; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { priv->dcbx_cap = DCB_CAP_DCBX_HOST; priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { dev->dcbnl_ops = &mlx4_en_dcbnl_ops; } else { en_info(priv, "QoS disabled - no HW support\n"); dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; } } #endif /* Query for default mac and max mtu */ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; priv->mac = mdev->dev->caps.def_mac[priv->port]; if (ILLEGAL_MAC(priv->mac)) { #if BITS_PER_LONG == 64 en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n", priv->port, priv->mac); #elif BITS_PER_LONG == 32 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n", priv->port, priv->mac); #endif err = -EINVAL; goto out; } mlx4_en_sysctl_conf(priv); err = mlx4_en_alloc_resources(priv); if (err) goto out; /* Allocate page for receive rings */ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); if (err) { en_err(priv, "Failed to allocate page for rx qps\n"); goto out; } priv->allocated = 1; /* * Set driver features */ dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; dev->if_capabilities |= IFCAP_LRO; dev->if_capabilities |= IFCAP_HWSTATS; if (mdev->LSO_support) dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO; #if __FreeBSD_version >= 1100000 /* set TSO limits so that we don't have to drop TX packets */ dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */; dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */; dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE; #endif dev->if_capenable = dev->if_capabilities; dev->if_hwassist = 0; if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) dev->if_hwassist |= CSUM_TSO; if (dev->if_capenable & IFCAP_TXCSUM) dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (dev->if_capenable & IFCAP_TXCSUM_IPV6) dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); /* Register for VLAN events */ priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); mdev->pndev[priv->port] = dev; priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN; mlx4_en_set_default_moderation(priv); /* Set default MAC */ for (i = 0; i < ETHER_ADDR_LEN; i++) dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i)); ether_ifattach(dev, dev_addr); if_link_state_change(dev, LINK_STATE_DOWN); ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, mlx4_en_media_change, mlx4_en_media_status); ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL); ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL); ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL); ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL); ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO); NETDUMP_SET(dev, mlx4_en); en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); priv->registered = 1;
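/*
 * A minimal standalone sketch of the "Set default MAC" conversion above,
 * assuming (hypothetically) priv->mac == 0x0002c9aabbccULL, i.e. the MAC
 * 00:02:c9:aa:bb:cc stored right-justified in a 64-bit word:
 *
 *	u64 mac = 0x0002c9aabbccULL;
 *	u8 addr[ETHER_ADDR_LEN];
 *	int i;
 *
 *	for (i = 0; i < ETHER_ADDR_LEN; i++)
 *		addr[ETHER_ADDR_LEN - 1 - i] = (u8)(mac >> (8 * i));
 *
 * After the loop addr[] holds { 0x00, 0x02, 0xc9, 0xaa, 0xbb, 0xcc }: the
 * least significant byte of "mac" becomes the last octet of the address.
 */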
priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_mb_size, prof->tx_pause, prof->tx_ppp, prof->rx_pause, prof->rx_ppp); if (err) { en_err(priv, "Failed setting port general configurations " "for port %d, with error %d\n", priv->port, err); goto out; } /* Init port */ en_warn(priv, "Initializing port\n"); err = mlx4_INIT_PORT(mdev->dev, priv->port); if (err) { en_err(priv, "Failed Initializing port\n"); goto out; } queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); return 0; out: mlx4_en_destroy_netdev(dev); return err; } static int mlx4_en_set_ring_size(struct net_device *dev, int rx_size, int tx_size) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int port_up = 0; int err = 0; rx_size = roundup_pow_of_two(rx_size); rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); tx_size = roundup_pow_of_two(tx_size); tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && tx_size == priv->tx_ring[0]->size) return 0; mutex_lock(&mdev->state_lock); if (priv->port_up) { port_up = 1; mlx4_en_stop_port(dev); } mlx4_en_free_resources(priv); priv->prof->tx_ring_size = tx_size; priv->prof->rx_ring_size = rx_size; err = mlx4_en_alloc_resources(priv); if (err) { en_err(priv, "Failed reallocating port resources\n"); goto out; } if (port_up) { err = mlx4_en_start_port(dev); if (err) en_err(priv, "Failed starting port\n"); } out: mutex_unlock(&mdev->state_lock); return err; } static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS) { struct mlx4_en_priv *priv; int size; int error; priv = arg1; size = priv->prof->rx_ring_size; error = sysctl_handle_int(oidp, &size, 0, req); if (error || !req->newptr) return (error); error = -mlx4_en_set_ring_size(priv->dev, size, priv->prof->tx_ring_size); return (error); } static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS) { struct mlx4_en_priv *priv; int size; int error; priv = arg1; size = priv->prof->tx_ring_size; error = sysctl_handle_int(oidp, &size, 0, req); if (error || !req->newptr) return (error); error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size, size); return (error); } static int mlx4_en_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int ret; u8 data[4]; /* Read first 2 bytes to get Module & REV ID */ ret = mlx4_get_module_info(mdev->dev, priv->port, 0/*offset*/, 2/*size*/, data); if (ret < 2) { en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret); return -EIO; } switch (data[0] /* identifier */) { case MLX4_MODULE_ID_QSFP: modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; break; case MLX4_MODULE_ID_QSFP_PLUS: if (data[1] >= 0x3) { /* revision id */ modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; } else { modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; } break; case MLX4_MODULE_ID_QSFP28: modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len =
ETH_MODULE_SFF_8636_LEN; break; case MLX4_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; default: en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n"); return -EINVAL; } return 0; } static int mlx4_en_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int offset = ee->offset; int i = 0, ret; if (ee->len == 0) return -EINVAL; memset(data, 0, ee->len); while (i < ee->len) { en_dbg(DRV, priv, "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", i, offset, ee->len - i); ret = mlx4_get_module_info(mdev->dev, priv->port, offset, ee->len - i, data + i); if (!ret) /* Done reading */ return 0; if (ret < 0) { en_err(priv, "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", i, offset, ee->len - i, ret); return -1; } i += ret; offset += ret; } return 0; } static void mlx4_en_print_eeprom(u8 *data, __u32 len) { int i; int j = 0; int row = 0; const int NUM_OF_BYTES = 16; printf("\nOffset\t\tValues\n"); printf("------\t\t------\n"); while(row < len){ printf("0x%04x\t\t",row); for(i=0; i < NUM_OF_BYTES; i++){ printf("%02x ", data[j]); row++; j++; } printf("\n"); } } /* Read cable EEPROM module information by first inspecting the first * two bytes to get the length and then read the rest of the information. * The information is printed to dmesg. */ static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS) { u8* data; int error; int result = 0; struct mlx4_en_priv *priv; struct net_device *dev; struct ethtool_modinfo modinfo; struct ethtool_eeprom ee; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { priv = arg1; dev = priv->dev; data = kmalloc(PAGE_SIZE, GFP_KERNEL); error = mlx4_en_get_module_info(dev, &modinfo); if (error) { en_err(priv, "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n", -error); goto out; } ee.len = modinfo.eeprom_len; ee.offset = 0; error = mlx4_en_get_module_eeprom(dev, &ee, data); if (error) { en_err(priv, "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n", -error); /* Continue printing partial information in case of an error */ } /* EEPROM information will be printed in dmesg */ mlx4_en_print_eeprom(data, ee.len); out: kfree(data); } /* Return zero to prevent sysctl failure. */ return (0); } static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS) { struct mlx4_en_priv *priv; int ppp; int error; priv = arg1; ppp = priv->prof->tx_ppp; error = sysctl_handle_int(oidp, &ppp, 0, req); if (error || !req->newptr) return (error); if (ppp > 0xff || ppp < 0) return (-EINVAL); priv->prof->tx_ppp = ppp; error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause, priv->prof->tx_ppp, priv->prof->rx_pause, priv->prof->rx_ppp); return (error); } static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS) { struct mlx4_en_priv *priv; struct mlx4_en_dev *mdev; int ppp; int error; int port_up; port_up = 0; priv = arg1; mdev = priv->mdev; ppp = priv->prof->rx_ppp; error = sysctl_handle_int(oidp, &ppp, 0, req); if (error || !req->newptr) return (error); if (ppp > 0xff || ppp < 0) return (-EINVAL); /* See if we have to change the number of tx queues. 
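* The test below compares !ppp with !priv->prof->rx_ppp, i.e. both values reduced to booleans, so the resources are only reallocated when per-priority pause is being toggled between off and on.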
*/ if (!ppp != !priv->prof->rx_ppp) { mutex_lock(&mdev->state_lock); if (priv->port_up) { port_up = 1; mlx4_en_stop_port(priv->dev); } mlx4_en_free_resources(priv); priv->prof->rx_ppp = ppp; error = -mlx4_en_alloc_resources(priv); if (error) en_err(priv, "Failed reallocating port resources\n"); if (error == 0 && port_up) { error = -mlx4_en_start_port(priv->dev); if (error) en_err(priv, "Failed starting port\n"); } mutex_unlock(&mdev->state_lock); return (error); } priv->prof->rx_ppp = ppp; error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause, priv->prof->tx_ppp, priv->prof->rx_pause, priv->prof->rx_ppp); return (error); } static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv) { struct net_device *dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *node; struct sysctl_oid_list *node_list; struct sysctl_oid *coal; struct sysctl_oid_list *coal_list; const char *pnameunit; dev = priv->dev; ctx = &priv->conf_ctx; pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev); sysctl_ctx_init(ctx); priv->conf_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet"); node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, "conf", CTLFLAG_RD, NULL, "Configuration"); node_list = SYSCTL_CHILDREN(node); SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable", CTLFLAG_RW, &priv->msg_enable, 0, "Driver message enable bitfield"); SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings", CTLFLAG_RD, &priv->rx_ring_num, 0, "Number of receive rings"); SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings", CTLFLAG_RD, &priv->tx_ring_num, 0, "Number of transmit rings"); SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, mlx4_en_set_rx_ring_size, "I", "Receive ring size"); SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, mlx4_en_set_tx_ring_size, "I", "Transmit ring size"); SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, mlx4_en_set_tx_ppp, "I", "TX Per-priority pause"); SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, mlx4_en_set_rx_ppp, "I", "RX Per-priority pause"); SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num", CTLFLAG_RD, &priv->port, 0, "Port Number"); SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name", CTLFLAG_RD, __DECONST(void *, pnameunit), 0, "PCI device name"); /* Add coalescer configuration. 
*/ coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration"); coal_list = SYSCTL_CHILDREN(coal); SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low", CTLFLAG_RW, &priv->pkt_rate_low, 0, "Packets per-second for minimum delay"); SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low", CTLFLAG_RW, &priv->rx_usecs_low, 0, "Minimum RX delay in micro-seconds"); SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high", CTLFLAG_RW, &priv->pkt_rate_high, 0, "Packets per-second for maximum delay"); SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high", CTLFLAG_RW, &priv->rx_usecs_high, 0, "Maximum RX delay in micro-seconds"); SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval", CTLFLAG_RW, &priv->sample_interval, 0, "adaptive frequency in units of HZ ticks"); SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal", CTLFLAG_RW, &priv->adaptive_rx_coal, 0, "Enable adaptive rx coalescing"); /* EEPROM support */ SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, mlx4_en_read_eeprom, "I", "EEPROM information"); } static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *node_list; struct sysctl_oid *ring_node; struct sysctl_oid_list *ring_list; struct mlx4_en_tx_ring *tx_ring; struct mlx4_en_rx_ring *rx_ring; char namebuf[128]; int i; ctx = &priv->stat_ctx; sysctl_ctx_init(ctx); priv->stat_sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->conf_sysctl), OID_AUTO, "stat", CTLFLAG_RD, NULL, "Statistics"); node_list = SYSCTL_CHILDREN(priv->stat_sysctl); #ifdef MLX4_EN_PERF_STAT SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD, &priv->pstats.tx_poll, 0, "TX Poll calls"); SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD, &priv->pstats.tx_pktsz_avg, "TX average packet size"); SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD, &priv->pstats.inflight_avg, 0, "TX average packets in-flight"); SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD, &priv->pstats.tx_coal_avg, 0, "TX average coalesced completions"); SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD, &priv->pstats.rx_coal_avg, 0, "RX average coalesced completions"); #endif SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD, &priv->port_stats.tso_packets, 0, "TSO packets sent"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD, &priv->port_stats.queue_stopped, 0, "Queue full"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD, &priv->port_stats.wake_queue, 0, "Queue resumed after full"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD, &priv->port_stats.tx_timeout, 0, "Transmit timeouts"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD, &priv->port_stats.oversized_packets, 0, "TX oversized packets, m_defrag failed"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD, &priv->port_stats.rx_alloc_failed, 0, "RX failed to allocate mbuf"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD, &priv->port_stats.rx_chksum_good, 0, "RX checksum offload success"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD, &priv->port_stats.rx_chksum_none, 0, "RX without checksum offload"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_chksum_offload", CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 0, "TX checksum offloads"); SYSCTL_ADD_U64(ctx,
node_list, OID_AUTO, "defrag_attempts", CTLFLAG_RD, &priv->port_stats.defrag_attempts, 0, "Oversized chains defragged"); /* Could strdup the names and add in a loop. This is simpler. */ SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &priv->pkstats.rx_bytes, 0, "RX Bytes"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &priv->pkstats.rx_packets, 0, "RX packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD, &priv->pkstats.rx_multicast_packets, 0, "RX Multicast Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD, &priv->pkstats.rx_broadcast_packets, 0, "RX Broadcast Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD, &priv->pkstats.rx_errors, 0, "RX Errors"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD, &priv->pkstats.rx_dropped, 0, "RX Dropped"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD, &priv->pkstats.rx_length_errors, 0, "RX Length Errors"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD, &priv->pkstats.rx_over_errors, 0, "RX Over Errors"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD, &priv->pkstats.rx_crc_errors, 0, "RX CRC Errors"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD, &priv->pkstats.rx_jabbers, 0, "RX Jabbers"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD, &priv->pkstats.rx_in_range_length_error, 0, "RX IN_Range Length Error"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_out_range_length_error", CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 0, "RX Out Range Length Error"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_lt_64_bytes_packets, 0, "RX Lt 64 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_127_bytes_packets, 0, "RX 127 bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_255_bytes_packets, 0, "RX 255 bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_511_bytes_packets, 0, "RX 511 bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_1023_bytes_packets, 0, "RX 1023 bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_1518_bytes_packets, 0, "RX 1518 bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_1522_bytes_packets, 0, "RX 1522 bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_1548_bytes_packets, 0, "RX 1548 bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD, &priv->pkstats.rx_gt_1548_bytes_packets, 0, "RX Greater Then 1548 bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &priv->pkstats.tx_packets, 0, "TX packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &priv->pkstats.tx_bytes, 0, "TX Bytes"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD, &priv->pkstats.tx_multicast_packets, 0, "TX Multicast Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD, &priv->pkstats.tx_broadcast_packets, 0, "TX Broadcast Packets"); SYSCTL_ADD_U64(ctx, 
node_list, OID_AUTO, "tx_errors", CTLFLAG_RD, &priv->pkstats.tx_errors, 0, "TX Errors"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD, &priv->pkstats.tx_dropped, 0, "TX Dropped"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_lt_64_bytes_packets, 0, "TX Less Then 64 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_127_bytes_packets, 0, "TX 127 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_255_bytes_packets, 0, "TX 255 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_511_bytes_packets, 0, "TX 511 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_1023_bytes_packets, 0, "TX 1023 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_1518_bytes_packets, 0, "TX 1518 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_1522_bytes_packets, 0, "TX 1522 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_1548_bytes_packets, 0, "TX 1548 Bytes Packets"); SYSCTL_ADD_U64(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD, &priv->pkstats.tx_gt_1548_bytes_packets, 0, "TX Greater Then 1548 Bytes Packets"); for (i = 0; i < priv->tx_ring_num; i++) { tx_ring = priv->tx_ring[i]; snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i); ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "TX Ring"); ring_list = SYSCTL_CHILDREN(ring_node); SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", CTLFLAG_RD, &tx_ring->packets, 0, "TX packets"); SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes", CTLFLAG_RD, &tx_ring->bytes, 0, "TX bytes"); SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "tso_packets", CTLFLAG_RD, &tx_ring->tso_packets, 0, "TSO packets"); SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "defrag_attempts", CTLFLAG_RD, &tx_ring->defrag_attempts, 0, "Oversized chains defragged"); } for (i = 0; i < priv->rx_ring_num; i++) { rx_ring = priv->rx_ring[i]; snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i); ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "RX Ring"); ring_list = SYSCTL_CHILDREN(ring_node); SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "packets", CTLFLAG_RD, &rx_ring->packets, 0, "RX packets"); SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "bytes", CTLFLAG_RD, &rx_ring->bytes, 0, "RX bytes"); SYSCTL_ADD_U64(ctx, ring_list, OID_AUTO, "error", CTLFLAG_RD, &rx_ring->errors, 0, "RX soft errors"); } } #ifdef NETDUMP static void mlx4_en_netdump_init(struct ifnet *dev, int *nrxr, int *ncl, int *clsize) { struct mlx4_en_priv *priv; priv = if_getsoftc(dev); mutex_lock(&priv->mdev->state_lock); *nrxr = priv->rx_ring_num; *ncl = NETDUMP_MAX_IN_FLIGHT; *clsize = priv->rx_mb_size; mutex_unlock(&priv->mdev->state_lock); } static void mlx4_en_netdump_event(struct ifnet *dev, enum netdump_ev event) { } static int mlx4_en_netdump_transmit(struct ifnet *dev, struct mbuf *m) { struct mlx4_en_priv *priv; int err; priv = if_getsoftc(dev); if ((if_getdrvflags(dev) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || !priv->link_state) return (ENOENT); err = mlx4_en_xmit(priv, 0, &m); if (err != 0 && m != NULL) m_freem(m); return (err); } static int 
mlx4_en_netdump_poll(struct ifnet *dev, int count) { struct mlx4_en_priv *priv; priv = if_getsoftc(dev); if ((if_getdrvflags(dev) & IFF_DRV_RUNNING) == 0 || !priv->link_state) return (ENOENT); mlx4_poll_interrupts(priv->mdev->dev); return (0); } #endif /* NETDUMP */ Index: user/ngie/bug-237403/sys/dev/mlx5/mlx5_en/mlx5_en_main.c =================================================================== --- user/ngie/bug-237403/sys/dev/mlx5/mlx5_en/mlx5_en_main.c (revision 348028) +++ user/ngie/bug-237403/sys/dev/mlx5/mlx5_en/mlx5_en_main.c (revision 348029) @@ -1,4489 +1,4490 @@ /*- * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "en.h" +#include #include #include #ifndef ETH_DRIVER_VERSION #define ETH_DRIVER_VERSION "3.5.1" #endif #define DRIVER_RELDATE "April 2019" static const char mlx5e_version[] = "mlx5en: Mellanox Ethernet driver " ETH_DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs); struct mlx5e_channel_param { struct mlx5e_rq_param rq; struct mlx5e_sq_param sq; struct mlx5e_cq_param rx_cq; struct mlx5e_cq_param tx_cq; }; struct media { u32 subtype; u64 baudrate; }; static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = { [MLX5E_1000BASE_CX_SGMII][MLX5E_SGMII] = { .subtype = IFM_1000_CX_SGMII, .baudrate = IF_Mbps(1000ULL), }, [MLX5E_1000BASE_KX][MLX5E_KX] = { .subtype = IFM_1000_KX, .baudrate = IF_Mbps(1000ULL), }, [MLX5E_10GBASE_CX4][MLX5E_CX4] = { .subtype = IFM_10G_CX4, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_KX4][MLX5E_KX4] = { .subtype = IFM_10G_KX4, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_KR][MLX5E_KR] = { .subtype = IFM_10G_KR, .baudrate = IF_Gbps(10ULL), }, [MLX5E_20GBASE_KR2][MLX5E_KR2] = { .subtype = IFM_20G_KR2, .baudrate = IF_Gbps(20ULL), }, [MLX5E_40GBASE_CR4][MLX5E_CR4] = { .subtype = IFM_40G_CR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_KR4][MLX5E_KR4] = { .subtype = IFM_40G_KR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_56GBASE_R4][MLX5E_R] = { .subtype = IFM_56G_R4, .baudrate = IF_Gbps(56ULL), }, [MLX5E_10GBASE_CR][MLX5E_CR1] = { .subtype = IFM_10G_CR1, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_SR][MLX5E_SR] = { .subtype = IFM_10G_SR, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_ER_LR][MLX5E_ER] = { .subtype = IFM_10G_ER, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_ER_LR][MLX5E_LR] = { .subtype = IFM_10G_LR, .baudrate = IF_Gbps(10ULL), }, [MLX5E_40GBASE_SR4][MLX5E_SR4] = { .subtype = IFM_40G_SR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_LR4_ER4][MLX5E_LR4] = { .subtype = IFM_40G_LR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_LR4_ER4][MLX5E_ER4] = { .subtype = IFM_40G_ER4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_100GBASE_CR4][MLX5E_CR4] = { .subtype = IFM_100G_CR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GBASE_SR4][MLX5E_SR4] = { .subtype = IFM_100G_SR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GBASE_KR4][MLX5E_KR4] = { .subtype = IFM_100G_KR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GBASE_LR4][MLX5E_LR4] = { .subtype = IFM_100G_LR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100BASE_TX][MLX5E_TX] = { .subtype = IFM_100_TX, .baudrate = IF_Mbps(100ULL), }, [MLX5E_1000BASE_T][MLX5E_T] = { .subtype = IFM_1000_T, .baudrate = IF_Mbps(1000ULL), }, [MLX5E_10GBASE_T][MLX5E_T] = { .subtype = IFM_10G_T, .baudrate = IF_Gbps(10ULL), }, [MLX5E_25GBASE_CR][MLX5E_CR] = { .subtype = IFM_25G_CR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GBASE_KR][MLX5E_KR] = { .subtype = IFM_25G_KR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GBASE_SR][MLX5E_SR] = { .subtype = IFM_25G_SR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_50GBASE_CR2][MLX5E_CR2] = { .subtype = IFM_50G_CR2, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GBASE_KR2][MLX5E_KR2] = { .subtype = IFM_50G_KR2, .baudrate = IF_Gbps(50ULL), }, }; static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_LINK_MODES_NUMBER] = { [MLX5E_SGMII_100M][MLX5E_SGMII] = { .subtype = IFM_100_SGMII, .baudrate = IF_Mbps(100), }, [MLX5E_1000BASE_X_SGMII][MLX5E_KX] = { .subtype = IFM_1000_KX, .baudrate = IF_Mbps(1000), }, [MLX5E_1000BASE_X_SGMII][MLX5E_CX_SGMII] = { .subtype = IFM_1000_CX_SGMII, .baudrate = 
IF_Mbps(1000), }, [MLX5E_1000BASE_X_SGMII][MLX5E_CX] = { .subtype = IFM_1000_CX, .baudrate = IF_Mbps(1000), }, [MLX5E_1000BASE_X_SGMII][MLX5E_LX] = { .subtype = IFM_1000_LX, .baudrate = IF_Mbps(1000), }, [MLX5E_1000BASE_X_SGMII][MLX5E_SX] = { .subtype = IFM_1000_SX, .baudrate = IF_Mbps(1000), }, [MLX5E_1000BASE_X_SGMII][MLX5E_T] = { .subtype = IFM_1000_T, .baudrate = IF_Mbps(1000), }, [MLX5E_5GBASE_R][MLX5E_T] = { .subtype = IFM_5000_T, .baudrate = IF_Mbps(5000), }, [MLX5E_5GBASE_R][MLX5E_KR] = { .subtype = IFM_5000_KR, .baudrate = IF_Mbps(5000), }, [MLX5E_5GBASE_R][MLX5E_KR1] = { .subtype = IFM_5000_KR1, .baudrate = IF_Mbps(5000), }, [MLX5E_5GBASE_R][MLX5E_KR_S] = { .subtype = IFM_5000_KR_S, .baudrate = IF_Mbps(5000), }, [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_ER] = { .subtype = IFM_10G_ER, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_KR] = { .subtype = IFM_10G_KR, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_LR] = { .subtype = IFM_10G_LR, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_SR] = { .subtype = IFM_10G_SR, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_T] = { .subtype = IFM_10G_T, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_AOC] = { .subtype = IFM_10G_AOC, .baudrate = IF_Gbps(10ULL), }, [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CR1] = { .subtype = IFM_10G_CR1, .baudrate = IF_Gbps(10ULL), }, [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CR4] = { .subtype = IFM_40G_CR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_KR4] = { .subtype = IFM_40G_KR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_LR4] = { .subtype = IFM_40G_LR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_SR4] = { .subtype = IFM_40G_SR4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_ER4] = { .subtype = IFM_40G_ER4, .baudrate = IF_Gbps(40ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR] = { .subtype = IFM_25G_CR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR] = { .subtype = IFM_25G_KR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_SR] = { .subtype = IFM_25G_SR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_ACC] = { .subtype = IFM_25G_ACC, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_AOC] = { .subtype = IFM_25G_AOC, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR1] = { .subtype = IFM_25G_CR1, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CR_S] = { .subtype = IFM_25G_CR_S, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR1] = { .subtype = IFM_5000_KR1, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_KR_S] = { .subtype = IFM_25G_KR_S, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_LR] = { .subtype = IFM_25G_LR, .baudrate = IF_Gbps(25ULL), }, [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_T] = { .subtype = IFM_25G_T, .baudrate = IF_Gbps(25ULL), }, [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CR2] = { .subtype = IFM_50G_CR2, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_KR2] = { .subtype = IFM_50G_KR2, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_SR2] = { .subtype = IFM_50G_SR2, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_LR2] = { .subtype = IFM_50G_LR2, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_LR] = { .subtype = IFM_50G_LR, .baudrate = 
IF_Gbps(50ULL), }, [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_SR] = { .subtype = IFM_50G_SR, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CP] = { .subtype = IFM_50G_CP, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_FR] = { .subtype = IFM_50G_FR, .baudrate = IF_Gbps(50ULL), }, [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_KR_PAM4] = { .subtype = IFM_50G_KR_PAM4, .baudrate = IF_Gbps(50ULL), }, [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CR4] = { .subtype = IFM_100G_CR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_KR4] = { .subtype = IFM_100G_KR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_LR4] = { .subtype = IFM_100G_LR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_SR4] = { .subtype = IFM_100G_SR4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_SR2] = { .subtype = IFM_100G_SR2, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CP2] = { .subtype = IFM_100G_CP2, .baudrate = IF_Gbps(100ULL), }, [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_KR2_PAM4] = { .subtype = IFM_100G_KR2_PAM4, .baudrate = IF_Gbps(100ULL), }, [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_DR4] = { .subtype = IFM_200G_DR4, .baudrate = IF_Gbps(200ULL), }, [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_LR4] = { .subtype = IFM_200G_LR4, .baudrate = IF_Gbps(200ULL), }, [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_SR4] = { .subtype = IFM_200G_SR4, .baudrate = IF_Gbps(200ULL), }, [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_FR4] = { .subtype = IFM_200G_FR4, .baudrate = IF_Gbps(200ULL), }, [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CR4_PAM4] = { .subtype = IFM_200G_CR4_PAM4, .baudrate = IF_Gbps(200ULL), }, [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_KR4_PAM4] = { .subtype = IFM_200G_KR4_PAM4, .baudrate = IF_Gbps(200ULL), }, }; MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet"); static void mlx5e_update_carrier(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; u32 out[MLX5_ST_SZ_DW(ptys_reg)]; u32 eth_proto_oper; int error; u8 port_state; u8 is_er_type; u8 i, j; bool ext; struct media media_entry = {}; port_state = mlx5_query_vport_state(mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); if (port_state == VPORT_STATE_UP) { priv->media_status_last |= IFM_ACTIVE; } else { priv->media_status_last &= ~IFM_ACTIVE; priv->media_active_last = IFM_ETHER; if_link_state_change(priv->ifp, LINK_STATE_DOWN); return; } error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); if (error) { priv->media_active_last = IFM_ETHER; priv->ifp->if_baudrate = 1; if_printf(priv->ifp, "%s: query port ptys failed: " "0x%x\n", __func__, error); return; } ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); i = ilog2(eth_proto_oper); for (j = 0; j != MLX5E_LINK_MODES_NUMBER; j++) { media_entry = ext ? 
mlx5e_ext_mode_table[i][j] : mlx5e_mode_table[i][j]; if (media_entry.baudrate != 0) break; } if (media_entry.subtype == 0) { if_printf(priv->ifp, "%s: Could not find operational " "media subtype\n", __func__); return; } switch (media_entry.subtype) { case IFM_10G_ER: error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type); if (error != 0) { if_printf(priv->ifp, "%s: query port pddr failed: %d\n", __func__, error); } if (error != 0 || is_er_type == 0) media_entry.subtype = IFM_10G_LR; break; case IFM_40G_LR4: error = mlx5_query_pddr_range_info(mdev, 1, &is_er_type); if (error != 0) { if_printf(priv->ifp, "%s: query port pddr failed: %d\n", __func__, error); } if (error == 0 && is_er_type != 0) media_entry.subtype = IFM_40G_ER4; break; } priv->media_active_last = media_entry.subtype | IFM_ETHER | IFM_FDX; priv->ifp->if_baudrate = media_entry.baudrate; if_link_state_change(priv->ifp, LINK_STATE_UP); } static void mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr) { struct mlx5e_priv *priv = dev->if_softc; ifmr->ifm_status = priv->media_status_last; ifmr->ifm_active = priv->media_active_last | (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) | (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0); } static u32 mlx5e_find_link_mode(u32 subtype, bool ext) { u32 i; u32 j; u32 link_mode = 0; u32 speeds_num = 0; struct media media_entry = {}; switch (subtype) { case IFM_10G_LR: subtype = IFM_10G_ER; break; case IFM_40G_ER4: subtype = IFM_40G_LR4; break; } speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER; for (i = 0; i != speeds_num; i++) { for (j = 0; j < MLX5E_LINK_MODES_NUMBER ; ++j) { media_entry = ext ? mlx5e_ext_mode_table[i][j] : mlx5e_mode_table[i][j]; if (media_entry.baudrate == 0) continue; if (media_entry.subtype == subtype) { link_mode |= MLX5E_PROT_MASK(i); } } } return (link_mode); } static int mlx5e_set_port_pause_and_pfc(struct mlx5e_priv *priv) { return (mlx5_set_port_pause_and_pfc(priv->mdev, 1, priv->params.rx_pauseframe_control, priv->params.tx_pauseframe_control, priv->params.rx_priority_flow_control, priv->params.tx_priority_flow_control)); } static int mlx5e_set_port_pfc(struct mlx5e_priv *priv) { int error; if (priv->gone != 0) { error = -ENXIO; } else if (priv->params.rx_pauseframe_control || priv->params.tx_pauseframe_control) { if_printf(priv->ifp, "Global pauseframes must be disabled before " "enabling PFC.\n"); error = -EINVAL; } else { error = mlx5e_set_port_pause_and_pfc(priv); } return (error); } static int mlx5e_media_change(struct ifnet *dev) { struct mlx5e_priv *priv = dev->if_softc; struct mlx5_core_dev *mdev = priv->mdev; u32 eth_proto_cap; u32 link_mode; u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int was_opened; int locked; int error; bool ext; locked = PRIV_LOCKED(priv); if (!locked) PRIV_LOCK(priv); if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) { error = EINVAL; goto done; } error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); if (error != 0) { if_printf(dev, "Query port media capability failed\n"); goto done; } ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media), ext); /* query supported capabilities */ eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability); /* check for autoselect */ if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) { link_mode = eth_proto_cap; if (link_mode == 0) { if_printf(dev, "Port media capability is zero\n"); error = EINVAL; goto done; } } else { link_mode = link_mode & 
eth_proto_cap; if (link_mode == 0) { if_printf(dev, "Not supported link mode requested\n"); error = EINVAL; goto done; } } if (priv->media.ifm_media & (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { /* check if PFC is enabled */ if (priv->params.rx_priority_flow_control || priv->params.tx_priority_flow_control) { if_printf(dev, "PFC must be disabled before enabling global pauseframes.\n"); error = EINVAL; goto done; } } /* update pauseframe control bits */ priv->params.rx_pauseframe_control = (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0; priv->params.tx_pauseframe_control = (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0; /* check if device is opened */ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); /* reconfigure the hardware */ mlx5_set_port_status(mdev, MLX5_PORT_DOWN); mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN, ext); error = -mlx5e_set_port_pause_and_pfc(priv); if (was_opened) mlx5_set_port_status(mdev, MLX5_PORT_UP); done: if (!locked) PRIV_UNLOCK(priv); return (error); } static void mlx5e_update_carrier_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, update_carrier_work); PRIV_LOCK(priv); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_update_carrier(priv); PRIV_UNLOCK(priv); } #define MLX5E_PCIE_PERF_GET_64(a,b,c,d,e,f) \ s_debug->c = MLX5_GET64(mpcnt_reg, out, counter_set.f.c); #define MLX5E_PCIE_PERF_GET_32(a,b,c,d,e,f) \ s_debug->c = MLX5_GET(mpcnt_reg, out, counter_set.f.c); static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; const unsigned sz = MLX5_ST_SZ_BYTES(mpcnt_reg); void *out; void *in; int err; /* allocate firmware request structures */ in = mlx5_vzalloc(sz); out = mlx5_vzalloc(sz); if (in == NULL || out == NULL) goto free_out; MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); if (err != 0) goto free_out; MLX5E_PCIE_PERFORMANCE_COUNTERS_64(MLX5E_PCIE_PERF_GET_64) MLX5E_PCIE_PERFORMANCE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP); err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); if (err != 0) goto free_out; MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_LANE_COUNTERS_GROUP); err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); if (err != 0) goto free_out; MLX5E_PCIE_LANE_COUNTERS_32(MLX5E_PCIE_PERF_GET_32) free_out: /* free firmware request structures */ kvfree(in); kvfree(out); } /* * This function reads the physical port counters from the firmware * using a pre-defined layout defined by various MLX5E_PPORT_XXX() * macros. The output is converted from big-endian 64-bit values into * host endian ones and stored in the "priv->stats.pport" structure. 
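* Concretely, each counter is fetched from the firmware reply as a big-endian 64-bit word and converted with be64toh() into the flat "arg" arrays, in the order the MLX5E_PPORT_XXX() macros lay out.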
*/ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_pport_stats *s = &priv->stats.pport; struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug; u32 *in; u32 *out; const u64 *ptr; unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg); unsigned x; unsigned y; unsigned z; /* allocate firmware request structures */ in = mlx5_vzalloc(sz); out = mlx5_vzalloc(sz); if (in == NULL || out == NULL) goto free_out; /* * Get pointer to the 64-bit counter set which is located at a * fixed offset in the output firmware request structure: */ ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set); MLX5_SET(ppcnt_reg, in, local_port, 1); /* read IEEE802_3 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0, y = MLX5E_PPORT_PER_PRIO_STATS_NUM; x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++) s->arg[y] = be64toh(ptr[x]); /* read RFC2819 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++) s->arg[y] = be64toh(ptr[x]); for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM + MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); /* read RFC2863 counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); /* read physical layer stats counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); /* read Extended Ethernet counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); /* read Extended Statistical Group */ if (MLX5_CAP_GEN(mdev, pcam_reg) && MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) && MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) { /* read Extended Statistical counter group using predefined counter layout */ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); for (x = 0; x != MLX5E_PPORT_STATISTICAL_DEBUG_NUM; x++, y++) s_debug->arg[y] = be64toh(ptr[x]); } /* read PCIE counters */ mlx5e_update_pcie_counters(priv); /* read per-priority counters */ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); /* iterate all the priorities */ for (y = z = 0; z != MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO; z++) { MLX5_SET(ppcnt_reg, in, prio_tc, z); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); /* read per priority stats counter group using predefined counter layout */ for (x = 0; x != (MLX5E_PPORT_PER_PRIO_STATS_NUM / MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO); x++, y++) s->arg[y] = be64toh(ptr[x]); } free_out: /* free firmware request structures */ kvfree(in); kvfree(out); } static void 
mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv) { u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {}; u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) return; MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); MLX5_SET(query_vnic_env_in, in, op_mod, 0); MLX5_SET(query_vnic_env_in, in, other_vport, 0); if (mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)) != 0) return; priv->stats.vport.rx_steer_missed_packets = MLX5_GET64(query_vnic_env_out, out, vport_env.nic_receive_steering_discard); } /* * This function is called regularly to collect all statistics * counters from the firmware. The values can be viewed through the * sysctl interface. Execution is serialized using the priv's global * configuration lock. */ static void mlx5e_update_stats_locked(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_vport_stats *s = &priv->stats.vport; struct mlx5e_sq_stats *sq_stats; struct buf_ring *sq_br; #if (__FreeBSD_version < 1100000) struct ifnet *ifp = priv->ifp; #endif u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; u32 *out; int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u64 tso_packets = 0; u64 tso_bytes = 0; u64 tx_queue_dropped = 0; u64 tx_defragged = 0; u64 tx_offload_none = 0; u64 lro_packets = 0; u64 lro_bytes = 0; u64 sw_lro_queued = 0; u64 sw_lro_flushed = 0; u64 rx_csum_none = 0; u64 rx_wqe_err = 0; u64 rx_packets = 0; u64 rx_bytes = 0; u32 rx_out_of_buffer = 0; int i; int j; out = mlx5_vzalloc(outlen); if (out == NULL) goto free_out; /* Collect the SW counters first and then the HW counters, for consistency */ for (i = 0; i < priv->params.num_channels; i++) { struct mlx5e_channel *pch = priv->channel + i; struct mlx5e_rq *rq = &pch->rq; struct mlx5e_rq_stats *rq_stats = &pch->rq.stats; /* collect stats from LRO */ rq_stats->sw_lro_queued = rq->lro.lro_queued; rq_stats->sw_lro_flushed = rq->lro.lro_flushed; sw_lro_queued += rq_stats->sw_lro_queued; sw_lro_flushed += rq_stats->sw_lro_flushed; lro_packets += rq_stats->lro_packets; lro_bytes += rq_stats->lro_bytes; rx_csum_none += rq_stats->csum_none; rx_wqe_err += rq_stats->wqe_err; rx_packets += rq_stats->packets; rx_bytes += rq_stats->bytes; for (j = 0; j < priv->num_tc; j++) { sq_stats = &pch->sq[j].stats; sq_br = pch->sq[j].br; tso_packets += sq_stats->tso_packets; tso_bytes += sq_stats->tso_bytes; tx_queue_dropped += sq_stats->dropped; if (sq_br != NULL) tx_queue_dropped += sq_br->br_drops; tx_defragged += sq_stats->defragged; tx_offload_none += sq_stats->csum_offload_none; } } /* update counters */ s->tso_packets = tso_packets; s->tso_bytes = tso_bytes; s->tx_queue_dropped = tx_queue_dropped; s->tx_defragged = tx_defragged; s->lro_packets = lro_packets; s->lro_bytes = lro_bytes; s->sw_lro_queued = sw_lro_queued; s->sw_lro_flushed = sw_lro_flushed; s->rx_csum_none = rx_csum_none; s->rx_wqe_err = rx_wqe_err; s->rx_packets = rx_packets; s->rx_bytes = rx_bytes; mlx5e_grp_vnic_env_update_stats(priv); /* HW counters */ memset(in, 0, sizeof(in)); MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0); MLX5_SET(query_vport_counter_in, in, other_vport, 0); memset(out, 0, outlen); /* get number of out-of-buffer drops first */ if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0 && mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id, &rx_out_of_buffer) == 0) { s->rx_out_of_buffer = rx_out_of_buffer; } /* get port statistics */ if (mlx5_cmd_exec(mdev,
in, sizeof(in), out, outlen) == 0) { #define MLX5_GET_CTR(out, x) \ MLX5_GET64(query_vport_counter_out, out, x) s->rx_error_packets = MLX5_GET_CTR(out, received_errors.packets); s->rx_error_bytes = MLX5_GET_CTR(out, received_errors.octets); s->tx_error_packets = MLX5_GET_CTR(out, transmit_errors.packets); s->tx_error_bytes = MLX5_GET_CTR(out, transmit_errors.octets); s->rx_unicast_packets = MLX5_GET_CTR(out, received_eth_unicast.packets); s->rx_unicast_bytes = MLX5_GET_CTR(out, received_eth_unicast.octets); s->tx_unicast_packets = MLX5_GET_CTR(out, transmitted_eth_unicast.packets); s->tx_unicast_bytes = MLX5_GET_CTR(out, transmitted_eth_unicast.octets); s->rx_multicast_packets = MLX5_GET_CTR(out, received_eth_multicast.packets); s->rx_multicast_bytes = MLX5_GET_CTR(out, received_eth_multicast.octets); s->tx_multicast_packets = MLX5_GET_CTR(out, transmitted_eth_multicast.packets); s->tx_multicast_bytes = MLX5_GET_CTR(out, transmitted_eth_multicast.octets); s->rx_broadcast_packets = MLX5_GET_CTR(out, received_eth_broadcast.packets); s->rx_broadcast_bytes = MLX5_GET_CTR(out, received_eth_broadcast.octets); s->tx_broadcast_packets = MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); s->tx_broadcast_bytes = MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); s->tx_packets = s->tx_unicast_packets + s->tx_multicast_packets + s->tx_broadcast_packets; s->tx_bytes = s->tx_unicast_bytes + s->tx_multicast_bytes + s->tx_broadcast_bytes; /* Update calculated offload counters */ s->tx_csum_offload = s->tx_packets - tx_offload_none; s->rx_csum_good = s->rx_packets - s->rx_csum_none; } /* Get physical port counters */ mlx5e_update_pport_counters(priv); s->tx_jumbo_packets = priv->stats.port_stats_debug.tx_stat_p1519to2047octets + priv->stats.port_stats_debug.tx_stat_p2048to4095octets + priv->stats.port_stats_debug.tx_stat_p4096to8191octets + priv->stats.port_stats_debug.tx_stat_p8192to10239octets; #if (__FreeBSD_version < 1100000) /* no get_counters interface in fbsd 10 */ ifp->if_ipackets = s->rx_packets; ifp->if_ierrors = priv->stats.pport.in_range_len_errors + priv->stats.pport.out_of_range_len + priv->stats.pport.too_long_errors + priv->stats.pport.check_seq_err + priv->stats.pport.alignment_err; ifp->if_iqdrops = s->rx_out_of_buffer; ifp->if_opackets = s->tx_packets; ifp->if_oerrors = priv->stats.port_stats_debug.out_discards; ifp->if_snd.ifq_drops = s->tx_queue_dropped; ifp->if_ibytes = s->rx_bytes; ifp->if_obytes = s->tx_bytes; ifp->if_collisions = priv->stats.pport.collisions; #endif free_out: kvfree(out); /* Update diagnostics, if any */ if (priv->params_ethtool.diag_pci_enable || priv->params_ethtool.diag_general_enable) { int error = mlx5_core_get_diagnostics_full(mdev, priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL, priv->params_ethtool.diag_general_enable ? 
&priv->params_general : NULL); if (error != 0) if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error); } } static void mlx5e_update_stats_work(struct work_struct *work) { struct mlx5e_priv *priv; priv = container_of(work, struct mlx5e_priv, update_stats_work); PRIV_LOCK(priv); if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) mlx5e_update_stats_locked(priv); PRIV_UNLOCK(priv); } static void mlx5e_update_stats(void *arg) { struct mlx5e_priv *priv = arg; queue_work(priv->wq, &priv->update_stats_work); callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv); } static void mlx5e_async_event_sub(struct mlx5e_priv *priv, enum mlx5_dev_event event) { switch (event) { case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_DOWN: queue_work(priv->wq, &priv->update_carrier_work); break; default: break; } } static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, enum mlx5_dev_event event, unsigned long param) { struct mlx5e_priv *priv = vpriv; mtx_lock(&priv->async_events_mtx); if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) mlx5e_async_event_sub(priv, event); mtx_unlock(&priv->async_events_mtx); } static void mlx5e_enable_async_events(struct mlx5e_priv *priv) { set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); } static void mlx5e_disable_async_events(struct mlx5e_priv *priv) { mtx_lock(&priv->async_events_mtx); clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); mtx_unlock(&priv->async_events_mtx); } static void mlx5e_calibration_callout(void *arg); static int mlx5e_calibration_duration = 20; static int mlx5e_fast_calibration = 1; static int mlx5e_normal_calibration = 30; static SYSCTL_NODE(_hw_mlx5, OID_AUTO, calibr, CTLFLAG_RW, 0, "MLX5 timestamp calibration parameters"); SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, duration, CTLFLAG_RWTUN, &mlx5e_calibration_duration, 0, "Duration of initial calibration"); SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, fast, CTLFLAG_RWTUN, &mlx5e_fast_calibration, 0, "Recalibration interval during initial calibration"); SYSCTL_INT(_hw_mlx5_calibr, OID_AUTO, normal, CTLFLAG_RWTUN, &mlx5e_normal_calibration, 0, "Recalibration interval during normal operations"); /* * Starts the calibration process. */ static void mlx5e_reset_calibration_callout(struct mlx5e_priv *priv) { if (priv->clbr_done == 0) mlx5e_calibration_callout(priv); else callout_reset_curcpu(&priv->tstmp_clbr, (priv->clbr_done < mlx5e_calibration_duration ? mlx5e_fast_calibration : mlx5e_normal_calibration) * hz, mlx5e_calibration_callout, priv); } static uint64_t mlx5e_timespec2usec(const struct timespec *ts) { return ((uint64_t)ts->tv_sec * 1000000000 + ts->tv_nsec); } static uint64_t mlx5e_hw_clock(struct mlx5e_priv *priv) { struct mlx5_init_seg *iseg; uint32_t hw_h, hw_h1, hw_l; iseg = priv->mdev->iseg; do { hw_h = ioread32be(&iseg->internal_timer_h); hw_l = ioread32be(&iseg->internal_timer_l); hw_h1 = ioread32be(&iseg->internal_timer_h); } while (hw_h1 != hw_h); return (((uint64_t)hw_h << 32) | hw_l); } /* * The calibration callout runs either in the context of the thread * which enables calibration, or from the callout subsystem. It takes * a snapshot of the system and adapter clocks, then advances the * pointers to the calibration point so the RX path can read * consistent data locklessly.
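 *
 * The "clbr_gen" field acts like a seqlock generation: zero means the
 * point is invalid or being updated, and a change means the reader
 * raced an update. A minimal reader sketch under that assumption (not
 * code from this file) could look like:
 *
 *	do {
 *		gen = atomic_load_acq_int(&clbr->clbr_gen);
 *		<copy base_curr/base_prev/clbr_hw_curr/clbr_hw_prev>
 *	} while (gen == 0 ||
 *	    gen != atomic_load_acq_int(&clbr->clbr_gen));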
*/ static void mlx5e_calibration_callout(void *arg) { struct mlx5e_priv *priv; struct mlx5e_clbr_point *next, *curr; struct timespec ts; int clbr_curr_next; priv = arg; curr = &priv->clbr_points[priv->clbr_curr]; clbr_curr_next = priv->clbr_curr + 1; if (clbr_curr_next >= nitems(priv->clbr_points)) clbr_curr_next = 0; next = &priv->clbr_points[clbr_curr_next]; next->base_prev = curr->base_curr; next->clbr_hw_prev = curr->clbr_hw_curr; next->clbr_hw_curr = mlx5e_hw_clock(priv); if (((next->clbr_hw_curr - curr->clbr_hw_curr) >> MLX5E_TSTMP_PREC) == 0) { if (priv->clbr_done != 0) { if_printf(priv->ifp, "HW failed tstmp frozen %#jx %#jx, " "disabling\n", next->clbr_hw_curr, curr->clbr_hw_prev); priv->clbr_done = 0; } atomic_store_rel_int(&curr->clbr_gen, 0); return; } nanouptime(&ts); next->base_curr = mlx5e_timespec2usec(&ts); curr->clbr_gen = 0; atomic_thread_fence_rel(); priv->clbr_curr = clbr_curr_next; atomic_store_rel_int(&next->clbr_gen, ++(priv->clbr_gen)); if (priv->clbr_done < mlx5e_calibration_duration) priv->clbr_done++; mlx5e_reset_calibration_callout(priv); } static const char *mlx5e_rq_stats_desc[] = { MLX5E_RQ_STATS(MLX5E_STATS_DESC) }; static int mlx5e_create_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) { struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; char buffer[16]; void *rqc = param->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); int wq_sz; int err; int i; u32 nsegs, wqe_sz; err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); if (err != 0) goto done; /* Create DMA descriptor TAG */ if ((err = -bus_dma_tag_create( bus_get_dma_tag(mdev->pdev->dev.bsddev), 1, /* any alignment */ 0, /* no boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ nsegs * MLX5E_MAX_RX_BYTES, /* maxsize */ nsegs, /* nsegments */ nsegs * MLX5E_MAX_RX_BYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &rq->dma_tag))) goto done; err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq, &rq->wq_ctrl); if (err) goto err_free_dma_tag; rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs); if (err != 0) goto err_rq_wq_destroy; wq_sz = mlx5_wq_ll_get_size(&rq->wq); err = -tcp_lro_init_args(&rq->lro, c->tag.m_snd_tag.ifp, TCP_LRO_ENTRIES, wq_sz); if (err) goto err_rq_wq_destroy; rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); for (i = 0; i != wq_sz; i++) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); int j; err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map); if (err != 0) { while (i--) bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); goto err_rq_mbuf_free; } /* set value for constant fields */ for (j = 0; j < rq->nsegs; j++) wqe->data[j].lkey = c->mkey_be; } INIT_WORK(&rq->dim.work, mlx5e_dim_work); if (priv->params.rx_cq_moderation_mode < 2) { rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; } else { void *cqc = container_of(param, struct mlx5e_channel_param, rq)->rx_cq.cqc; switch (MLX5_GET(cqc, cqc, cq_period_mode)) { case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; break; case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; break; default: rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; break; } } rq->ifp = c->tag.m_snd_tag.ifp; rq->channel = c; rq->ix = c->ix; snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix); mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM, rq->stats.arg); return (0); err_rq_mbuf_free: free(rq->mbuf, M_MLX5EN); tcp_lro_free(&rq->lro); err_rq_wq_destroy: mlx5_wq_destroy(&rq->wq_ctrl); err_free_dma_tag: bus_dma_tag_destroy(rq->dma_tag); done: return (err); } static void mlx5e_destroy_rq(struct mlx5e_rq *rq) { int wq_sz; int i; /* destroy all sysctl nodes */ sysctl_ctx_free(&rq->stats.ctx); /* free leftover LRO packets, if any */ tcp_lro_free(&rq->lro); wq_sz = mlx5_wq_ll_get_size(&rq->wq); for (i = 0; i != wq_sz; i++) { if (rq->mbuf[i].mbuf != NULL) { bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map); m_freem(rq->mbuf[i].mbuf); } bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map); } free(rq->mbuf, M_MLX5EN); mlx5_wq_destroy(&rq->wq_ctrl); } static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; void *in; void *rqc; void *wq; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rq->wq_ctrl.buf.npages; in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); wq = MLX5_ADDR_OF(rqc, rqc, wq); memcpy(rqc, param->rqc, sizeof(param->rqc)); MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); MLX5_SET(rqc, rqc, flush_in_error_en, 1); if (priv->counter_set_id >= 0) MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); mlx5_fill_page_array(&rq->wq_ctrl.buf, (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); kvfree(in); return (err); } static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; void *in; void *rqc; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(modify_rq_in); in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); MLX5_SET(modify_rq_in, in, rqn, rq->rqn); MLX5_SET(modify_rq_in, in, rq_state, curr_state); MLX5_SET(rqc, rqc, state, next_state); err = mlx5_core_modify_rq(mdev, in, inlen); kvfree(in); return (err); } static void mlx5e_disable_rq(struct mlx5e_rq *rq) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; mlx5_core_destroy_rq(mdev, rq->rqn); } static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; struct mlx5_wq_ll *wq = &rq->wq; int i; for (i = 0; i < 1000; i++) { if (wq->cur_sz >= priv->params.min_rx_wqes) return (0); msleep(4); } return (-ETIMEDOUT); } static int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) { int err; err = mlx5e_create_rq(c, param, rq); if (err) return (err); err = mlx5e_enable_rq(rq, param); if (err) goto err_destroy_rq; err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err) goto err_disable_rq; c->rq.enabled = 1; return (0); err_disable_rq: mlx5e_disable_rq(rq); err_destroy_rq: mlx5e_destroy_rq(rq); return (err); } static void mlx5e_close_rq(struct mlx5e_rq *rq) { mtx_lock(&rq->mtx); rq->enabled = 0; callout_stop(&rq->watchdog); mtx_unlock(&rq->mtx); callout_drain(&rq->watchdog); mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); } static 
void mlx5e_close_rq_wait(struct mlx5e_rq *rq) { mlx5e_disable_rq(rq); mlx5e_close_cq(&rq->cq); cancel_work_sync(&rq->dim.work); mlx5e_destroy_rq(rq); } void mlx5e_free_sq_db(struct mlx5e_sq *sq) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int x; for (x = 0; x != wq_sz; x++) { if (sq->mbuf[x].mbuf != NULL) { bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map); m_freem(sq->mbuf[x].mbuf); } bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); } free(sq->mbuf, M_MLX5EN); } int mlx5e_alloc_sq_db(struct mlx5e_sq *sq) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int err; int x; sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO); /* Create DMA descriptor MAPs */ for (x = 0; x != wq_sz; x++) { err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map); if (err != 0) { while (x--) bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map); free(sq->mbuf, M_MLX5EN); return (err); } } return (0); } static const char *mlx5e_sq_stats_desc[] = { MLX5E_SQ_STATS(MLX5E_STATS_DESC) }; void mlx5e_update_sq_inline(struct mlx5e_sq *sq) { sq->max_inline = sq->priv->params.tx_max_inline; sq->min_inline_mode = sq->priv->params.tx_min_inline_mode; /* * Check if trust state is DSCP or if inline mode is NONE which * indicates CX-5 or newer hardware. */ if (sq->priv->params_ethtool.trust_state != MLX5_QPTS_TRUST_PCP || sq->min_inline_mode == MLX5_INLINE_MODE_NONE) { if (MLX5_CAP_ETH(sq->priv->mdev, wqe_vlan_insert)) sq->min_insert_caps = MLX5E_INSERT_VLAN | MLX5E_INSERT_NON_VLAN; else sq->min_insert_caps = MLX5E_INSERT_NON_VLAN; } else { sq->min_insert_caps = 0; } } static void mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) { int i; for (i = 0; i != c->num_tc; i++) { mtx_lock(&c->sq[i].lock); mlx5e_update_sq_inline(&c->sq[i]); mtx_unlock(&c->sq[i].lock); } } void mlx5e_refresh_sq_inline(struct mlx5e_priv *priv) { int i; /* check if channels are closed */ if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) return; for (i = 0; i < priv->params.num_channels; i++) mlx5e_refresh_sq_inline_sub(priv, &priv->channel[i]); } static int mlx5e_create_sq(struct mlx5e_channel *c, int tc, struct mlx5e_sq_param *param, struct mlx5e_sq *sq) { struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; char buffer[16]; void *sqc = param->sqc; void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); int err; /* Create DMA descriptor TAG */ if ((err = -bus_dma_tag_create( bus_get_dma_tag(mdev->pdev->dev.bsddev), 1, /* any alignment */ 0, /* no boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */ MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */ MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sq->dma_tag))) goto done; err = mlx5_alloc_map_uar(mdev, &sq->uar); if (err) goto err_free_dma_tag; err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) goto err_unmap_free_uar; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; err = mlx5e_alloc_sq_db(sq); if (err) goto err_sq_wq_destroy; sq->mkey_be = c->mkey_be; sq->ifp = priv->ifp; sq->priv = priv; sq->tc = tc; mlx5e_update_sq_inline(sq); snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc); mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM, sq->stats.arg); return (0); err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); err_unmap_free_uar:
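	/* Error unwinding here runs in reverse order of the setup above. */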
mlx5_unmap_free_uar(mdev, &sq->uar); err_free_dma_tag: bus_dma_tag_destroy(sq->dma_tag); done: return (err); } static void mlx5e_destroy_sq(struct mlx5e_sq *sq) { /* destroy all sysctl nodes */ sysctl_ctx_free(&sq->stats.ctx); mlx5e_free_sq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar); } int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param, int tis_num) { void *in; void *sqc; void *wq; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * sq->wq_ctrl.buf.npages; in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); wq = MLX5_ADDR_OF(sqc, sqc, wq); memcpy(sqc, param->sqc, sizeof(param->sqc)); MLX5_SET(sqc, sqc, tis_num_0, tis_num); MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, tis_lst_sz, 1); MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, sq->uar.index); MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); mlx5_fill_page_array(&sq->wq_ctrl.buf, (__be64 *) MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn); kvfree(in); return (err); } int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) { void *in; void *sqc; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(modify_sq_in); in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); MLX5_SET(modify_sq_in, in, sqn, sq->sqn); MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen); kvfree(in); return (err); } void mlx5e_disable_sq(struct mlx5e_sq *sq) { mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn); } static int mlx5e_open_sq(struct mlx5e_channel *c, int tc, struct mlx5e_sq_param *param, struct mlx5e_sq *sq) { int err; err = mlx5e_create_sq(c, tc, param, sq); if (err) return (err); err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]); if (err) goto err_destroy_sq; err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); if (err) goto err_disable_sq; WRITE_ONCE(sq->running, 1); return (0); err_disable_sq: mlx5e_disable_sq(sq); err_destroy_sq: mlx5e_destroy_sq(sq); return (err); } static void mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep) { /* fill up remainder with NOPs */ while (sq->cev_counter != 0) { while (!mlx5e_sq_has_room_for(sq, 1)) { if (can_sleep != 0) { mtx_unlock(&sq->lock); msleep(4); mtx_lock(&sq->lock); } else { goto done; } } /* send a single NOP */ mlx5e_send_nop(sq, 1); atomic_thread_fence_rel(); } done: /* Check if we need to write the doorbell */ if (likely(sq->doorbell.d64 != 0)) { mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); sq->doorbell.d64 = 0; } } void mlx5e_sq_cev_timeout(void *arg) { struct mlx5e_sq *sq = arg; mtx_assert(&sq->lock, MA_OWNED); /* check next state */ switch (sq->cev_next_state) { case MLX5E_CEV_STATE_SEND_NOPS: /* fill TX ring with NOPs, if any */ mlx5e_sq_send_nops_locked(sq, 0); /* check if completed */ if (sq->cev_counter == 0) { sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; return; } break; default: /* send NOPs on next timeout */ sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS; break; } /* restart timer */ callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq); } void mlx5e_drain_sq(struct mlx5e_sq *sq) { int error; struct mlx5_core_dev *mdev = sq->priv->mdev;
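	/*
	 * The drain below proceeds in stages: stop new transmits, pad the
	 * send ring with NOPs, wait for the hardware to consume all posted
	 * work or for the link to go down, then move the SQ to the error
	 * state and wait for the ring to empty completely.
	 */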
/* * Check if already stopped. * * NOTE: Serialization of this function is managed by the * caller ensuring the priv's state lock is locked or in case * of rate limit support, a single thread manages drain and * resume of SQs. The "running" variable can therefore safely * be read without any locks. */ if (READ_ONCE(sq->running) == 0) return; /* don't put more packets into the SQ */ WRITE_ONCE(sq->running, 0); /* serialize access to DMA rings */ mtx_lock(&sq->lock); /* teardown event factor timer, if any */ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS; callout_stop(&sq->cev_callout); /* send dummy NOPs in order to flush the transmit ring */ mlx5e_sq_send_nops_locked(sq, 1); mtx_unlock(&sq->lock); /* make sure it is safe to free the callout */ callout_drain(&sq->cev_callout); /* wait till SQ is empty or link is down */ mtx_lock(&sq->lock); while (sq->cc != sq->pc && (sq->priv->media_status_last & IFM_ACTIVE) != 0 && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { mtx_unlock(&sq->lock); msleep(1); sq->cq.mcq.comp(&sq->cq.mcq); mtx_lock(&sq->lock); } mtx_unlock(&sq->lock); /* error out remaining requests */ error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); if (error != 0) { if_printf(sq->ifp, "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error); } /* wait till SQ is empty */ mtx_lock(&sq->lock); while (sq->cc != sq->pc && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { mtx_unlock(&sq->lock); msleep(1); sq->cq.mcq.comp(&sq->cq.mcq); mtx_lock(&sq->lock); } mtx_unlock(&sq->lock); } static void mlx5e_close_sq_wait(struct mlx5e_sq *sq) { mlx5e_drain_sq(sq); mlx5e_disable_sq(sq); mlx5e_destroy_sq(sq); } static int mlx5e_create_cq(struct mlx5e_priv *priv, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, mlx5e_cq_comp_t *comp, int eq_ix) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; int eqn_not_used; int irqn; int err; u32 i; param->wq.buf_numa_node = 0; param->wq.db_numa_node = 0; err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, &cq->wq_ctrl); if (err) return (err); mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn); mcq->cqe_sz = 64; mcq->set_ci_db = cq->wq_ctrl.db.db; mcq->arm_db = cq->wq_ctrl.db.db + 1; *mcq->set_ci_db = 0; *mcq->arm_db = 0; mcq->vector = eq_ix; mcq->comp = comp; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; mcq->uar = &priv->cq_uar; for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); cqe->op_own = 0xf1; } cq->priv = priv; return (0); } static void mlx5e_destroy_cq(struct mlx5e_cq *cq) { mlx5_wq_destroy(&cq->wq_ctrl); } static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix) { struct mlx5_core_cq *mcq = &cq->mcq; void *in; void *cqc; int inlen; int irqn_not_used; int eqn; int err; inlen = MLX5_ST_SZ_BYTES(create_cq_in) + sizeof(u64) * cq->wq_ctrl.buf.npages; in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); memcpy(cqc, param->cqc, sizeof(param->cqc)); mlx5_fill_page_array(&cq->wq_ctrl.buf, (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas)); mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen); kvfree(in); if (err) return (err); mlx5e_cq_arm(cq,
MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock)); return (0); } static void mlx5e_disable_cq(struct mlx5e_cq *cq) { mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq); } int mlx5e_open_cq(struct mlx5e_priv *priv, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, mlx5e_cq_comp_t *comp, int eq_ix) { int err; err = mlx5e_create_cq(priv, param, cq, comp, eq_ix); if (err) return (err); err = mlx5e_enable_cq(cq, param, eq_ix); if (err) goto err_destroy_cq; return (0); err_destroy_cq: mlx5e_destroy_cq(cq); return (err); } void mlx5e_close_cq(struct mlx5e_cq *cq) { mlx5e_disable_cq(cq); mlx5e_destroy_cq(cq); } static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_channel_param *cparam) { int err; int tc; for (tc = 0; tc < c->num_tc; tc++) { /* open completion queue */ err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq, &mlx5e_tx_cq_comp, c->ix); if (err) goto err_close_tx_cqs; } return (0); err_close_tx_cqs: for (tc--; tc >= 0; tc--) mlx5e_close_cq(&c->sq[tc].cq); return (err); } static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) { int tc; for (tc = 0; tc < c->num_tc; tc++) mlx5e_close_cq(&c->sq[tc].cq); } static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_channel_param *cparam) { int err; int tc; for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); if (err) goto err_close_sqs; } return (0); err_close_sqs: for (tc--; tc >= 0; tc--) mlx5e_close_sq_wait(&c->sq[tc]); return (err); } static void mlx5e_close_sqs_wait(struct mlx5e_channel *c) { int tc; for (tc = 0; tc < c->num_tc; tc++) mlx5e_close_sq_wait(&c->sq[tc]); } static void mlx5e_chan_mtx_init(struct mlx5e_channel *c) { int tc; mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0); for (tc = 0; tc < c->num_tc; tc++) { struct mlx5e_sq *sq = c->sq + tc; mtx_init(&sq->lock, "mlx5tx", MTX_NETWORK_LOCK " TX", MTX_DEF); mtx_init(&sq->comp_lock, "mlx5comp", MTX_NETWORK_LOCK " TX", MTX_DEF); callout_init_mtx(&sq->cev_callout, &sq->lock, 0); sq->cev_factor = c->priv->params_ethtool.tx_completion_fact; /* ensure the TX completion event factor is not zero */ if (sq->cev_factor == 0) sq->cev_factor = 1; } } static void mlx5e_chan_mtx_destroy(struct mlx5e_channel *c) { int tc; mtx_destroy(&c->rq.mtx); for (tc = 0; tc < c->num_tc; tc++) { mtx_destroy(&c->sq[tc].lock); mtx_destroy(&c->sq[tc].comp_lock); } } static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel *c) { int err; memset(c, 0, sizeof(*c)); c->priv = priv; c->ix = ix; /* setup send tag */ c->tag.m_snd_tag.ifp = priv->ifp; c->tag.type = IF_SND_TAG_TYPE_UNLIMITED; c->mkey_be = cpu_to_be32(priv->mr.key); c->num_tc = priv->num_tc; /* init mutexes */ mlx5e_chan_mtx_init(c); /* open transmit completion queue */ err = mlx5e_open_tx_cqs(c, cparam); if (err) goto err_free; /* open receive completion queue */ err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq, &mlx5e_rx_cq_comp, c->ix); if (err) goto err_close_tx_cqs; err = mlx5e_open_sqs(c, cparam); if (err) goto err_close_rx_cq; err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; /* poll receive queue initially */ c->rq.cq.mcq.comp(&c->rq.cq.mcq); return (0); err_close_sqs: mlx5e_close_sqs_wait(c); err_close_rx_cq: mlx5e_close_cq(&c->rq.cq); err_close_tx_cqs: mlx5e_close_tx_cqs(c); err_free: /* destroy mutexes */ mlx5e_chan_mtx_destroy(c); return (err); } static void mlx5e_close_channel(struct mlx5e_channel *c) { mlx5e_close_rq(&c->rq); } 
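/*
 * Channel teardown is split in two phases: mlx5e_close_channel() above
 * only shuts down the receive queue, while mlx5e_close_channel_wait()
 * below waits for and frees the remaining channel resources. This lets
 * mlx5e_close_channels() run the first phase over all channels before
 * blocking in the second, so the hardware queues quiesce in parallel
 * rather than one at a time.
 */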
static void mlx5e_close_channel_wait(struct mlx5e_channel *c) { mlx5e_close_rq_wait(&c->rq); mlx5e_close_sqs_wait(c); mlx5e_close_tx_cqs(c); /* destroy mutexes */ mlx5e_chan_mtx_destroy(c); } static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs) { u32 r, n; r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz : MLX5E_SW2MB_MTU(priv->ifp->if_mtu); if (r > MJUM16BYTES) return (-ENOMEM); if (r > MJUM9BYTES) r = MJUM16BYTES; else if (r > MJUMPAGESIZE) r = MJUM9BYTES; else if (r > MCLBYTES) r = MJUMPAGESIZE; else r = MCLBYTES; /* * n + 1 must be a power of two, because stride size must be. * Stride size is 16 * (n + 1), as the first segment is * control. */ for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++) ; if (n > MLX5E_MAX_BUSDMA_RX_SEGS) return (-ENOMEM); *wqe_sz = r; *nsegs = n; return (0); } static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_rq_param *param) { void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); u32 wqe_sz, nsegs; mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) + nsegs * sizeof(struct mlx5_wqe_data_seg))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); MLX5_SET(wq, wq, pd, priv->pdn); param->wq.buf_numa_node = 0; param->wq.db_numa_node = 0; param->wq.linear = 1; } static void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); MLX5_SET(wq, wq, pd, priv->pdn); param->wq.buf_numa_node = 0; param->wq.db_numa_node = 0; param->wq.linear = 1; } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, struct mlx5e_cq_param *param) { void *cqc = param->cqc; MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); } static void mlx5e_get_default_profile(struct mlx5e_priv *priv, int mode, struct net_dim_cq_moder *ptr) { *ptr = net_dim_get_profile(mode, MLX5E_DIM_DEFAULT_PROFILE); /* apply LRO restrictions */ if (priv->params.hw_lro_en && ptr->pkts > MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO) { ptr->pkts = MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO; } } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, struct mlx5e_cq_param *param) { struct net_dim_cq_moder curr; void *cqc = param->cqc; /* * We use MLX5_CQE_FORMAT_HASH because the RX hash mini CQE * format is more beneficial for FreeBSD use case. * * Adding support for MLX5_CQE_FORMAT_CSUM will require changes * in mlx5e_decompress_cqe. 
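 *
 * (With compression enabled a mini CQE only carries the fields
 * selected by the configured format, so the hash format preserves the
 * RX hash result that the network stack consumes.)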
*/ if (priv->params.cqe_zipping_en) { MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_HASH); MLX5_SET(cqc, cqc, cqe_compression_en, 1); } MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); switch (priv->params.rx_cq_moderation_mode) { case 0: MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; case 1: MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec); MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts); if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); else MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; case 2: mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE, &curr); MLX5_SET(cqc, cqc, cq_period, curr.usec); MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; case 3: mlx5e_get_default_profile(priv, NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE, &curr); MLX5_SET(cqc, cqc, cq_period, curr.usec); MLX5_SET(cqc, cqc, cq_max_count, curr.pkts); if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); else MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; default: break; } mlx5e_dim_build_cq_param(priv, param); mlx5e_build_common_cq_param(priv, param); } static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, struct mlx5e_cq_param *param) { void *cqc = param->cqc; MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec); MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts); switch (priv->params.tx_cq_moderation_mode) { case 0: MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; default: if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE); else MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); break; } mlx5e_build_common_cq_param(priv, param); } static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam) { memset(cparam, 0, sizeof(*cparam)); mlx5e_build_rq_param(priv, &cparam->rq); mlx5e_build_sq_param(priv, &cparam->sq); mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); } static int mlx5e_open_channels(struct mlx5e_priv *priv) { struct mlx5e_channel_param cparam; int err; int i; int j; mlx5e_build_channel_param(priv, &cparam); for (i = 0; i < priv->params.num_channels; i++) { err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); if (err) goto err_close_channels; } for (j = 0; j < priv->params.num_channels; j++) { err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j].rq); if (err) goto err_close_channels; } return (0); err_close_channels: while (i--) { mlx5e_close_channel(&priv->channel[i]); mlx5e_close_channel_wait(&priv->channel[i]); } return (err); } static void mlx5e_close_channels(struct mlx5e_priv *priv) { int i; for (i = 0; i < priv->params.num_channels; i++) mlx5e_close_channel(&priv->channel[i]); for (i = 0; i < priv->params.num_channels; i++) mlx5e_close_channel_wait(&priv->channel[i]); } static int mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq) { if 
(MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { uint8_t cq_mode; switch (priv->params.tx_cq_moderation_mode) { case 0: case 2: cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; break; default: cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; break; } return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq, priv->params.tx_cq_moderation_usec, priv->params.tx_cq_moderation_pkts, cq_mode)); } return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq, priv->params.tx_cq_moderation_usec, priv->params.tx_cq_moderation_pkts)); } static int mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq) { if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) { uint8_t cq_mode; uint8_t dim_mode; int retval; switch (priv->params.rx_cq_moderation_mode) { case 0: case 2: cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; break; default: cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE; dim_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; break; } /* tear down dynamic interrupt moderation */ mtx_lock(&rq->mtx); rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_DISABLED; mtx_unlock(&rq->mtx); /* wait for dynamic interrupt moderation work task, if any */ cancel_work_sync(&rq->dim.work); if (priv->params.rx_cq_moderation_mode >= 2) { struct net_dim_cq_moder curr; mlx5e_get_default_profile(priv, dim_mode, &curr); retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, curr.usec, curr.pkts, cq_mode); /* set dynamic interrupt moderation mode and zero defaults */ mtx_lock(&rq->mtx); rq->dim.mode = dim_mode; rq->dim.state = 0; rq->dim.profile_ix = MLX5E_DIM_DEFAULT_PROFILE; mtx_unlock(&rq->mtx); } else { retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq, priv->params.rx_cq_moderation_usec, priv->params.rx_cq_moderation_pkts, cq_mode); } return (retval); } return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq, priv->params.rx_cq_moderation_usec, priv->params.rx_cq_moderation_pkts)); } static int mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c) { int err; int i; err = mlx5e_refresh_rq_params(priv, &c->rq); if (err) goto done; for (i = 0; i != c->num_tc; i++) { err = mlx5e_refresh_sq_params(priv, &c->sq[i]); if (err) goto done; } done: return (err); } int mlx5e_refresh_channel_params(struct mlx5e_priv *priv) { int i; /* check if channels are closed */ if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) return (EINVAL); for (i = 0; i < priv->params.num_channels; i++) { int err; err = mlx5e_refresh_channel_params_sub(priv, &priv->channel[i]); if (err) return (err); } return (0); } static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) { struct mlx5_core_dev *mdev = priv->mdev; u32 in[MLX5_ST_SZ_DW(create_tis_in)]; void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); memset(in, 0, sizeof(in)); MLX5_SET(tisc, tisc, prio, tc); MLX5_SET(tisc, tisc, transport_domain, priv->tdn); return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc])); } static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) { mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); } static int mlx5e_open_tises(struct mlx5e_priv *priv) { int num_tc = priv->num_tc; int err; int tc; for (tc = 0; tc < num_tc; tc++) { err = mlx5e_open_tis(priv, tc); if (err) goto err_close_tises; } return (0); err_close_tises: for (tc--; tc >= 0; tc--) mlx5e_close_tis(priv, tc); return (err); } static void mlx5e_close_tises(struct mlx5e_priv *priv) { int num_tc = priv->num_tc; int tc; for (tc = 0; tc < num_tc; tc++) mlx5e_close_tis(priv, 
tc); } static int mlx5e_open_rqt(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; void *rqtc; int inlen; int err; int sz; int i; sz = 1 << priv->params.rx_hash_log_tbl_sz; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); for (i = 0; i < sz; i++) { int ix = i; #ifdef RSS ix = rss_get_indirection_to_bucket(ix); #endif /* ensure we don't overflow */ ix %= priv->params.num_channels; /* apply receive side scaling stride, if any */ ix -= ix % (int)priv->params.channels_rsss; MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix].rq.rqn); } MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); if (!err) priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); kvfree(in); return (err); } static void mlx5e_close_rqt(struct mlx5e_priv *priv) { u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); } static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); __be32 *hkey; MLX5_SET(tirc, tirc, transport_domain, priv->tdn); #define ROUGH_MAX_L2_L3_HDR_SZ 256 #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) #define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_L4_SPORT |\ MLX5_HASH_FIELD_SEL_L4_DPORT) #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) if (priv->params.hw_lro_en) { MLX5_SET(tirc, tirc, lro_enable_mask, MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); MLX5_SET(tirc, tirc, lro_max_msg_sz, (priv->params.lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); /* TODO: add the option to choose timer value dynamically */ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, MLX5_CAP_ETH(priv->mdev, lro_timer_supported_periods[2])); } /* setup parameters for hashing TIR type, if any */ switch (tt) { case MLX5E_TT_ANY: MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); MLX5_SET(tirc, tirc, inline_rqn, priv->channel[0].rq.rqn); break; default: MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, priv->rqtn); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); #ifdef RSS /* * The FreeBSD RSS implementation does not currently * support symmetric Toeplitz hashes: */ MLX5_SET(tirc, tirc, rx_hash_symmetric, 0); rss_getkey((uint8_t *)hkey); #else MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); hkey[0] = cpu_to_be32(0xD181C62C); hkey[1] = cpu_to_be32(0xF7F4DB5B); hkey[2] = cpu_to_be32(0x1983A2FC); hkey[3] = cpu_to_be32(0x943E1ADB); hkey[4] = cpu_to_be32(0xD9389E6B); hkey[5] = cpu_to_be32(0xD1039C2C); hkey[6] = cpu_to_be32(0xA74499AD); hkey[7] = cpu_to_be32(0x593D56D9); hkey[8] = cpu_to_be32(0xF3253C06); hkey[9] = cpu_to_be32(0x2ADC1FFC); #endif break; } switch (tt) { case MLX5E_TT_IPV4_TCP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select,
hfso, l4_prot_type, MLX5_L4_PROT_TYPE_TCP); #ifdef RSS if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) { MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); } else #endif MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_ALL); break; case MLX5E_TT_IPV6_TCP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_TCP); #ifdef RSS if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) { MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); } else #endif MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_ALL); break; case MLX5E_TT_IPV4_UDP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_UDP); #ifdef RSS if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) { MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); } else #endif MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_ALL); break; case MLX5E_TT_IPV6_UDP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_UDP); #ifdef RSS if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) { MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); } else #endif MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_ALL); break; case MLX5E_TT_IPV4_IPSEC_AH: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV6_IPSEC_AH: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV4_IPSEC_ESP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV6_IPSEC_ESP: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV4: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); break; case MLX5E_TT_IPV6: MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, MLX5_L3_PROT_TYPE_IPV6); MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); break; default: break; } } static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; void *tirc; int inlen; int err; inlen = MLX5_ST_SZ_BYTES(create_tir_in); in = mlx5_vzalloc(inlen); if (in == NULL) return (-ENOMEM); tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context); mlx5e_build_tir_ctx(priv, tirc, tt); err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); kvfree(in); return (err); } static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) { mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); } static int mlx5e_open_tirs(struct mlx5e_priv *priv) { int err; int i; for (i = 0; i < MLX5E_NUM_TT; i++) { err = mlx5e_open_tir(priv, i); if (err) goto err_close_tirs; } return (0); err_close_tirs: for (i--; i >= 0; i--) mlx5e_close_tir(priv, i); return (err); } static void mlx5e_close_tirs(struct mlx5e_priv *priv) { int i; for (i = 0; i < MLX5E_NUM_TT; i++) mlx5e_close_tir(priv, i); } /* * SW 
MTU does not include headers, * HW MTU includes all headers and checksums. */ static int mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu) { struct mlx5e_priv *priv = ifp->if_softc; struct mlx5_core_dev *mdev = priv->mdev; int hw_mtu; int err; hw_mtu = MLX5E_SW2HW_MTU(sw_mtu); err = mlx5_set_port_mtu(mdev, hw_mtu); if (err) { if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n", __func__, sw_mtu, err); return (err); } /* Update vport context MTU */ err = mlx5_set_vport_mtu(mdev, hw_mtu); if (err) { if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n", __func__, err); } ifp->if_mtu = sw_mtu; err = mlx5_query_vport_mtu(mdev, &hw_mtu); if (err || !hw_mtu) { /* fallback to port oper mtu */ err = mlx5_query_port_oper_mtu(mdev, &hw_mtu); } if (err) { if_printf(ifp, "Querying port MTU after setting new " "MTU value failed\n"); return (err); } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) { err = -E2BIG; if_printf(ifp, "Port MTU %d is smaller than " "ifp mtu %d\n", hw_mtu, sw_mtu); } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) { err = -EINVAL; if_printf(ifp, "Port MTU %d is bigger than " "ifp mtu %d\n", hw_mtu, sw_mtu); } priv->params_ethtool.hw_mtu = hw_mtu; return (err); } int mlx5e_open_locked(struct ifnet *ifp) { struct mlx5e_priv *priv = ifp->if_softc; int err; u16 set_id; /* check if already opened */ if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) return (0); #ifdef RSS if (rss_getnumbuckets() > priv->params.num_channels) { if_printf(ifp, "NOTE: There are more RSS buckets(%u) than " "channels(%u) available\n", rss_getnumbuckets(), priv->params.num_channels); } #endif err = mlx5e_open_tises(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n", __func__, err); return (err); } err = mlx5_vport_alloc_q_counter(priv->mdev, MLX5_INTERFACE_PROTOCOL_ETH, &set_id); if (err) { if_printf(priv->ifp, "%s: mlx5_vport_alloc_q_counter failed: %d\n", __func__, err); goto err_close_tises; } /* store counter set ID */ priv->counter_set_id = set_id; err = mlx5e_open_channels(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n", __func__, err); goto err_dalloc_q_counter; } err = mlx5e_open_rqt(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n", __func__, err); goto err_close_channels; } err = mlx5e_open_tirs(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_tirs failed, %d\n", __func__, err); goto err_close_rqt; } err = mlx5e_open_flow_table(priv); if (err) { if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n", __func__, err); goto err_close_tirs; } err = mlx5e_add_all_vlan_rules(priv); if (err) { if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n", __func__, err); goto err_close_flow_table; } set_bit(MLX5E_STATE_OPENED, &priv->state); mlx5e_update_carrier(priv); mlx5e_set_rx_mode_core(priv); return (0); err_close_flow_table: mlx5e_close_flow_table(priv); err_close_tirs: mlx5e_close_tirs(priv); err_close_rqt: mlx5e_close_rqt(priv); err_close_channels: mlx5e_close_channels(priv); err_dalloc_q_counter: mlx5_vport_dealloc_q_counter(priv->mdev, MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); err_close_tises: mlx5e_close_tises(priv); return (err); } static void mlx5e_open(void *arg) { struct mlx5e_priv *priv = arg; PRIV_LOCK(priv); if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP)) if_printf(priv->ifp, "%s: Setting port status to up failed\n", __func__); mlx5e_open_locked(priv->ifp); priv->ifp->if_drv_flags |= IFF_DRV_RUNNING; PRIV_UNLOCK(priv); } int mlx5e_close_locked(struct ifnet *ifp) { struct mlx5e_priv
*priv = ifp->if_softc; /* check if already closed */ if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) return (0); clear_bit(MLX5E_STATE_OPENED, &priv->state); mlx5e_set_rx_mode_core(priv); mlx5e_del_all_vlan_rules(priv); if_link_state_change(priv->ifp, LINK_STATE_DOWN); mlx5e_close_flow_table(priv); mlx5e_close_tirs(priv); mlx5e_close_rqt(priv); mlx5e_close_channels(priv); mlx5_vport_dealloc_q_counter(priv->mdev, MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id); mlx5e_close_tises(priv); return (0); } #if (__FreeBSD_version >= 1100000) static uint64_t mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt) { struct mlx5e_priv *priv = ifp->if_softc; u64 retval; /* PRIV_LOCK(priv); XXX not allowed */ switch (cnt) { case IFCOUNTER_IPACKETS: retval = priv->stats.vport.rx_packets; break; case IFCOUNTER_IERRORS: retval = priv->stats.pport.in_range_len_errors + priv->stats.pport.out_of_range_len + priv->stats.pport.too_long_errors + priv->stats.pport.check_seq_err + priv->stats.pport.alignment_err; break; case IFCOUNTER_IQDROPS: retval = priv->stats.vport.rx_out_of_buffer; break; case IFCOUNTER_OPACKETS: retval = priv->stats.vport.tx_packets; break; case IFCOUNTER_OERRORS: retval = priv->stats.port_stats_debug.out_discards; break; case IFCOUNTER_IBYTES: retval = priv->stats.vport.rx_bytes; break; case IFCOUNTER_OBYTES: retval = priv->stats.vport.tx_bytes; break; case IFCOUNTER_IMCASTS: retval = priv->stats.vport.rx_multicast_packets; break; case IFCOUNTER_OMCASTS: retval = priv->stats.vport.tx_multicast_packets; break; case IFCOUNTER_OQDROPS: retval = priv->stats.vport.tx_queue_dropped; break; case IFCOUNTER_COLLISIONS: retval = priv->stats.pport.collisions; break; default: retval = if_get_counter_default(ifp, cnt); break; } /* PRIV_UNLOCK(priv); XXX not allowed */ return (retval); } #endif static void mlx5e_set_rx_mode(struct ifnet *ifp) { struct mlx5e_priv *priv = ifp->if_softc; queue_work(priv->wq, &priv->set_rx_mode_work); } static int mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct mlx5e_priv *priv; struct ifreq *ifr; struct ifi2creq i2c; int error = 0; int mask = 0; int size_read = 0; int module_status; int module_num; int max_mtu; uint8_t read_addr; priv = ifp->if_softc; /* check if detaching */ if (priv == NULL || priv->gone != 0) return (ENXIO); switch (command) { case SIOCSIFMTU: ifr = (struct ifreq *)data; PRIV_LOCK(priv); mlx5_query_port_max_mtu(priv->mdev, &max_mtu); if (ifr->ifr_mtu >= MLX5E_MTU_MIN && ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) { int was_opened; was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); if (was_opened) mlx5e_close_locked(ifp); /* set new MTU */ mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu); if (was_opened) mlx5e_open_locked(ifp); } else { error = EINVAL; if_printf(ifp, "Invalid MTU value. 
Min val: %d, Max val: %d\n", MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu)); } PRIV_UNLOCK(priv); break; case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { mlx5e_set_rx_mode(ifp); break; } PRIV_LOCK(priv); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) mlx5e_open_locked(ifp); ifp->if_drv_flags |= IFF_DRV_RUNNING; mlx5_set_port_status(priv->mdev, MLX5_PORT_UP); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mlx5_set_port_status(priv->mdev, MLX5_PORT_DOWN); if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0) mlx5e_close_locked(ifp); mlx5e_update_carrier(priv); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } } PRIV_UNLOCK(priv); break; case SIOCADDMULTI: case SIOCDELMULTI: mlx5e_set_rx_mode(ifp); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: case SIOCGIFXMEDIA: ifr = (struct ifreq *)data; error = ifmedia_ioctl(ifp, ifr, &priv->media, command); break; case SIOCSIFCAP: ifr = (struct ifreq *)data; PRIV_LOCK(priv); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_IP_TSO; if_printf(ifp, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & ifp->if_capenable && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO6; ifp->if_hwassist &= ~CSUM_IP6_TSO; if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & ifp->if_capenable) && !(IFCAP_TXCSUM & ifp->if_capenable)) { if_printf(ifp, "enable txcsum first.\n"); error = EAGAIN; goto out; } ifp->if_capenable ^= IFCAP_TSO4; ifp->if_hwassist ^= CSUM_IP_TSO; } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & ifp->if_capenable) && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { if_printf(ifp, "enable txcsum6 first.\n"); error = EAGAIN; goto out; } ifp->if_capenable ^= IFCAP_TSO6; ifp->if_hwassist ^= CSUM_IP6_TSO; } if (mask & IFCAP_VLAN_HWFILTER) { if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) mlx5e_disable_vlan_filter(priv); else mlx5e_enable_vlan_filter(priv); ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; } if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_WOL_MAGIC) ifp->if_capenable ^= IFCAP_WOL_MAGIC; VLAN_CAPABILITIES(ifp); /* turning off LRO also means turning off HW LRO, if it's on */ if (mask & IFCAP_LRO) { int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); bool need_restart = false; ifp->if_capenable ^= IFCAP_LRO; /* figure out if updating HW LRO is needed */ if (!(ifp->if_capenable & IFCAP_LRO)) { if (priv->params.hw_lro_en) { priv->params.hw_lro_en = false; need_restart = true; } } else { if (priv->params.hw_lro_en == false && priv->params_ethtool.hw_lro != 0) { priv->params.hw_lro_en = true; need_restart = true; } } if (was_opened && need_restart) { mlx5e_close_locked(ifp); mlx5e_open_locked(ifp); } } if (mask & IFCAP_HWRXTSTMP) { ifp->if_capenable ^= IFCAP_HWRXTSTMP; if (ifp->if_capenable & IFCAP_HWRXTSTMP) { if (priv->clbr_done == 0) mlx5e_reset_calibration_callout(priv); } else { callout_drain(&priv->tstmp_clbr); priv->clbr_done
= 0; } } out: PRIV_UNLOCK(priv); break; case SIOCGI2C: ifr = (struct ifreq *)data; /* * Copy from the user-space address ifr_data to the * kernel-space address i2c */ error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (error) break; if (i2c.len > sizeof(i2c.data)) { error = EINVAL; break; } PRIV_LOCK(priv); /* Get module_num which is required for the query_eeprom */ error = mlx5_query_module_num(priv->mdev, &module_num); if (error) { if_printf(ifp, "Query module num failed, eeprom " "reading is not supported\n"); error = EINVAL; goto err_i2c; } /* Check if module is present before doing an access */ module_status = mlx5_query_module_status(priv->mdev, module_num); if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED && module_status != MLX5_MODULE_STATUS_PLUGGED_DISABLED) { error = EINVAL; goto err_i2c; } /* * Currently 0xA0 and 0xA2 are the only addresses permitted. * The internal conversion is as follows: */ if (i2c.dev_addr == 0xA0) read_addr = MLX5E_I2C_ADDR_LOW; else if (i2c.dev_addr == 0xA2) read_addr = MLX5E_I2C_ADDR_HIGH; else { if_printf(ifp, "Query eeprom failed, " "Invalid Address: %X\n", i2c.dev_addr); error = EINVAL; goto err_i2c; } error = mlx5_query_eeprom(priv->mdev, read_addr, MLX5E_EEPROM_LOW_PAGE, (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num, (uint32_t *)i2c.data, &size_read); if (error) { if_printf(ifp, "Query eeprom failed, eeprom " "reading is not supported\n"); error = EINVAL; goto err_i2c; } if (i2c.len > MLX5_EEPROM_MAX_BYTES) { error = mlx5_query_eeprom(priv->mdev, read_addr, MLX5E_EEPROM_LOW_PAGE, (uint32_t)(i2c.offset + size_read), (uint32_t)(i2c.len - size_read), module_num, (uint32_t *)(i2c.data + size_read), &size_read); } if (error) { if_printf(ifp, "Query eeprom failed, eeprom " "reading is not supported\n"); error = EINVAL; goto err_i2c; } error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); err_i2c: PRIV_UNLOCK(priv); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) { /* * TODO: uncomment once FW really sets all these bits if * (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap || * !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap || * !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD)) return * -ENOTSUPP; */ /* TODO: add more must-have features */ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return (-ENODEV); return (0); } static u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) { uint32_t bf_buf_size = (1U << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2U; bf_buf_size -= sizeof(struct mlx5e_tx_wqe) - 2; /* verify against driver hardware limit */ if (bf_buf_size > MLX5E_MAX_TX_INLINE) bf_buf_size = MLX5E_MAX_TX_INLINE; return (bf_buf_size); } static int mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv, int num_comp_vectors) { int err; /* * TODO: Consider link speed for setting "log_sq_size", * "log_rq_size" and "cq_moderation_xxx": */ priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; priv->params.rx_cq_moderation_usec = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; priv->params.rx_cq_moderation_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
1 : 0; priv->params.rx_cq_moderation_pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; priv->params.tx_cq_moderation_usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; priv->params.tx_cq_moderation_pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.min_rx_wqes = MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; priv->params.rx_hash_log_tbl_sz = (order_base_2(num_comp_vectors) > MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? order_base_2(num_comp_vectors) : MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; priv->params.num_tc = 1; priv->params.default_vlan_prio = 0; priv->counter_set_id = -1; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); err = mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode); if (err) return (err); /* * HW LRO is currently defaulted to off. When it no longer is, we * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)" */ priv->params.hw_lro_en = false; priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; /* * CQE zipping is currently defaulted to off. When it no longer * is, we will consider the HW capability: * "!!MLX5_CAP_GEN(mdev, cqe_compression)" */ priv->params.cqe_zipping_en = false; priv->mdev = mdev; priv->params.num_channels = num_comp_vectors; priv->params.channels_rsss = 1; priv->order_base_2_num_channels = order_base_2(num_comp_vectors); priv->queue_mapping_channel_mask = roundup_pow_of_two(num_comp_vectors) - 1; priv->num_tc = priv->params.num_tc; priv->default_vlan_prio = priv->params.default_vlan_prio; INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work); INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); return (0); } static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, struct mlx5_core_mr *mkey) { struct ifnet *ifp = priv->ifp; struct mlx5_core_dev *mdev = priv->mdev; int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); void *mkc; u32 *in; int err; in = mlx5_vzalloc(inlen); if (in == NULL) { if_printf(ifp, "%s: failed to allocate inbox\n", __func__); return (-ENOMEM); } mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); MLX5_SET(mkc, mkc, access_mode, MLX5_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, pd, pdn); MLX5_SET(mkc, mkc, length64, 1); MLX5_SET(mkc, mkc, qpn, 0xffffff); err = mlx5_core_create_mkey(mdev, mkey, in, inlen); if (err) if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n", __func__, err); kvfree(in); return (err); } static const char *mlx5e_vport_stats_desc[] = { MLX5E_VPORT_STATS(MLX5E_STATS_DESC) }; static const char *mlx5e_pport_stats_desc[] = { MLX5E_PPORT_STATS(MLX5E_STATS_DESC) }; static void mlx5e_priv_mtx_init(struct mlx5e_priv *priv) { mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF); sx_init(&priv->state_lock, "mlx5state"); callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0); MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock); } static void mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv) { mtx_destroy(&priv->async_events_mtx); sx_destroy(&priv->state_lock); } static int sysctl_firmware(SYSCTL_HANDLER_ARGS) { /* * %d.%d.%d is the string format. * fw_rev_{maj,min,sub} return u16, 2^16 = 65536. * We need at most 5 chars to store that. * It also has: two "." and NULL at the end, which means we need 18 * (5*3 + 3) chars at most.
*/ char fw[18]; struct mlx5e_priv *priv = arg1; int error; snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev), fw_rev_sub(priv->mdev)); error = sysctl_handle_string(oidp, fw, sizeof(fw), req); return (error); } static void mlx5e_disable_tx_dma(struct mlx5e_channel *ch) { int i; for (i = 0; i < ch->num_tc; i++) mlx5e_drain_sq(&ch->sq[i]); } static void mlx5e_reset_sq_doorbell_record(struct mlx5e_sq *sq) { sq->doorbell.d32[0] = cpu_to_be32(MLX5_OPCODE_NOP); sq->doorbell.d32[1] = cpu_to_be32(sq->sqn << 8); mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0); sq->doorbell.d64 = 0; } void mlx5e_resume_sq(struct mlx5e_sq *sq) { int err; /* check if already enabled */ if (READ_ONCE(sq->running) != 0) return; err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_ERR, MLX5_SQC_STATE_RST); if (err != 0) { if_printf(sq->ifp, "mlx5e_modify_sq() from ERR to RST failed: %d\n", err); } sq->cc = 0; sq->pc = 0; /* reset doorbell prior to moving from RST to RDY */ mlx5e_reset_sq_doorbell_record(sq); err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); if (err != 0) { if_printf(sq->ifp, "mlx5e_modify_sq() from RST to RDY failed: %d\n", err); } sq->cev_next_state = MLX5E_CEV_STATE_INITIAL; WRITE_ONCE(sq->running, 1); } static void mlx5e_enable_tx_dma(struct mlx5e_channel *ch) { int i; for (i = 0; i < ch->num_tc; i++) mlx5e_resume_sq(&ch->sq[i]); } static void mlx5e_disable_rx_dma(struct mlx5e_channel *ch) { struct mlx5e_rq *rq = &ch->rq; int err; mtx_lock(&rq->mtx); rq->enabled = 0; callout_stop(&rq->watchdog); mtx_unlock(&rq->mtx); callout_drain(&rq->watchdog); err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); if (err != 0) { if_printf(rq->ifp, "mlx5e_modify_rq() from RDY to ERR failed: %d\n", err); } while (!mlx5_wq_ll_is_empty(&rq->wq)) { msleep(1); rq->cq.mcq.comp(&rq->cq.mcq); } /* * Transitioning into RST state will allow the FW to track fewer ERR state queues, * thus reducing the recv queue flushing time */ err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_ERR, MLX5_RQC_STATE_RST); if (err != 0) { if_printf(rq->ifp, "mlx5e_modify_rq() from ERR to RST failed: %d\n", err); } } static void mlx5e_enable_rx_dma(struct mlx5e_channel *ch) { struct mlx5e_rq *rq = &ch->rq; int err; rq->wq.wqe_ctr = 0; mlx5_wq_ll_update_db_record(&rq->wq); err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err != 0) { if_printf(rq->ifp, "mlx5e_modify_rq() from RST to RDY failed: %d\n", err); } rq->enabled = 1; rq->cq.mcq.comp(&rq->cq.mcq); } void mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value) { int i; if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) return; for (i = 0; i < priv->params.num_channels; i++) { if (value) mlx5e_disable_tx_dma(&priv->channel[i]); else mlx5e_enable_tx_dma(&priv->channel[i]); } } void mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value) { int i; if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0) return; for (i = 0; i < priv->params.num_channels; i++) { if (value) mlx5e_disable_rx_dma(&priv->channel[i]); else mlx5e_enable_rx_dma(&priv->channel[i]); } } static void mlx5e_add_hw_stats(struct mlx5e_priv *priv) { SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0, sysctl_firmware, "A", "HCA firmware version"); SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw), OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0, "Board ID"); } static int mlx5e_sysctl_tx_priority_flow_control(SYSCTL_HANDLER_ARGS) { struct mlx5e_priv *priv =
arg1; uint8_t temp[MLX5E_MAX_PRIORITY]; uint32_t tx_pfc; int err; int i; PRIV_LOCK(priv); tx_pfc = priv->params.tx_priority_flow_control; for (i = 0; i != MLX5E_MAX_PRIORITY; i++) temp[i] = (tx_pfc >> i) & 1; err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); if (err || !req->newptr) goto done; err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); if (err) goto done; priv->params.tx_priority_flow_control = 0; /* range check input value */ for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { if (temp[i] > 1) { err = ERANGE; goto done; } priv->params.tx_priority_flow_control |= (temp[i] << i); } /* check if update is required */ if (tx_pfc != priv->params.tx_priority_flow_control) err = -mlx5e_set_port_pfc(priv); done: if (err != 0) priv->params.tx_priority_flow_control = tx_pfc; PRIV_UNLOCK(priv); return (err); } static int mlx5e_sysctl_rx_priority_flow_control(SYSCTL_HANDLER_ARGS) { struct mlx5e_priv *priv = arg1; uint8_t temp[MLX5E_MAX_PRIORITY]; uint32_t rx_pfc; int err; int i; PRIV_LOCK(priv); rx_pfc = priv->params.rx_priority_flow_control; for (i = 0; i != MLX5E_MAX_PRIORITY; i++) temp[i] = (rx_pfc >> i) & 1; err = SYSCTL_OUT(req, temp, MLX5E_MAX_PRIORITY); if (err || !req->newptr) goto done; err = SYSCTL_IN(req, temp, MLX5E_MAX_PRIORITY); if (err) goto done; priv->params.rx_priority_flow_control = 0; /* range check input value */ for (i = 0; i != MLX5E_MAX_PRIORITY; i++) { if (temp[i] > 1) { err = ERANGE; goto done; } priv->params.rx_priority_flow_control |= (temp[i] << i); } /* check if update is required */ if (rx_pfc != priv->params.rx_priority_flow_control) err = -mlx5e_set_port_pfc(priv); done: if (err != 0) priv->params.rx_priority_flow_control = rx_pfc; PRIV_UNLOCK(priv); return (err); } static void mlx5e_setup_pauseframes(struct mlx5e_priv *priv) { #if (__FreeBSD_version < 1100000) char path[96]; #endif int error; /* enable pauseframes by default */ priv->params.tx_pauseframe_control = 1; priv->params.rx_pauseframe_control = 1; /* disable ports flow control, PFC, by default */ priv->params.tx_priority_flow_control = 0; priv->params.rx_priority_flow_control = 0; #if (__FreeBSD_version < 1100000) /* compute path for sysctl */ snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control", device_get_unit(priv->mdev->pdev->dev.bsddev)); /* try to fetch tunable, if any */ TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control); /* compute path for sysctl */ snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control", device_get_unit(priv->mdev->pdev->dev.bsddev)); /* try to fetch tunable, if any */ TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control); #endif /* register pauseframe SYSCTLs */ SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN, &priv->params.tx_pauseframe_control, 0, "Set to enable TX pause frames. Clear to disable."); SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN, &priv->params.rx_pauseframe_control, 0, "Set to enable RX pause frames. Clear to disable."); /* register priority flow control, PFC, SYSCTLs */ SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, "tx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_tx_priority_flow_control, "CU", "Set to enable TX ports flow control frames for priorities 0..7.
Clear to disable."); SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, "rx_priority_flow_control", CTLTYPE_U8 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, priv, 0, &mlx5e_sysctl_rx_priority_flow_control, "CU", "Set to enable RX ports flow control frames for priorities 0..7. Clear to disable."); PRIV_LOCK(priv); /* range check */ priv->params.tx_pauseframe_control = priv->params.tx_pauseframe_control ? 1 : 0; priv->params.rx_pauseframe_control = priv->params.rx_pauseframe_control ? 1 : 0; /* update firmware */ error = mlx5e_set_port_pause_and_pfc(priv); if (error == -EINVAL) { if_printf(priv->ifp, "Global pauseframes must be disabled before enabling PFC.\n"); priv->params.rx_priority_flow_control = 0; priv->params.tx_priority_flow_control = 0; /* update firmware */ (void) mlx5e_set_port_pause_and_pfc(priv); } PRIV_UNLOCK(priv); } static int mlx5e_ul_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params, struct m_snd_tag **ppmt) { struct mlx5e_priv *priv; struct mlx5e_channel *pch; priv = ifp->if_softc; if (unlikely(priv->gone || params->hdr.flowtype == M_HASHTYPE_NONE)) { return (EOPNOTSUPP); } else { /* keep this code synced with mlx5e_select_queue() */ u32 ch = priv->params.num_channels; #ifdef RSS u32 temp; if (rss_hash2bucket(params->hdr.flowid, params->hdr.flowtype, &temp) == 0) ch = temp % ch; else #endif ch = (params->hdr.flowid % 128) % ch; /* * NOTE: The channels array is only freed at detach * and it safe to return a pointer to the send tag * inside the channels structure as long as we * reference the priv. */ pch = priv->channel + ch; /* check if send queue is not running */ if (unlikely(pch->sq[0].running == 0)) return (ENXIO); mlx5e_ref_channel(priv); *ppmt = &pch->tag.m_snd_tag; return (0); } } static int mlx5e_ul_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params) { struct mlx5e_channel *pch = container_of(pmt, struct mlx5e_channel, tag.m_snd_tag); params->unlimited.max_rate = -1ULL; params->unlimited.queue_level = mlx5e_sq_queue_level(&pch->sq[0]); return (0); } static void mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt) { struct mlx5e_channel *pch = container_of(pmt, struct mlx5e_channel, tag.m_snd_tag); mlx5e_unref_channel(pch->priv); } static int mlx5e_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params, struct m_snd_tag **ppmt) { switch (params->hdr.type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: return (mlx5e_rl_snd_tag_alloc(ifp, params, ppmt)); #endif case IF_SND_TAG_TYPE_UNLIMITED: return (mlx5e_ul_snd_tag_alloc(ifp, params, ppmt)); default: return (EOPNOTSUPP); } } static int mlx5e_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params) { struct mlx5e_snd_tag *tag = container_of(pmt, struct mlx5e_snd_tag, m_snd_tag); switch (tag->type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: return (mlx5e_rl_snd_tag_modify(pmt, params)); #endif case IF_SND_TAG_TYPE_UNLIMITED: default: return (EOPNOTSUPP); } } static int mlx5e_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params) { struct mlx5e_snd_tag *tag = container_of(pmt, struct mlx5e_snd_tag, m_snd_tag); switch (tag->type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: return (mlx5e_rl_snd_tag_query(pmt, params)); #endif case IF_SND_TAG_TYPE_UNLIMITED: return (mlx5e_ul_snd_tag_query(pmt, params)); default: return (EOPNOTSUPP); } } static void mlx5e_snd_tag_free(struct m_snd_tag *pmt) { struct mlx5e_snd_tag *tag = container_of(pmt, struct mlx5e_snd_tag, m_snd_tag); switch 
(tag->type) { #ifdef RATELIMIT case IF_SND_TAG_TYPE_RATE_LIMIT: mlx5e_rl_snd_tag_free(pmt); break; #endif case IF_SND_TAG_TYPE_UNLIMITED: mlx5e_ul_snd_tag_free(pmt); break; default: break; } } static void * mlx5e_create_ifp(struct mlx5_core_dev *mdev) { struct ifnet *ifp; struct mlx5e_priv *priv; u8 dev_addr[ETHER_ADDR_LEN] __aligned(4); u8 connector_type; struct sysctl_oid_list *child; int ncv = mdev->priv.eq_table.num_comp_vectors; char unit[16]; struct pfil_head_args pa; int err; int i,j; u32 eth_proto_cap; u32 out[MLX5_ST_SZ_DW(ptys_reg)]; bool ext = 0; u32 speeds_num; struct media media_entry = {}; if (mlx5e_check_required_hca_cap(mdev)) { mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n"); return (NULL); } /* * Try to allocate the priv and make room for worst-case * number of channel structures: */ priv = malloc(sizeof(*priv) + (sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors), M_MLX5EN, M_WAITOK | M_ZERO); mlx5e_priv_mtx_init(priv); ifp = priv->ifp = if_alloc_dev(IFT_ETHER, mdev->pdev->dev.bsddev); if (ifp == NULL) { mlx5_core_err(mdev, "if_alloc() failed\n"); goto err_free_priv; } ifp->if_softc = priv; if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev)); ifp->if_mtu = ETHERMTU; ifp->if_init = mlx5e_open; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = mlx5e_ioctl; ifp->if_transmit = mlx5e_xmit; ifp->if_qflush = if_qflush; #if (__FreeBSD_version >= 1100000) ifp->if_get_counter = mlx5e_get_counter; #endif ifp->if_snd.ifq_maxlen = ifqmaxlen; /* * Set driver features */ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; ifp->if_capabilities |= IFCAP_LRO; ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO; ifp->if_capabilities |= IFCAP_HWSTATS | IFCAP_HWRXTSTMP; ifp->if_capabilities |= IFCAP_TXRTLMT; ifp->if_snd_tag_alloc = mlx5e_snd_tag_alloc; ifp->if_snd_tag_free = mlx5e_snd_tag_free; ifp->if_snd_tag_modify = mlx5e_snd_tag_modify; ifp->if_snd_tag_query = mlx5e_snd_tag_query; /* set TSO limits so that we don't have to drop TX packets */ ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */; ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE; ifp->if_capenable = ifp->if_capabilities; ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); /* ifnet sysctl tree */ sysctl_ctx_init(&priv->sysctl_ctx); priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name"); if (priv->sysctl_ifnet == NULL) { mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); goto err_free_sysctl; } snprintf(unit, sizeof(unit), "%d", ifp->if_dunit); priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit"); if (priv->sysctl_ifnet == NULL) { mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); goto err_free_sysctl; } /* HW sysctl tree */ child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev)); priv->sysctl_hw = 
SYSCTL_ADD_NODE(&priv->sysctl_ctx, child, OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw"); if (priv->sysctl_hw == NULL) { mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n"); goto err_free_sysctl; } err = mlx5e_build_ifp_priv(mdev, priv, ncv); if (err) { mlx5_core_err(mdev, "mlx5e_build_ifp_priv() failed (%d)\n", err); goto err_free_sysctl; } /* reuse mlx5core's watchdog workqueue */ priv->wq = mdev->priv.health.wq_watchdog; err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); if (err) { if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n", __func__, err); goto err_free_wq; } err = mlx5_core_alloc_pd(mdev, &priv->pdn); if (err) { if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n", __func__, err); goto err_unmap_free_uar; } err = mlx5_alloc_transport_domain(mdev, &priv->tdn); if (err) { if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n", __func__, err); goto err_dealloc_pd; } err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); if (err) { if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n", __func__, err); goto err_dealloc_transport_domain; } mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr); /* check if we should generate a random MAC address */ if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 && is_zero_ether_addr(dev_addr)) { random_ether_addr(dev_addr); if_printf(ifp, "Assigned random MAC address\n"); } #ifdef RATELIMIT err = mlx5e_rl_init(priv); if (err) { if_printf(ifp, "%s: mlx5e_rl_init failed, %d\n", __func__, err); goto err_create_mkey; } #endif /* set default MTU */ mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu); /* Set default media status */ priv->media_status_last = IFM_AVALID; priv->media_active_last = IFM_ETHER | IFM_AUTO | IFM_ETH_RXPAUSE | IFM_FDX; /* setup default pauseframes configuration */ mlx5e_setup_pauseframes(priv); /* Set up supported media */ /* TODO: If we failed to query ptys, is it OK to proceed? */ if (!mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1)) { ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability); if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) connector_type = MLX5_GET(ptys_reg, out, connector_type); } else { eth_proto_cap = 0; if_printf(ifp, "%s: Query port media capability failed\n", __func__); } ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, mlx5e_media_change, mlx5e_media_status); speeds_num = ext ? MLX5E_EXT_LINK_SPEEDS_NUMBER : MLX5E_LINK_SPEEDS_NUMBER; for (i = 0; i != speeds_num; i++) { for (j = 0; j < MLX5E_LINK_MODES_NUMBER; ++j) { media_entry = ext ?
mlx5e_ext_mode_table[i][j] : mlx5e_mode_table[i][j]; if (media_entry.baudrate == 0) continue; if (MLX5E_PROT_MASK(i) & eth_proto_cap) { ifmedia_add(&priv->media, media_entry.subtype | IFM_ETHER, 0, NULL); ifmedia_add(&priv->media, media_entry.subtype | IFM_ETHER | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); } } } ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL); /* Set autoselect by default */ ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); ether_ifattach(ifp, dev_addr); /* Register for VLAN events */ priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); /* Link is down by default */ if_link_state_change(ifp, LINK_STATE_DOWN); mlx5e_enable_async_events(priv); mlx5e_add_hw_stats(priv); mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM, priv->stats.vport.arg); mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM, priv->stats.pport.arg); mlx5e_create_ethtool(priv); mtx_lock(&priv->async_events_mtx); mlx5e_update_stats(priv); mtx_unlock(&priv->async_events_mtx); SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO, "rx_clbr_done", CTLFLAG_RD, &priv->clbr_done, 0, "RX timestamps calibration state"); callout_init(&priv->tstmp_clbr, CALLOUT_DIRECT); mlx5e_reset_calibration_callout(priv); pa.pa_version = PFIL_VERSION; pa.pa_flags = PFIL_IN; pa.pa_type = PFIL_TYPE_ETHERNET; pa.pa_headname = ifp->if_xname; priv->pfil = pfil_head_register(&pa); return (priv); #ifdef RATELIMIT err_create_mkey: mlx5_core_destroy_mkey(priv->mdev, &priv->mr); #endif err_dealloc_transport_domain: mlx5_dealloc_transport_domain(mdev, priv->tdn); err_dealloc_pd: mlx5_core_dealloc_pd(mdev, priv->pdn); err_unmap_free_uar: mlx5_unmap_free_uar(mdev, &priv->cq_uar); err_free_wq: flush_workqueue(priv->wq); err_free_sysctl: sysctl_ctx_free(&priv->sysctl_ctx); if (priv->sysctl_debug) sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); if_free(ifp); err_free_priv: mlx5e_priv_mtx_destroy(priv); free(priv, M_MLX5EN); return (NULL); } static void mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv) { struct mlx5e_priv *priv = vpriv; struct ifnet *ifp = priv->ifp; /* don't allow more IOCTLs */ priv->gone = 1; /* XXX wait a bit to allow IOCTL handlers to complete */ pause("W", hz); #ifdef RATELIMIT /* * The kernel can have reference(s) via the m_snd_tag's into * the ratelimit channels, and these must go away before * detaching: */ while (READ_ONCE(priv->rl.stats.tx_active_connections) != 0) { if_printf(priv->ifp, "Waiting for all ratelimit connections " "to terminate\n"); pause("W", hz); } #endif /* stop watchdog timer */ callout_drain(&priv->watchdog); callout_drain(&priv->tstmp_clbr); if (priv->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); if (priv->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); /* make sure device gets closed */ PRIV_LOCK(priv); mlx5e_close_locked(ifp); PRIV_UNLOCK(priv); /* wait for all unlimited send tags to go away */ while (priv->channel_refs != 0) { if_printf(priv->ifp, "Waiting for all unlimited connections " "to 
terminate\n"); pause("W", hz); } /* deregister pfil */ if (priv->pfil != NULL) { pfil_head_unregister(priv->pfil); priv->pfil = NULL; } /* unregister device */ ifmedia_removeall(&priv->media); ether_ifdetach(ifp); if_free(ifp); #ifdef RATELIMIT mlx5e_rl_cleanup(priv); #endif /* destroy all remaining sysctl nodes */ sysctl_ctx_free(&priv->stats.vport.ctx); sysctl_ctx_free(&priv->stats.pport.ctx); if (priv->sysctl_debug) sysctl_ctx_free(&priv->stats.port_stats_debug.ctx); sysctl_ctx_free(&priv->sysctl_ctx); mlx5_core_destroy_mkey(priv->mdev, &priv->mr); mlx5_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); mlx5e_disable_async_events(priv); flush_workqueue(priv->wq); mlx5e_priv_mtx_destroy(priv); free(priv, M_MLX5EN); } static void * mlx5e_get_ifp(void *vpriv) { struct mlx5e_priv *priv = vpriv; return (priv->ifp); } static struct mlx5_interface mlx5e_interface = { .add = mlx5e_create_ifp, .remove = mlx5e_destroy_ifp, .event = mlx5e_async_event, .protocol = MLX5_INTERFACE_PROTOCOL_ETH, .get_dev = mlx5e_get_ifp, }; void mlx5e_init(void) { mlx5_register_interface(&mlx5e_interface); } void mlx5e_cleanup(void) { mlx5_unregister_interface(&mlx5e_interface); } static void mlx5e_show_version(void __unused *arg) { printf("%s", mlx5e_version); } SYSINIT(mlx5e_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5e_show_version, NULL); module_init_order(mlx5e_init, SI_ORDER_THIRD); module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD); #if (__FreeBSD_version >= 1100000) MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1); #endif MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1); MODULE_VERSION(mlx5en, 1); Index: user/ngie/bug-237403/sys/mips/atheros/ar531x/ar5315_wdog.c =================================================================== --- user/ngie/bug-237403/sys/mips/atheros/ar531x/ar5315_wdog.c (revision 348028) +++ user/ngie/bug-237403/sys/mips/atheros/ar531x/ar5315_wdog.c (revision 348029) @@ -1,150 +1,151 @@ /*- * Copyright (c) 2016, Hiroki Mori * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * Watchdog driver for AR5315 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include #include #include #include #include struct ar5315_wdog_softc { device_t dev; int armed; int reboot_from_watchdog; int debug; }; static void ar5315_wdog_watchdog_fn(void *private, u_int cmd, int *error) { struct ar5315_wdog_softc *sc = private; uint64_t timer_val; cmd &= WD_INTERVAL; if (sc->debug) device_printf(sc->dev, "ar5315_wdog_watchdog_fn: cmd: %x\n", cmd); if (cmd > 0) { timer_val = (uint64_t)(1ULL << cmd) * ar531x_ahb_freq() / 1000000000; if (sc->debug) device_printf(sc->dev, "ar5315_wdog_watchdog_fn: programming timer: %jx\n", (uintmax_t) timer_val); /* * Load timer with large enough value to prevent spurious * reset */ ATH_WRITE_REG(ar531x_wdog_timer(), ar531x_ahb_freq() * 10); ATH_WRITE_REG(ar531x_wdog_ctl(), AR5315_WDOG_CTL_RESET); ATH_WRITE_REG(ar531x_wdog_timer(), (timer_val & 0xffffffff)); sc->armed = 1; *error = 0; } else { if (sc->debug) device_printf(sc->dev, "ar5315_wdog_watchdog_fn: disarming\n"); if (sc->armed) { ATH_WRITE_REG(ar531x_wdog_ctl(), AR5315_WDOG_CTL_IGNORE); sc->armed = 0; } } } static int ar5315_wdog_probe(device_t dev) { device_set_desc(dev, "Atheros AR531x watchdog timer"); return (0); } static void ar5315_wdog_sysctl(device_t dev) { struct ar5315_wdog_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->dev); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "enable watchdog debugging"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "armed", CTLFLAG_RD, &sc->armed, 0, "whether the watchdog is armed"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "reboot_from_watchdog", CTLFLAG_RD, &sc->reboot_from_watchdog, 0, "whether the system rebooted from the watchdog"); } static int ar5315_wdog_attach(device_t dev) { struct ar5315_wdog_softc *sc = device_get_softc(dev); /* Initialise */ sc->reboot_from_watchdog = 0; sc->armed = 0; sc->debug = 0; ATH_WRITE_REG(ar531x_wdog_ctl(), AR5315_WDOG_CTL_IGNORE); sc->dev = dev; EVENTHANDLER_REGISTER(watchdog_list, ar5315_wdog_watchdog_fn, sc, 0); ar5315_wdog_sysctl(dev); return (0); } static device_method_t ar5315_wdog_methods[] = { DEVMETHOD(device_probe, ar5315_wdog_probe), DEVMETHOD(device_attach, ar5315_wdog_attach), DEVMETHOD_END }; static driver_t ar5315_wdog_driver = { "ar5315_wdog", ar5315_wdog_methods, sizeof(struct ar5315_wdog_softc), }; static devclass_t ar5315_wdog_devclass; DRIVER_MODULE(ar5315_wdog, apb, ar5315_wdog_driver, ar5315_wdog_devclass, 0, 0); Index: user/ngie/bug-237403/sys/mips/cavium/octeon_wdog.c =================================================================== --- user/ngie/bug-237403/sys/mips/cavium/octeon_wdog.c (revision 348028) +++ user/ngie/bug-237403/sys/mips/cavium/octeon_wdog.c (revision 348029) @@ -1,277 +1,278 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009, Oleksandr Tymoshenko * Copyright (c) 2010-2011, Juli Mallett * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Watchdog driver for Cavium Octeon */ #include __FBSDID("$FreeBSD$"); #include #include #include #include +#include #include #include #include #include #include #include #include #define DEFAULT_TIMER_VAL 65535 struct octeon_wdog_softc { device_t sc_dev; struct octeon_wdog_core_softc { int csc_core; struct resource *csc_intr; void *csc_intr_cookie; } sc_cores[MAXCPU]; int sc_armed; int sc_debug; }; extern void octeon_wdog_nmi_handler(void); void octeon_wdog_nmi(void); static void octeon_watchdog_arm_core(int); static void octeon_watchdog_disarm_core(int); static int octeon_wdog_attach(device_t); static void octeon_wdog_identify(driver_t *, device_t); static int octeon_wdog_intr(void *); static int octeon_wdog_probe(device_t); static void octeon_wdog_setup(struct octeon_wdog_softc *, int); static void octeon_wdog_sysctl(device_t); static void octeon_wdog_watchdog_fn(void *, u_int, int *); void octeon_wdog_nmi(void) { int core; core = cvmx_get_core_num(); printf("cpu%u: NMI detected\n", core); printf("cpu%u: Exception PC: %p\n", core, (void *)mips_rd_excpc()); printf("cpu%u: status %#x cause %#x\n", core, mips_rd_status(), mips_rd_cause()); /* * This is the end * Beautiful friend * * Just wait for Soft Reset to come and take us */ for (;;) continue; } static void octeon_watchdog_arm_core(int core) { cvmx_ciu_wdogx_t ciu_wdog; /* Poke it! */ cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); /* * XXX * Perhaps if KDB is enabled, we should use mode=2 and drop into the * debugger on NMI? 
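* * For reference: the CIU watchdog MODE field programmed below is believed to select the escalation sequence, with mode=3 meaning interrupt, then NMI, then soft reset; this matches the NMI handler above, which simply spins while waiting for the soft reset to arrive.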
* * XXX * Timer should be calculated based on CPU frequency */ ciu_wdog.u64 = 0; ciu_wdog.s.len = DEFAULT_TIMER_VAL; ciu_wdog.s.mode = 3; cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64); } static void octeon_watchdog_disarm_core(int core) { cvmx_write_csr(CVMX_CIU_WDOGX(core), 0); } static void octeon_wdog_watchdog_fn(void *private, u_int cmd, int *error) { struct octeon_wdog_softc *sc = private; int core; cmd &= WD_INTERVAL; if (sc->sc_debug) device_printf(sc->sc_dev, "%s: cmd: %x\n", __func__, cmd); if (cmd > 0) { CPU_FOREACH(core) octeon_watchdog_arm_core(core); sc->sc_armed = 1; *error = 0; } else { if (sc->sc_armed) { CPU_FOREACH(core) octeon_watchdog_disarm_core(core); sc->sc_armed = 0; } } } static void octeon_wdog_sysctl(device_t dev) { struct octeon_wdog_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "enable watchdog debugging"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "armed", CTLFLAG_RD, &sc->sc_armed, 0, "whether the watchdog is armed"); } static void octeon_wdog_setup(struct octeon_wdog_softc *sc, int core) { struct octeon_wdog_core_softc *csc; int rid, error; csc = &sc->sc_cores[core]; csc->csc_core = core; /* Interrupt part */ rid = 0; csc->csc_intr = bus_alloc_resource(sc->sc_dev, SYS_RES_IRQ, &rid, OCTEON_IRQ_WDOG0 + core, OCTEON_IRQ_WDOG0 + core, 1, RF_ACTIVE); if (csc->csc_intr == NULL) panic("%s: bus_alloc_resource for core %u failed", __func__, core); error = bus_setup_intr(sc->sc_dev, csc->csc_intr, INTR_TYPE_MISC, octeon_wdog_intr, NULL, csc, &csc->csc_intr_cookie); if (error != 0) panic("%s: bus_setup_intr for core %u: %d", __func__, core, error); bus_bind_intr(sc->sc_dev, csc->csc_intr, core); bus_describe_intr(sc->sc_dev, csc->csc_intr, csc->csc_intr_cookie, "cpu%u", core); if (sc->sc_armed) { /* Armed by default. */ octeon_watchdog_arm_core(core); } else { /* Disarmed by default. */ octeon_watchdog_disarm_core(core); } } static int octeon_wdog_intr(void *arg) { struct octeon_wdog_core_softc *csc = arg; KASSERT(csc->csc_core == cvmx_get_core_num(), ("got watchdog interrupt for core %u on core %u.", csc->csc_core, cvmx_get_core_num())); (void)csc; /* Poke it! */ cvmx_write_csr(CVMX_CIU_PP_POKEX(cvmx_get_core_num()), 1); return (FILTER_HANDLED); } static int octeon_wdog_probe(device_t dev) { device_set_desc(dev, "Cavium Octeon watchdog timer"); return (0); } static int octeon_wdog_attach(device_t dev) { struct octeon_wdog_softc *sc = device_get_softc(dev); uint64_t *nmi_handler = (uint64_t*)octeon_wdog_nmi_handler; int core, i; /* Initialise */ sc->sc_armed = 0; /* XXX Ought to be a tunable / config option.
*/ sc->sc_debug = 0; sc->sc_dev = dev; EVENTHANDLER_REGISTER(watchdog_list, octeon_wdog_watchdog_fn, sc, 0); octeon_wdog_sysctl(dev); for (i = 0; i < 16; i++) { cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8); cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, nmi_handler[i]); } cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000); CPU_FOREACH(core) octeon_wdog_setup(sc, core); return (0); } static void octeon_wdog_identify(driver_t *drv, device_t parent) { BUS_ADD_CHILD(parent, 0, "owdog", 0); } static device_method_t octeon_wdog_methods[] = { DEVMETHOD(device_identify, octeon_wdog_identify), DEVMETHOD(device_probe, octeon_wdog_probe), DEVMETHOD(device_attach, octeon_wdog_attach), {0, 0}, }; static driver_t octeon_wdog_driver = { "owdog", octeon_wdog_methods, sizeof(struct octeon_wdog_softc), }; static devclass_t octeon_wdog_devclass; DRIVER_MODULE(owdog, ciu, octeon_wdog_driver, octeon_wdog_devclass, 0, 0); Index: user/ngie/bug-237403/sys/mips/ingenic/jz4780_lcd.c =================================================================== --- user/ngie/bug-237403/sys/mips/ingenic/jz4780_lcd.c (revision 348028) +++ user/ngie/bug-237403/sys/mips/ingenic/jz4780_lcd.c (revision 348029) @@ -1,575 +1,576 @@ /*- * Copyright (c) 2016 Jared McNeill * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* * Ingenic JZ4780 LCD Controller */ #include __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fb_if.h" #include "hdmi_if.h" #define FB_DEFAULT_W 800 #define FB_DEFAULT_H 600 #define FB_DEFAULT_REF 60 #define FB_BPP 32 #define FB_ALIGN (16 * 4) #define FB_MAX_BW (1920 * 1080 * 60) #define FB_MAX_W 2048 #define FB_MAX_H 2048 #define FB_DIVIDE(x, y) (((x) + ((y) / 2)) / (y)) #define PCFG_MAGIC 0xc7ff2100 #define DOT_CLOCK_TO_HZ(c) ((c) * 1000) #ifndef VM_MEMATTR_WRITE_COMBINING #define VM_MEMATTR_WRITE_COMBINING VM_MEMATTR_UNCACHEABLE #endif struct jzlcd_softc { device_t dev; device_t fbdev; struct resource *res[1]; /* Clocks */ clk_t clk; clk_t clk_pix; /* Framebuffer */ struct fb_info info; size_t fbsize; bus_addr_t paddr; vm_offset_t vaddr; /* HDMI */ eventhandler_tag hdmi_evh; /* Frame descriptor DMA */ bus_dma_tag_t fdesc_tag; bus_dmamap_t fdesc_map; bus_addr_t fdesc_paddr; struct lcd_frame_descriptor *fdesc; }; static struct resource_spec jzlcd_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; #define LCD_READ(sc, reg) bus_read_4((sc)->res[0], (reg)) #define LCD_WRITE(sc, reg, val) bus_write_4((sc)->res[0], (reg), (val)) static int jzlcd_allocfb(struct jzlcd_softc *sc) { sc->vaddr = kmem_alloc_contig(sc->fbsize, M_NOWAIT | M_ZERO, 0, ~0, FB_ALIGN, 0, VM_MEMATTR_WRITE_COMBINING); if (sc->vaddr == 0) { device_printf(sc->dev, "failed to allocate FB memory\n"); return (ENOMEM); } sc->paddr = pmap_kextract(sc->vaddr); return (0); } static void jzlcd_freefb(struct jzlcd_softc *sc) { kmem_free(sc->vaddr, sc->fbsize); } static void jzlcd_start(struct jzlcd_softc *sc) { uint32_t ctrl; /* Clear status registers */ LCD_WRITE(sc, LCDSTATE, 0); LCD_WRITE(sc, LCDOSDS, 0); /* Enable the controller */ ctrl = LCD_READ(sc, LCDCTRL); ctrl |= LCDCTRL_ENA; ctrl &= ~LCDCTRL_DIS; LCD_WRITE(sc, LCDCTRL, ctrl); } static void jzlcd_stop(struct jzlcd_softc *sc) { uint32_t ctrl; ctrl = LCD_READ(sc, LCDCTRL); if ((ctrl & LCDCTRL_ENA) != 0) { /* Disable the controller and wait for it to stop */ ctrl |= LCDCTRL_DIS; LCD_WRITE(sc, LCDCTRL, ctrl); while ((LCD_READ(sc, LCDSTATE) & LCDSTATE_LDD) == 0) DELAY(100); } /* Clear all status except for disable */ LCD_WRITE(sc, LCDSTATE, LCD_READ(sc, LCDSTATE) & ~LCDSTATE_LDD); } static void jzlcd_setup_descriptor(struct jzlcd_softc *sc, const struct videomode *mode, u_int desno) { struct lcd_frame_descriptor *fdesc; int line_sz; /* Frame size is specified in # words */ line_sz = (mode->hdisplay * FB_BPP) >> 3; line_sz = ((line_sz + 3) & ~3) / 4; fdesc = sc->fdesc + desno; if (desno == 0) fdesc->next = sc->fdesc_paddr + sizeof(struct lcd_frame_descriptor); else fdesc->next = sc->fdesc_paddr; fdesc->physaddr = sc->paddr; fdesc->id = desno; fdesc->cmd = LCDCMD_FRM_EN | (line_sz * mode->vdisplay); fdesc->offs = 0; fdesc->pw = 0; fdesc->cnum_pos = LCDPOS_BPP01_18_24 | LCDPOS_PREMULTI01 | (desno == 0 ? 
LCDPOS_COEF_BLE01_1 : LCDPOS_COEF_SLE01); fdesc->dessize = LCDDESSIZE_ALPHA | ((mode->vdisplay - 1) << LCDDESSIZE_HEIGHT_SHIFT) | ((mode->hdisplay - 1) << LCDDESSIZE_WIDTH_SHIFT); } static int jzlcd_set_videomode(struct jzlcd_softc *sc, const struct videomode *mode) { u_int hbp, hfp, hsw, vbp, vfp, vsw; u_int hds, hde, ht, vds, vde, vt; uint32_t ctrl; int error; hbp = mode->htotal - mode->hsync_end; hfp = mode->hsync_start - mode->hdisplay; hsw = mode->hsync_end - mode->hsync_start; vbp = mode->vtotal - mode->vsync_end; vfp = mode->vsync_start - mode->vdisplay; vsw = mode->vsync_end - mode->vsync_start; hds = hsw + hbp; hde = hds + mode->hdisplay; ht = hde + hfp; vds = vsw + vbp; vde = vds + mode->vdisplay; vt = vde + vfp; /* Setup timings */ LCD_WRITE(sc, LCDVAT, (ht << LCDVAT_HT_SHIFT) | (vt << LCDVAT_VT_SHIFT)); LCD_WRITE(sc, LCDDAH, (hds << LCDDAH_HDS_SHIFT) | (hde << LCDDAH_HDE_SHIFT)); LCD_WRITE(sc, LCDDAV, (vds << LCDDAV_VDS_SHIFT) | (vde << LCDDAV_VDE_SHIFT)); LCD_WRITE(sc, LCDHSYNC, hsw); LCD_WRITE(sc, LCDVSYNC, vsw); /* Set configuration */ LCD_WRITE(sc, LCDCFG, LCDCFG_NEWDES | LCDCFG_RECOVER | LCDCFG_24 | LCDCFG_PSM | LCDCFG_CLSM | LCDCFG_SPLM | LCDCFG_REVM | LCDCFG_PCP); ctrl = LCD_READ(sc, LCDCTRL); ctrl &= ~LCDCTRL_BST; ctrl |= LCDCTRL_BST_64 | LCDCTRL_OFUM; LCD_WRITE(sc, LCDCTRL, ctrl); LCD_WRITE(sc, LCDPCFG, PCFG_MAGIC); LCD_WRITE(sc, LCDRGBC, LCDRGBC_RGBFMT); /* Update registers */ LCD_WRITE(sc, LCDSTATE, 0); /* Setup frame descriptors */ jzlcd_setup_descriptor(sc, mode, 0); jzlcd_setup_descriptor(sc, mode, 1); bus_dmamap_sync(sc->fdesc_tag, sc->fdesc_map, BUS_DMASYNC_PREWRITE); /* Setup DMA channels */ LCD_WRITE(sc, LCDDA0, sc->fdesc_paddr + sizeof(struct lcd_frame_descriptor)); LCD_WRITE(sc, LCDDA1, sc->fdesc_paddr); /* Set display clock */ error = clk_set_freq(sc->clk_pix, DOT_CLOCK_TO_HZ(mode->dot_clock), 0); if (error != 0) { device_printf(sc->dev, "failed to set pixel clock to %u Hz\n", DOT_CLOCK_TO_HZ(mode->dot_clock)); return (error); } return (0); } static int jzlcd_configure(struct jzlcd_softc *sc, const struct videomode *mode) { size_t fbsize; int error; fbsize = round_page(mode->hdisplay * mode->vdisplay * (FB_BPP / NBBY)); /* Detach the old FB device */ if (sc->fbdev != NULL) { device_delete_child(sc->dev, sc->fbdev); sc->fbdev = NULL; } /* If the FB size has changed, free the old FB memory */ if (sc->fbsize > 0 && sc->fbsize != fbsize) { jzlcd_freefb(sc); sc->vaddr = 0; } /* Allocate the FB if necessary */ sc->fbsize = fbsize; if (sc->vaddr == 0) { error = jzlcd_allocfb(sc); if (error != 0) { device_printf(sc->dev, "failed to allocate FB memory\n"); return (ENXIO); } } /* Setup video mode */ error = jzlcd_set_videomode(sc, mode); if (error != 0) return (error); /* Attach framebuffer device */ sc->info.fb_name = device_get_nameunit(sc->dev); sc->info.fb_vbase = (intptr_t)sc->vaddr; sc->info.fb_pbase = sc->paddr; sc->info.fb_size = sc->fbsize; sc->info.fb_bpp = sc->info.fb_depth = FB_BPP; sc->info.fb_stride = mode->hdisplay * (FB_BPP / NBBY); sc->info.fb_width = mode->hdisplay; sc->info.fb_height = mode->vdisplay; #ifdef VM_MEMATTR_WRITE_COMBINING sc->info.fb_flags = FB_FLAG_MEMATTR; sc->info.fb_memattr = VM_MEMATTR_WRITE_COMBINING; #endif sc->fbdev = device_add_child(sc->dev, "fbd", device_get_unit(sc->dev)); if (sc->fbdev == NULL) { device_printf(sc->dev, "failed to add fbd child\n"); return (ENOENT); } error = device_probe_and_attach(sc->fbdev); if (error != 0) { device_printf(sc->dev, "failed to attach fbd device\n"); return (error); } return (0); } static 
int jzlcd_get_bandwidth(const struct videomode *mode) { int refresh; refresh = FB_DIVIDE(FB_DIVIDE(DOT_CLOCK_TO_HZ(mode->dot_clock), mode->htotal), mode->vtotal); return mode->hdisplay * mode->vdisplay * refresh; } static int jzlcd_mode_supported(const struct videomode *mode) { /* Width and height must not exceed 2048 */ if (mode->hdisplay > FB_MAX_W || mode->vdisplay > FB_MAX_H) return (0); /* Bandwidth check */ if (jzlcd_get_bandwidth(mode) > FB_MAX_BW) return (0); /* Interlace modes not yet supported by the driver */ if ((mode->flags & VID_INTERLACE) != 0) return (0); return (1); } static const struct videomode * jzlcd_find_mode(struct edid_info *ei) { const struct videomode *best; int n, bw, best_bw; /* If the preferred mode is OK, just use it */ if (jzlcd_mode_supported(ei->edid_preferred_mode) != 0) return ei->edid_preferred_mode; /* Pick the mode with the highest bandwidth requirements */ best = NULL; best_bw = 0; for (n = 0; n < ei->edid_nmodes; n++) { if (jzlcd_mode_supported(&ei->edid_modes[n]) == 0) continue; bw = jzlcd_get_bandwidth(&ei->edid_modes[n]); if (bw > FB_MAX_BW) continue; if (best == NULL || bw > best_bw) { best = &ei->edid_modes[n]; best_bw = bw; } } return best; } static void jzlcd_hdmi_event(void *arg, device_t hdmi_dev) { const struct videomode *mode; struct videomode hdmi_mode; struct jzlcd_softc *sc; struct edid_info ei; uint8_t *edid; uint32_t edid_len; int error; sc = arg; edid = NULL; edid_len = 0; mode = NULL; error = HDMI_GET_EDID(hdmi_dev, &edid, &edid_len); if (error != 0) { device_printf(sc->dev, "failed to get EDID: %d\n", error); } else { error = edid_parse(edid, &ei); if (error != 0) { device_printf(sc->dev, "failed to parse EDID: %d\n", error); } else { if (bootverbose) edid_print(&ei); mode = jzlcd_find_mode(&ei); } } /* If a suitable mode could not be found, try the default */ if (mode == NULL) mode = pick_mode_by_ref(FB_DEFAULT_W, FB_DEFAULT_H, FB_DEFAULT_REF); if (mode == NULL) { device_printf(sc->dev, "failed to find usable video mode\n"); return; } if (bootverbose) device_printf(sc->dev, "using %dx%d\n", mode->hdisplay, mode->vdisplay); /* Stop the controller */ jzlcd_stop(sc); /* Configure LCD controller */ error = jzlcd_configure(sc, mode); if (error != 0) { device_printf(sc->dev, "failed to configure FB: %d\n", error); return; } /* Enable HDMI TX */ hdmi_mode = *mode; HDMI_SET_VIDEOMODE(hdmi_dev, &hdmi_mode); /* Start the controller!
*/ jzlcd_start(sc); } static void jzlcd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; *(bus_addr_t *)arg = segs[0].ds_addr; } static int jzlcd_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ingenic,jz4780-lcd")) return (ENXIO); device_set_desc(dev, "Ingenic JZ4780 LCD Controller"); return (BUS_PROBE_DEFAULT); } static int jzlcd_attach(device_t dev) { struct jzlcd_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; if (bus_alloc_resources(dev, jzlcd_spec, sc->res)) { device_printf(dev, "cannot allocate resources for device\n"); goto failed; } if (clk_get_by_ofw_name(dev, 0, "lcd_clk", &sc->clk) != 0 || clk_get_by_ofw_name(dev, 0, "lcd_pixclk", &sc->clk_pix) != 0) { device_printf(dev, "cannot get clocks\n"); goto failed; } if (clk_enable(sc->clk) != 0 || clk_enable(sc->clk_pix) != 0) { device_printf(dev, "cannot enable clocks\n"); goto failed; } error = bus_dma_tag_create( bus_get_dma_tag(dev), sizeof(struct lcd_frame_descriptor), 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct lcd_frame_descriptor) * 2, 1, sizeof(struct lcd_frame_descriptor) * 2, 0, NULL, NULL, &sc->fdesc_tag); if (error != 0) { device_printf(dev, "cannot create bus dma tag\n"); goto failed; } error = bus_dmamem_alloc(sc->fdesc_tag, (void **)&sc->fdesc, BUS_DMA_NOCACHE | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->fdesc_map); if (error != 0) { device_printf(dev, "cannot allocate dma descriptor\n"); goto dmaalloc_failed; } error = bus_dmamap_load(sc->fdesc_tag, sc->fdesc_map, sc->fdesc, sizeof(struct lcd_frame_descriptor) * 2, jzlcd_dmamap_cb, &sc->fdesc_paddr, 0); if (error != 0) { device_printf(dev, "cannot load dma map\n"); goto dmaload_failed; } sc->hdmi_evh = EVENTHANDLER_REGISTER(hdmi_event, jzlcd_hdmi_event, sc, 0); return (0); dmaload_failed: bus_dmamem_free(sc->fdesc_tag, sc->fdesc, sc->fdesc_map); dmaalloc_failed: bus_dma_tag_destroy(sc->fdesc_tag); failed: if (sc->clk_pix != NULL) clk_release(sc->clk_pix); if (sc->clk != NULL) clk_release(sc->clk); if (sc->res != NULL) bus_release_resources(dev, jzlcd_spec, sc->res); return (ENXIO); } static struct fb_info * jzlcd_fb_getinfo(device_t dev) { struct jzlcd_softc *sc; sc = device_get_softc(dev); return (&sc->info); } static device_method_t jzlcd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, jzlcd_probe), DEVMETHOD(device_attach, jzlcd_attach), /* FB interface */ DEVMETHOD(fb_getinfo, jzlcd_fb_getinfo), DEVMETHOD_END }; static driver_t jzlcd_driver = { "fb", jzlcd_methods, sizeof(struct jzlcd_softc), }; static devclass_t jzlcd_devclass; DRIVER_MODULE(fb, simplebus, jzlcd_driver, jzlcd_devclass, 0, 0); Index: user/ngie/bug-237403/sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c =================================================================== --- user/ngie/bug-237403/sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c (revision 348028) +++ user/ngie/bug-237403/sys/ofed/drivers/infiniband/core/ib_roce_gid_mgmt.c (revision 348029) @@ -1,483 +1,484 @@ /*- * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 * * Copyright (c) 2015-2017, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses.
You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include "core_priv.h" +#include #include #include #include #include #include #include static struct workqueue_struct *roce_gid_mgmt_wq; enum gid_op_type { GID_DEL = 0, GID_ADD }; struct roce_netdev_event_work { struct work_struct work; struct net_device *ndev; }; struct roce_rescan_work { struct work_struct work; struct ib_device *ib_dev; }; static const struct { bool (*is_supported)(const struct ib_device *device, u8 port_num); enum ib_gid_type gid_type; } PORT_CAP_TO_GID_TYPE[] = { {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE}, {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP}, }; #define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE) unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port) { int i; unsigned int ret_flags = 0; if (!rdma_protocol_roce(ib_dev, port)) return 1UL << IB_GID_TYPE_IB; for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++) if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port)) ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type; return ret_flags; } EXPORT_SYMBOL(roce_gid_type_mask_support); static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev, u8 port, union ib_gid *gid, struct net_device *ndev) { int i; unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port); struct ib_gid_attr gid_attr; memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; for (i = 0; i != IB_GID_TYPE_SIZE; i++) { if ((1UL << i) & gid_type_mask) { gid_attr.gid_type = i; switch (gid_op) { case GID_ADD: ib_cache_gid_add(ib_dev, port, gid, &gid_attr); break; case GID_DEL: ib_cache_gid_del(ib_dev, port, gid, &gid_attr); break; } } } } static int roce_gid_match_netdev(struct ib_device *ib_dev, u8 port, struct net_device *idev, void *cookie) { struct net_device *ndev = (struct net_device *)cookie; if (idev == NULL) return (0); return (ndev == idev); } static int roce_gid_match_all(struct ib_device *ib_dev, u8 port, struct net_device *idev, void *cookie) { if (idev == NULL) return (0); return (1); } static int roce_gid_enum_netdev_default(struct ib_device *ib_dev, u8 port, struct net_device *idev) { unsigned long gid_type_mask; gid_type_mask = roce_gid_type_mask_support(ib_dev, port); ib_cache_gid_set_default_gid(ib_dev, port, idev, gid_type_mask, IB_CACHE_GID_DEFAULT_MODE_SET); return (hweight_long(gid_type_mask)); } 
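/*
 * Overview of roce_gid_update_addr_callback() below, which reconciles
 * the cached GID table for one (device, port, ndev) tuple:
 *
 * 1. Make sure the default GIDs for the netdev are present.
 * 2. Snapshot every IPv4/IPv6 address of the netdev, and of any VLAN
 *    interface stacked on it, across all VNETs, into a local STAILQ.
 * 3. For each snapshotted address, derive a GID and add it to the
 *    cache unless some supported GID type already maps it.
 * 4. Walk the cached GIDs past the default entries and delete any
 *    entry whose (address, netdev) pair is absent from the snapshot.
 */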
static void roce_gid_update_addr_callback(struct ib_device *device, u8 port, struct net_device *ndev, void *cookie) { struct ipx_entry { STAILQ_ENTRY(ipx_entry) entry; union ipx_addr { struct sockaddr sa[0]; struct sockaddr_in v4; struct sockaddr_in6 v6; } ipx_addr; struct net_device *ndev; }; struct ipx_entry *entry; struct net_device *idev; #if defined(INET) || defined(INET6) struct ifaddr *ifa; #endif VNET_ITERATOR_DECL(vnet_iter); struct ib_gid_attr gid_attr; union ib_gid gid; int default_gids; u16 index_num; int i; STAILQ_HEAD(, ipx_entry) ipx_head; STAILQ_INIT(&ipx_head); /* make sure default GIDs are in */ default_gids = roce_gid_enum_netdev_default(device, port, ndev); VNET_LIST_RLOCK(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); IFNET_RLOCK(); CK_STAILQ_FOREACH(idev, &V_ifnet, if_link) { struct epoch_tracker et; if (idev != ndev) { if (idev->if_type != IFT_L2VLAN) continue; if (ndev != rdma_vlan_dev_real_dev(idev)) continue; } /* clone address information for IPv4 and IPv6 */ NET_EPOCH_ENTER(et); #if defined(INET) CK_STAILQ_FOREACH(ifa, &idev->if_addrhead, ifa_link) { if (ifa->ifa_addr == NULL || ifa->ifa_addr->sa_family != AF_INET) continue; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) { pr_warn("roce_gid_update_addr_callback: " "couldn't allocate entry for IPv4 update\n"); continue; } entry->ipx_addr.v4 = *((struct sockaddr_in *)ifa->ifa_addr); entry->ndev = idev; STAILQ_INSERT_TAIL(&ipx_head, entry, entry); } #endif #if defined(INET6) CK_STAILQ_FOREACH(ifa, &idev->if_addrhead, ifa_link) { if (ifa->ifa_addr == NULL || ifa->ifa_addr->sa_family != AF_INET6) continue; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) { pr_warn("roce_gid_update_addr_callback: " "couldn't allocate entry for IPv6 update\n"); continue; } entry->ipx_addr.v6 = *((struct sockaddr_in6 *)ifa->ifa_addr); entry->ndev = idev; /* trash IPv6 scope ID */ sa6_recoverscope(&entry->ipx_addr.v6); entry->ipx_addr.v6.sin6_scope_id = 0; STAILQ_INSERT_TAIL(&ipx_head, entry, entry); } #endif NET_EPOCH_EXIT(et); } IFNET_RUNLOCK(); CURVNET_RESTORE(); } VNET_LIST_RUNLOCK(); /* add missing GIDs, if any */ STAILQ_FOREACH(entry, &ipx_head, entry) { unsigned long gid_type_mask = roce_gid_type_mask_support(device, port); if (rdma_ip2gid(&entry->ipx_addr.sa[0], &gid) != 0) continue; for (i = 0; i != IB_GID_TYPE_SIZE; i++) { if (!((1UL << i) & gid_type_mask)) continue; /* check if entry found */ if (ib_find_cached_gid_by_port(device, &gid, i, port, entry->ndev, &index_num) == 0) break; } if (i != IB_GID_TYPE_SIZE) continue; /* add new GID */ update_gid(GID_ADD, device, port, &gid, entry->ndev); } /* remove stale GIDs, if any */ for (i = default_gids; ib_get_cached_gid(device, port, i, &gid, &gid_attr) == 0; i++) { union ipx_addr ipx; /* check for valid network device pointer */ ndev = gid_attr.ndev; if (ndev == NULL) continue; dev_put(ndev); /* don't delete empty entries */ if (memcmp(&gid, &zgid, sizeof(zgid)) == 0) continue; /* zero default */ memset(&ipx, 0, sizeof(ipx)); rdma_gid2ip(&ipx.sa[0], &gid); STAILQ_FOREACH(entry, &ipx_head, entry) { if (entry->ndev == ndev && memcmp(&entry->ipx_addr, &ipx, sizeof(ipx)) == 0) break; } /* check if entry found */ if (entry != NULL) continue; /* remove GID */ update_gid(GID_DEL, device, port, &gid, ndev); } while ((entry = STAILQ_FIRST(&ipx_head))) { STAILQ_REMOVE_HEAD(&ipx_head, entry); kfree(entry); } } static void roce_gid_queue_scan_event_handler(struct work_struct *_work) { struct roce_netdev_event_work *work = container_of(_work, struct 
roce_netdev_event_work, work); ib_enum_all_roce_netdevs(roce_gid_match_netdev, work->ndev, roce_gid_update_addr_callback, NULL); dev_put(work->ndev); kfree(work); } static void roce_gid_queue_scan_event(struct net_device *ndev) { struct roce_netdev_event_work *work; retry: switch (ndev->if_type) { case IFT_ETHER: break; case IFT_L2VLAN: ndev = rdma_vlan_dev_real_dev(ndev); if (ndev != NULL) goto retry; /* FALLTHROUGH */ default: return; } work = kmalloc(sizeof(*work), GFP_ATOMIC); if (!work) { pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n"); return; } INIT_WORK(&work->work, roce_gid_queue_scan_event_handler); dev_hold(ndev); work->ndev = ndev; queue_work(roce_gid_mgmt_wq, &work->work); } static void roce_gid_delete_all_event_handler(struct work_struct *_work) { struct roce_netdev_event_work *work = container_of(_work, struct roce_netdev_event_work, work); ib_cache_gid_del_all_by_netdev(work->ndev); dev_put(work->ndev); kfree(work); } static void roce_gid_delete_all_event(struct net_device *ndev) { struct roce_netdev_event_work *work; work = kmalloc(sizeof(*work), GFP_ATOMIC); if (!work) { pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n"); return; } INIT_WORK(&work->work, roce_gid_delete_all_event_handler); dev_hold(ndev); work->ndev = ndev; queue_work(roce_gid_mgmt_wq, &work->work); /* make sure job is complete before returning */ flush_workqueue(roce_gid_mgmt_wq); } static int inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *ndev = ptr; switch (event) { case NETDEV_UNREGISTER: roce_gid_delete_all_event(ndev); break; case NETDEV_REGISTER: case NETDEV_CHANGEADDR: case NETDEV_CHANGEIFADDR: roce_gid_queue_scan_event(ndev); break; default: break; } return NOTIFY_DONE; } static struct notifier_block nb_inetaddr = { .notifier_call = inetaddr_event }; static eventhandler_tag eh_ifnet_event; static void roce_ifnet_event(void *arg, struct ifnet *ifp, int event) { if (event != IFNET_EVENT_PCP || is_vlan_dev(ifp)) return; /* make sure GID table is reloaded */ roce_gid_delete_all_event(ifp); roce_gid_queue_scan_event(ifp); } static void roce_rescan_device_handler(struct work_struct *_work) { struct roce_rescan_work *work = container_of(_work, struct roce_rescan_work, work); ib_enum_roce_netdev(work->ib_dev, roce_gid_match_all, NULL, roce_gid_update_addr_callback, NULL); kfree(work); } /* Caller must flush system workqueue before removing the ib_device */ int roce_rescan_device(struct ib_device *ib_dev) { struct roce_rescan_work *work = kmalloc(sizeof(*work), GFP_KERNEL); if (!work) return -ENOMEM; work->ib_dev = ib_dev; INIT_WORK(&work->work, roce_rescan_device_handler); queue_work(roce_gid_mgmt_wq, &work->work); return 0; } int __init roce_gid_mgmt_init(void) { roce_gid_mgmt_wq = alloc_ordered_workqueue("roce_gid_mgmt_wq", 0); if (!roce_gid_mgmt_wq) { pr_warn("roce_gid_mgmt: can't allocate work queue\n"); return -ENOMEM; } register_inetaddr_notifier(&nb_inetaddr); /* * We rely on the netdevice notifier to enumerate all existing * devices in the system. Register to this notifier last to * make sure we will not miss any IP add/del callbacks. 
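 *
 * Note that the same notifier block, nb_inetaddr, is registered on both
 * the inetaddr and netdevice chains, so a single callback handles
 * address and device events alike.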
*/ register_netdevice_notifier(&nb_inetaddr); eh_ifnet_event = EVENTHANDLER_REGISTER(ifnet_event, roce_ifnet_event, NULL, EVENTHANDLER_PRI_ANY); return 0; } void __exit roce_gid_mgmt_cleanup(void) { if (eh_ifnet_event != NULL) EVENTHANDLER_DEREGISTER(ifnet_event, eh_ifnet_event); unregister_inetaddr_notifier(&nb_inetaddr); unregister_netdevice_notifier(&nb_inetaddr); /* * Ensure all gid deletion tasks complete before we go down, * to avoid any reference to free'd memory. By the time * ib-core is removed, all physical devices have been removed, * so no issue with remaining hardware contexts. */ synchronize_rcu(); drain_workqueue(roce_gid_mgmt_wq); destroy_workqueue(roce_gid_mgmt_wq); } Index: user/ngie/bug-237403/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c =================================================================== --- user/ngie/bug-237403/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c (revision 348028) +++ user/ngie/bug-237403/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c (revision 348029) @@ -1,1750 +1,1751 @@ /*- * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 * * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include "ipoib.h" +#include static int ipoib_resolvemulti(struct ifnet *, struct sockaddr **, struct sockaddr *); #include #include #include #include #include /* For ARPHRD_xxx */ #include #include #include #include MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("IP-over-InfiniBand net driver"); MODULE_LICENSE("Dual BSD/GPL"); int ipoib_sendq_size = IPOIB_TX_RING_SIZE; int ipoib_recvq_size = IPOIB_RX_RING_SIZE; module_param_named(send_queue_size, ipoib_sendq_size, int, 0444); MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue"); module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG int ipoib_debug_level = 1; module_param_named(debug_level, ipoib_debug_level, int, 0644); MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); #endif struct ipoib_path_iter { struct ipoib_dev_priv *priv; struct ipoib_path path; }; static const u8 ipv4_bcast_addr[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff }; struct workqueue_struct *ipoib_workqueue; struct ib_sa_client ipoib_sa_client; static void ipoib_add_one(struct ib_device *device); static void ipoib_remove_one(struct ib_device *device, void *client_data); static struct net_device *ipoib_get_net_dev_by_params( struct ib_device *dev, u8 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr, void *client_data); static void ipoib_start(struct ifnet *dev); static int ipoib_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro); static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void ipoib_input(struct ifnet *ifp, struct mbuf *m); #define IPOIB_MTAP(_ifp, _m) \ do { \ if (bpf_peers_present((_ifp)->if_bpf)) { \ M_ASSERTVALID(_m); \ ipoib_mtap_mb((_ifp), (_m)); \ } \ } while (0) static struct unrhdr *ipoib_unrhdr; static void ipoib_unrhdr_init(void *arg) { ipoib_unrhdr = new_unrhdr(0, 65535, NULL); } SYSINIT(ipoib_unrhdr_init, SI_SUB_KLD - 1, SI_ORDER_ANY, ipoib_unrhdr_init, NULL); static void ipoib_unrhdr_uninit(void *arg) { if (ipoib_unrhdr != NULL) { struct unrhdr *hdr; hdr = ipoib_unrhdr; ipoib_unrhdr = NULL; delete_unrhdr(hdr); } } SYSUNINIT(ipoib_unrhdr_uninit, SI_SUB_KLD - 1, SI_ORDER_ANY, ipoib_unrhdr_uninit, NULL); /* * This is for clients that have an ipoib_header in the mbuf. 
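 * The IPoIB header is presented to BPF as a synthetic Ethernet header
 * (the interface is bpfattach'ed with DLT_EN10MB): the first six bytes
 * of the 20-byte hardware address become the destination, the source is
 * zeroed, and the protocol field is carried over unchanged.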
*/ static void ipoib_mtap_mb(struct ifnet *ifp, struct mbuf *mb) { struct ipoib_header *ih; struct ether_header eh; ih = mtod(mb, struct ipoib_header *); eh.ether_type = ih->proto; bcopy(ih->hwaddr, &eh.ether_dhost, ETHER_ADDR_LEN); bzero(&eh.ether_shost, ETHER_ADDR_LEN); mb->m_data += sizeof(struct ipoib_header); mb->m_len -= sizeof(struct ipoib_header); bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb); mb->m_data -= sizeof(struct ipoib_header); mb->m_len += sizeof(struct ipoib_header); } void ipoib_mtap_proto(struct ifnet *ifp, struct mbuf *mb, uint16_t proto) { struct ether_header eh; eh.ether_type = proto; bzero(&eh.ether_shost, ETHER_ADDR_LEN); bzero(&eh.ether_dhost, ETHER_ADDR_LEN); bpf_mtap2(ifp->if_bpf, &eh, sizeof(eh), mb); } static struct ib_client ipoib_client = { .name = "ipoib", .add = ipoib_add_one, .remove = ipoib_remove_one, .get_net_dev_by_params = ipoib_get_net_dev_by_params, }; int ipoib_open(struct ipoib_dev_priv *priv) { struct ifnet *dev = priv->dev; ipoib_dbg(priv, "bringing up interface\n"); set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); if (ipoib_pkey_dev_delay_open(priv)) return 0; if (ipoib_ib_dev_open(priv)) goto err_disable; if (ipoib_ib_dev_up(priv)) goto err_stop; if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { struct ipoib_dev_priv *cpriv; /* Bring up any child interfaces too */ mutex_lock(&priv->vlan_mutex); list_for_each_entry(cpriv, &priv->child_intfs, list) if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) == 0) ipoib_open(cpriv); mutex_unlock(&priv->vlan_mutex); } dev->if_drv_flags |= IFF_DRV_RUNNING; dev->if_drv_flags &= ~IFF_DRV_OACTIVE; return 0; err_stop: ipoib_ib_dev_stop(priv, 1); err_disable: clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); return -EINVAL; } static void ipoib_init(void *arg) { struct ifnet *dev; struct ipoib_dev_priv *priv; priv = arg; dev = priv->dev; if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) ipoib_open(priv); queue_work(ipoib_workqueue, &priv->flush_light); } static int ipoib_stop(struct ipoib_dev_priv *priv) { struct ifnet *dev = priv->dev; ipoib_dbg(priv, "stopping interface\n"); clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ipoib_ib_dev_down(priv, 0); ipoib_ib_dev_stop(priv, 0); if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { struct ipoib_dev_priv *cpriv; /* Bring down any child interfaces too */ mutex_lock(&priv->vlan_mutex); list_for_each_entry(cpriv, &priv->child_intfs, list) if ((cpriv->dev->if_drv_flags & IFF_DRV_RUNNING) != 0) ipoib_stop(cpriv); mutex_unlock(&priv->vlan_mutex); } return 0; } static int ipoib_propagate_ifnet_mtu(struct ipoib_dev_priv *priv, int new_mtu, bool propagate) { struct ifnet *ifp; struct ifreq ifr; int error; ifp = priv->dev; if (ifp->if_mtu == new_mtu) return (0); if (propagate) { strlcpy(ifr.ifr_name, if_name(ifp), IFNAMSIZ); ifr.ifr_mtu = new_mtu; CURVNET_SET(ifp->if_vnet); error = ifhwioctl(SIOCSIFMTU, ifp, (caddr_t)&ifr, curthread); CURVNET_RESTORE(); } else { ifp->if_mtu = new_mtu; error = 0; } return (error); } int ipoib_change_mtu(struct ipoib_dev_priv *priv, int new_mtu, bool propagate) { int error, prev_admin_mtu; /* dev->if_mtu > 2K ==> connected mode */ if (ipoib_cm_admin_enabled(priv)) { if (new_mtu > IPOIB_CM_MTU(ipoib_cm_max_mtu(priv))) return -EINVAL; if (new_mtu > priv->mcast_mtu) ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n", priv->mcast_mtu); return (ipoib_propagate_ifnet_mtu(priv, new_mtu, propagate)); } if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) return -EINVAL; prev_admin_mtu = 
priv->admin_mtu; priv->admin_mtu = new_mtu; error = ipoib_propagate_ifnet_mtu(priv, min(priv->mcast_mtu, priv->admin_mtu), propagate); if (error == 0) { /* check for MTU change to avoid infinite loop */ if (prev_admin_mtu != new_mtu) queue_work(ipoib_workqueue, &priv->flush_light); } else priv->admin_mtu = prev_admin_mtu; return (error); } static int ipoib_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ipoib_dev_priv *priv = ifp->if_softc; struct ifaddr *ifa = (struct ifaddr *) data; struct ifreq *ifr = (struct ifreq *) data; int error = 0; /* check if detaching */ if (priv == NULL || priv->gone != 0) return (ENXIO); switch (command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) error = -ipoib_open(priv); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) ipoib_stop(priv); break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_drv_flags & IFF_DRV_RUNNING) queue_work(ipoib_workqueue, &priv->restart_task); break; case SIOCSIFADDR: ifp->if_flags |= IFF_UP; switch (ifa->ifa_addr->sa_family) { #ifdef INET case AF_INET: ifp->if_init(ifp->if_softc); /* before arpwhohas */ arp_ifinit(ifp, ifa); break; #endif default: ifp->if_init(ifp->if_softc); break; } break; case SIOCGIFADDR: bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0], INFINIBAND_ALEN); break; case SIOCSIFMTU: /* * Set the interface MTU. */ error = -ipoib_change_mtu(priv, ifr->ifr_mtu, false); break; default: error = EINVAL; break; } return (error); } static struct ipoib_path * __path_find(struct ipoib_dev_priv *priv, void *gid) { struct rb_node *n = priv->path_tree.rb_node; struct ipoib_path *path; int ret; while (n) { path = rb_entry(n, struct ipoib_path, rb_node); ret = memcmp(gid, path->pathrec.dgid.raw, sizeof (union ib_gid)); if (ret < 0) n = n->rb_left; else if (ret > 0) n = n->rb_right; else return path; } return NULL; } static int __path_add(struct ipoib_dev_priv *priv, struct ipoib_path *path) { struct rb_node **n = &priv->path_tree.rb_node; struct rb_node *pn = NULL; struct ipoib_path *tpath; int ret; while (*n) { pn = *n; tpath = rb_entry(pn, struct ipoib_path, rb_node); ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw, sizeof (union ib_gid)); if (ret < 0) n = &pn->rb_left; else if (ret > 0) n = &pn->rb_right; else return -EEXIST; } rb_link_node(&path->rb_node, pn, n); rb_insert_color(&path->rb_node, &priv->path_tree); list_add_tail(&path->list, &priv->path_list); return 0; } void ipoib_path_free(struct ipoib_dev_priv *priv, struct ipoib_path *path) { _IF_DRAIN(&path->queue); if (path->ah) ipoib_put_ah(path->ah); if (ipoib_cm_get(path)) ipoib_cm_destroy_tx(ipoib_cm_get(path)); kfree(path); } #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG struct ipoib_path_iter * ipoib_path_iter_init(struct ipoib_dev_priv *priv) { struct ipoib_path_iter *iter; iter = kmalloc(sizeof *iter, GFP_KERNEL); if (!iter) return NULL; iter->priv = priv; memset(iter->path.pathrec.dgid.raw, 0, 16); if (ipoib_path_iter_next(iter)) { kfree(iter); return NULL; } return iter; } int ipoib_path_iter_next(struct ipoib_path_iter *iter) { struct ipoib_dev_priv *priv = iter->priv; struct rb_node *n; struct ipoib_path *path; int ret = 1; spin_lock_irq(&priv->lock); n = rb_first(&priv->path_tree); while (n) { path = rb_entry(n, struct ipoib_path, rb_node); if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, sizeof (union ib_gid)) < 0) { iter->path = *path; ret = 0; break; } n = rb_next(n); } spin_unlock_irq(&priv->lock); return ret; } void ipoib_path_iter_read(struct ipoib_path_iter 
*iter, struct ipoib_path *path) { *path = iter->path; } #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */ void ipoib_mark_paths_invalid(struct ipoib_dev_priv *priv) { struct ipoib_path *path, *tp; spin_lock_irq(&priv->lock); list_for_each_entry_safe(path, tp, &priv->path_list, list) { ipoib_dbg(priv, "mark path LID 0x%04x GID %16D invalid\n", be16_to_cpu(path->pathrec.dlid), path->pathrec.dgid.raw, ":"); path->valid = 0; } spin_unlock_irq(&priv->lock); } void ipoib_flush_paths(struct ipoib_dev_priv *priv) { struct ipoib_path *path, *tp; LIST_HEAD(remove_list); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); list_splice_init(&priv->path_list, &remove_list); list_for_each_entry(path, &remove_list, list) rb_erase(&path->rb_node, &priv->path_tree); list_for_each_entry_safe(path, tp, &remove_list, list) { if (path->query) ib_sa_cancel_query(path->query_id, path->query); spin_unlock_irqrestore(&priv->lock, flags); wait_for_completion(&path->done); ipoib_path_free(priv, path); spin_lock_irqsave(&priv->lock, flags); } spin_unlock_irqrestore(&priv->lock, flags); } static void path_rec_completion(int status, struct ib_sa_path_rec *pathrec, void *path_ptr) { struct ipoib_path *path = path_ptr; struct ipoib_dev_priv *priv = path->priv; struct ifnet *dev = priv->dev; struct ipoib_ah *ah = NULL; struct ipoib_ah *old_ah = NULL; struct ifqueue mbqueue; struct mbuf *mb; unsigned long flags; if (!status) ipoib_dbg(priv, "PathRec LID 0x%04x for GID %16D\n", be16_to_cpu(pathrec->dlid), pathrec->dgid.raw, ":"); else ipoib_dbg(priv, "PathRec status %d for GID %16D\n", status, path->pathrec.dgid.raw, ":"); bzero(&mbqueue, sizeof(mbqueue)); if (!status) { struct ib_ah_attr av; if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av)) ah = ipoib_create_ah(priv, priv->pd, &av); } spin_lock_irqsave(&priv->lock, flags); if (ah) { path->pathrec = *pathrec; old_ah = path->ah; path->ah = ah; ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", ah, be16_to_cpu(pathrec->dlid), pathrec->sl); for (;;) { _IF_DEQUEUE(&path->queue, mb); if (mb == NULL) break; _IF_ENQUEUE(&mbqueue, mb); } #ifdef CONFIG_INFINIBAND_IPOIB_CM if (ipoib_cm_enabled(priv, path->hwaddr) && !ipoib_cm_get(path)) ipoib_cm_set(path, ipoib_cm_create_tx(priv, path)); #endif path->valid = 1; } path->query = NULL; complete(&path->done); spin_unlock_irqrestore(&priv->lock, flags); if (old_ah) ipoib_put_ah(old_ah); for (;;) { _IF_DEQUEUE(&mbqueue, mb); if (mb == NULL) break; mb->m_pkthdr.rcvif = dev; if (dev->if_transmit(dev, mb)) ipoib_warn(priv, "dev_queue_xmit failed " "to requeue packet\n"); } } static struct ipoib_path * path_rec_create(struct ipoib_dev_priv *priv, uint8_t *hwaddr) { struct ipoib_path *path; if (!priv->broadcast) return NULL; path = kzalloc(sizeof *path, GFP_ATOMIC); if (!path) return NULL; path->priv = priv; bzero(&path->queue, sizeof(path->queue)); #ifdef CONFIG_INFINIBAND_IPOIB_CM memcpy(&path->hwaddr, hwaddr, INFINIBAND_ALEN); #endif memcpy(path->pathrec.dgid.raw, &hwaddr[4], sizeof (union ib_gid)); path->pathrec.sgid = priv->local_gid; path->pathrec.pkey = cpu_to_be16(priv->pkey); path->pathrec.numb_path = 1; path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class; return path; } static int path_rec_start(struct ipoib_dev_priv *priv, struct ipoib_path *path) { struct ifnet *dev = priv->dev; ib_sa_comp_mask comp_mask = IB_SA_PATH_REC_MTU_SELECTOR | IB_SA_PATH_REC_MTU; struct ib_sa_path_rec p_rec; p_rec = path->pathrec; p_rec.mtu_selector = IB_SA_GT; switch (roundup_pow_of_two(dev->if_mtu + 
IPOIB_ENCAP_LEN)) { case 512: p_rec.mtu = IB_MTU_256; break; case 1024: p_rec.mtu = IB_MTU_512; break; case 2048: p_rec.mtu = IB_MTU_1024; break; case 4096: p_rec.mtu = IB_MTU_2048; break; default: /* Wildcard everything */ comp_mask = 0; p_rec.mtu = 0; p_rec.mtu_selector = 0; } ipoib_dbg(priv, "Start path record lookup for %16D MTU > %d\n", p_rec.dgid.raw, ":", comp_mask ? ib_mtu_enum_to_int(p_rec.mtu) : 0); init_completion(&path->done); path->query_id = ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port, &p_rec, comp_mask | IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_NUMB_PATH | IB_SA_PATH_REC_TRAFFIC_CLASS | IB_SA_PATH_REC_PKEY, 1000, GFP_ATOMIC, path_rec_completion, path, &path->query); if (path->query_id < 0) { ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id); path->query = NULL; complete(&path->done); return path->query_id; } return 0; } static void ipoib_unicast_send(struct mbuf *mb, struct ipoib_dev_priv *priv, struct ipoib_header *eh) { struct ipoib_path *path; path = __path_find(priv, eh->hwaddr + 4); if (!path || !path->valid) { int new_path = 0; if (!path) { path = path_rec_create(priv, eh->hwaddr); new_path = 1; } if (path) { if (_IF_QLEN(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) _IF_ENQUEUE(&path->queue, mb); else { if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1); m_freem(mb); } if (!path->query && path_rec_start(priv, path)) { spin_unlock_irqrestore(&priv->lock, flags); if (new_path) ipoib_path_free(priv, path); return; } else __path_add(priv, path); } else { if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1); m_freem(mb); } return; } if (ipoib_cm_get(path) && ipoib_cm_up(path)) { ipoib_cm_send(priv, mb, ipoib_cm_get(path)); } else if (path->ah) { ipoib_send(priv, mb, path->ah, IPOIB_QPN(eh->hwaddr)); } else if ((path->query || !path_rec_start(priv, path)) && path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) { _IF_ENQUEUE(&path->queue, mb); } else { if_inc_counter(priv->dev, IFCOUNTER_OERRORS, 1); m_freem(mb); } } static int ipoib_send_one(struct ipoib_dev_priv *priv, struct mbuf *mb) { struct ipoib_header *eh; eh = mtod(mb, struct ipoib_header *); if (IPOIB_IS_MULTICAST(eh->hwaddr)) { /* Add in the P_Key for multicast*/ eh->hwaddr[8] = (priv->pkey >> 8) & 0xff; eh->hwaddr[9] = priv->pkey & 0xff; ipoib_mcast_send(priv, eh->hwaddr + 4, mb); } else ipoib_unicast_send(mb, priv, eh); return 0; } static void _ipoib_start(struct ifnet *dev, struct ipoib_dev_priv *priv) { struct mbuf *mb; if ((dev->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; spin_lock(&priv->lock); while (!IFQ_DRV_IS_EMPTY(&dev->if_snd) && (dev->if_drv_flags & IFF_DRV_OACTIVE) == 0) { IFQ_DRV_DEQUEUE(&dev->if_snd, mb); if (mb == NULL) break; IPOIB_MTAP(dev, mb); ipoib_send_one(priv, mb); } spin_unlock(&priv->lock); } static void ipoib_start(struct ifnet *dev) { _ipoib_start(dev, dev->if_softc); } static void ipoib_vlan_start(struct ifnet *dev) { struct ipoib_dev_priv *priv; struct mbuf *mb; priv = VLAN_COOKIE(dev); if (priv != NULL) return _ipoib_start(dev, priv); while (!IFQ_DRV_IS_EMPTY(&dev->if_snd)) { IFQ_DRV_DEQUEUE(&dev->if_snd, mb); if (mb == NULL) break; m_freem(mb); if_inc_counter(dev, IFCOUNTER_OERRORS, 1); } } int ipoib_dev_init(struct ipoib_dev_priv *priv, struct ib_device *ca, int port) { /* Allocate RX/TX "rings" to hold queued mbs */ priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring, GFP_KERNEL); if (!priv->rx_ring) { printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n", ca->name, ipoib_recvq_size); 
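		/* Nothing else has been allocated yet; no unwinding needed. */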
goto out; } priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, GFP_KERNEL); if (!priv->tx_ring) { printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n", ca->name, ipoib_sendq_size); goto out_rx_ring_cleanup; } memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring); /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ if (ipoib_ib_dev_init(priv, ca, port)) goto out_tx_ring_cleanup; return 0; out_tx_ring_cleanup: kfree(priv->tx_ring); out_rx_ring_cleanup: kfree(priv->rx_ring); out: return -ENOMEM; } static void ipoib_detach(struct ipoib_dev_priv *priv) { struct ifnet *dev; dev = priv->dev; if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { priv->gone = 1; bpfdetach(dev); if_detach(dev); if_free(dev); free_unr(ipoib_unrhdr, priv->unit); } else VLAN_SETCOOKIE(priv->dev, NULL); free(priv, M_TEMP); } void ipoib_dev_cleanup(struct ipoib_dev_priv *priv) { struct ipoib_dev_priv *cpriv, *tcpriv; /* Delete any child interfaces first */ list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) { ipoib_dev_cleanup(cpriv); ipoib_detach(cpriv); } ipoib_ib_dev_cleanup(priv); kfree(priv->rx_ring); kfree(priv->tx_ring); priv->rx_ring = NULL; priv->tx_ring = NULL; } static struct ipoib_dev_priv * ipoib_priv_alloc(void) { struct ipoib_dev_priv *priv; priv = malloc(sizeof(struct ipoib_dev_priv), M_TEMP, M_ZERO|M_WAITOK); spin_lock_init(&priv->lock); spin_lock_init(&priv->drain_lock); mutex_init(&priv->vlan_mutex); INIT_LIST_HEAD(&priv->path_list); INIT_LIST_HEAD(&priv->child_intfs); INIT_LIST_HEAD(&priv->dead_ahs); INIT_LIST_HEAD(&priv->multicast_list); INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll); INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task); INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light); INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal); INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy); INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); memcpy(priv->broadcastaddr, ipv4_bcast_addr, INFINIBAND_ALEN); return (priv); } struct ipoib_dev_priv * ipoib_intf_alloc(const char *name) { struct ipoib_dev_priv *priv; struct sockaddr_dl *sdl; struct ifnet *dev; priv = ipoib_priv_alloc(); dev = priv->dev = if_alloc(IFT_INFINIBAND); if (!dev) { free(priv, M_TEMP); return NULL; } dev->if_softc = priv; priv->unit = alloc_unr(ipoib_unrhdr); if (priv->unit == -1) { if_free(dev); free(priv, M_TEMP); return NULL; } if_initname(dev, name, priv->unit); dev->if_flags = IFF_BROADCAST | IFF_MULTICAST; dev->if_addrlen = INFINIBAND_ALEN; dev->if_hdrlen = IPOIB_HEADER_LEN; if_attach(dev); dev->if_init = ipoib_init; dev->if_ioctl = ipoib_ioctl; dev->if_start = ipoib_start; dev->if_output = ipoib_output; dev->if_input = ipoib_input; dev->if_resolvemulti = ipoib_resolvemulti; dev->if_baudrate = IF_Gbps(10); dev->if_broadcastaddr = priv->broadcastaddr; dev->if_snd.ifq_maxlen = ipoib_sendq_size * 2; sdl = (struct sockaddr_dl *)dev->if_addr->ifa_addr; sdl->sdl_type = IFT_INFINIBAND; sdl->sdl_alen = dev->if_addrlen; priv->dev = dev; if_link_state_change(dev, LINK_STATE_DOWN); bpfattach(dev, DLT_EN10MB, ETHER_HDR_LEN); return dev->if_softc; } int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca) { struct ib_device_attr *device_attr = &hca->attrs; priv->hca_caps = device_attr->device_cap_flags; priv->dev->if_hwassist = 0; priv->dev->if_capabilities = 0; #ifndef CONFIG_INFINIBAND_IPOIB_CM 
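	/*
	 * The UD checksum capability is not advertised when connected
	 * mode is compiled in, since CM traffic cannot use it.
	 */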
if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { set_bit(IPOIB_FLAG_CSUM, &priv->flags); priv->dev->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; priv->dev->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; } #if 0 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) { priv->dev->if_capabilities |= IFCAP_TSO4; priv->dev->if_hwassist |= CSUM_TSO; } #endif #endif priv->dev->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_LINKSTATE; priv->dev->if_capenable = priv->dev->if_capabilities; return 0; } static struct ifnet * ipoib_add_port(const char *format, struct ib_device *hca, u8 port) { struct ipoib_dev_priv *priv; struct ib_port_attr attr; int result = -ENOMEM; priv = ipoib_intf_alloc(format); if (!priv) goto alloc_mem_failed; if (!ib_query_port(hca, port, &attr)) priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); else { printk(KERN_WARNING "%s: ib_query_port %d failed\n", hca->name, port); goto device_init_failed; } /* MTU will be reset when mcast join happens */ priv->dev->if_mtu = IPOIB_UD_MTU(priv->max_ib_mtu); priv->mcast_mtu = priv->admin_mtu = priv->dev->if_mtu; result = ib_query_pkey(hca, port, 0, &priv->pkey); if (result) { printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", hca->name, port, result); goto device_init_failed; } if (ipoib_set_dev_features(priv, hca)) goto device_init_failed; /* * Set the full membership bit, so that we join the right * broadcast group, etc. */ priv->pkey |= 0x8000; priv->broadcastaddr[8] = priv->pkey >> 8; priv->broadcastaddr[9] = priv->pkey & 0xff; result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL); if (result) { printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", hca->name, port, result); goto device_init_failed; } memcpy(IF_LLADDR(priv->dev) + 4, priv->local_gid.raw, sizeof (union ib_gid)); result = ipoib_dev_init(priv, hca, port); if (result < 0) { printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", hca->name, port, result); goto device_init_failed; } if (ipoib_cm_admin_enabled(priv)) priv->dev->if_mtu = IPOIB_CM_MTU(ipoib_cm_max_mtu(priv)); INIT_IB_EVENT_HANDLER(&priv->event_handler, priv->ca, ipoib_event); result = ib_register_event_handler(&priv->event_handler); if (result < 0) { printk(KERN_WARNING "%s: ib_register_event_handler failed for " "port %d (ret = %d)\n", hca->name, port, result); goto event_failed; } if_printf(priv->dev, "Attached to %s port %d\n", hca->name, port); return priv->dev; event_failed: ipoib_dev_cleanup(priv); device_init_failed: ipoib_detach(priv); alloc_mem_failed: return ERR_PTR(result); } static void ipoib_add_one(struct ib_device *device) { struct list_head *dev_list; struct ifnet *dev; struct ipoib_dev_priv *priv; int s, e, p; if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) return; dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); if (!dev_list) return; INIT_LIST_HEAD(dev_list); if (device->node_type == RDMA_NODE_IB_SWITCH) { s = 0; e = 0; } else { s = 1; e = device->phys_port_cnt; } for (p = s; p <= e; ++p) { if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND) continue; dev = ipoib_add_port("ib", device, p); if (!IS_ERR(dev)) { priv = dev->if_softc; list_add_tail(&priv->list, dev_list); } } ib_set_client_data(device, &ipoib_client, dev_list); } static void ipoib_remove_one(struct ib_device *device, void *client_data) { struct ipoib_dev_priv *priv, *tmp; struct list_head *dev_list = client_data; if (!dev_list) return; if (rdma_node_get_transport(device->node_type) != 
RDMA_TRANSPORT_IB) return; list_for_each_entry_safe(priv, tmp, dev_list, list) { if (rdma_port_get_link_layer(device, priv->port) != IB_LINK_LAYER_INFINIBAND) continue; ipoib_stop(priv); ib_unregister_event_handler(&priv->event_handler); /* dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); */ flush_workqueue(ipoib_workqueue); ipoib_dev_cleanup(priv); ipoib_detach(priv); } kfree(dev_list); } static int ipoib_match_dev_addr(const struct sockaddr *addr, struct net_device *dev) { struct epoch_tracker et; struct ifaddr *ifa; int retval = 0; CURVNET_SET(dev->if_vnet); NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifa, &dev->if_addrhead, ifa_link) { if (ifa->ifa_addr == NULL || ifa->ifa_addr->sa_family != addr->sa_family || ifa->ifa_addr->sa_len != addr->sa_len) { continue; } if (memcmp(ifa->ifa_addr, addr, addr->sa_len) == 0) { retval = 1; break; } } NET_EPOCH_EXIT(et); CURVNET_RESTORE(); return (retval); } /* * ipoib_match_gid_pkey_addr - returns the number of IPoIB netdevs on * top a given ipoib device matching a pkey_index and address, if one * exists. * * @found_net_dev: contains a matching net_device if the return value * >= 1, with a reference held. */ static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv, const union ib_gid *gid, u16 pkey_index, const struct sockaddr *addr, struct net_device **found_net_dev) { struct ipoib_dev_priv *child_priv; int matches = 0; if (priv->pkey_index == pkey_index && (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) { if (addr == NULL || ipoib_match_dev_addr(addr, priv->dev) != 0) { if (*found_net_dev == NULL) { struct net_device *net_dev; if (priv->parent != NULL) net_dev = priv->parent; else net_dev = priv->dev; *found_net_dev = net_dev; dev_hold(net_dev); } matches++; } } /* Check child interfaces */ mutex_lock(&priv->vlan_mutex); list_for_each_entry(child_priv, &priv->child_intfs, list) { matches += ipoib_match_gid_pkey_addr(child_priv, gid, pkey_index, addr, found_net_dev); if (matches > 1) break; } mutex_unlock(&priv->vlan_mutex); return matches; } /* * __ipoib_get_net_dev_by_params - returns the number of matching * net_devs found (between 0 and 2). Also return the matching * net_device in the @net_dev parameter, holding a reference to the * net_device, if the number of matches >= 1 */ static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port, u16 pkey_index, const union ib_gid *gid, const struct sockaddr *addr, struct net_device **net_dev) { struct ipoib_dev_priv *priv; int matches = 0; *net_dev = NULL; list_for_each_entry(priv, dev_list, list) { if (priv->port != port) continue; matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index, addr, net_dev); if (matches > 1) break; } return matches; } static struct net_device * ipoib_get_net_dev_by_params(struct ib_device *dev, u8 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr, void *client_data) { struct net_device *net_dev; struct list_head *dev_list = client_data; u16 pkey_index; int matches; int ret; if (!rdma_protocol_ib(dev, port)) return NULL; ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index); if (ret) return NULL; if (!dev_list) return NULL; /* See if we can find a unique device matching the L2 parameters */ matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, gid, NULL, &net_dev); switch (matches) { case 0: return NULL; case 1: return net_dev; } dev_put(net_dev); /* Couldn't find a unique device with L2 parameters only. 
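 * (more than one interface matched the given GID and P_Key index).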
Use L3 * address to uniquely match the net device */ matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index, gid, addr, &net_dev); switch (matches) { case 0: return NULL; default: dev_warn_ratelimited(&dev->dev, "duplicate IP address detected\n"); /* Fall through */ case 1: return net_dev; } } static void ipoib_config_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag) { struct ipoib_dev_priv *parent; struct ipoib_dev_priv *priv; struct ifnet *dev; uint16_t pkey; int error; if (ifp->if_type != IFT_INFINIBAND) return; dev = VLAN_DEVAT(ifp, vtag); if (dev == NULL) return; priv = NULL; error = 0; parent = ifp->if_softc; /* We only support 15 bits of pkey. */ if (vtag & 0x8000) return; pkey = vtag | 0x8000; /* Set full membership bit. */ if (pkey == parent->pkey) return; /* Check for dups */ mutex_lock(&parent->vlan_mutex); list_for_each_entry(priv, &parent->child_intfs, list) { if (priv->pkey == pkey) { priv = NULL; error = EBUSY; goto out; } } priv = ipoib_priv_alloc(); priv->dev = dev; priv->max_ib_mtu = parent->max_ib_mtu; priv->mcast_mtu = priv->admin_mtu = parent->dev->if_mtu; set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); error = ipoib_set_dev_features(priv, parent->ca); if (error) goto out; priv->pkey = pkey; priv->broadcastaddr[8] = pkey >> 8; priv->broadcastaddr[9] = pkey & 0xff; dev->if_broadcastaddr = priv->broadcastaddr; error = ipoib_dev_init(priv, parent->ca, parent->port); if (error) goto out; priv->parent = parent->dev; list_add_tail(&priv->list, &parent->child_intfs); VLAN_SETCOOKIE(dev, priv); dev->if_start = ipoib_vlan_start; dev->if_drv_flags &= ~IFF_DRV_RUNNING; dev->if_hdrlen = IPOIB_HEADER_LEN; if (ifp->if_drv_flags & IFF_DRV_RUNNING) ipoib_open(priv); mutex_unlock(&parent->vlan_mutex); return; out: mutex_unlock(&parent->vlan_mutex); if (priv) free(priv, M_TEMP); if (error) ipoib_warn(parent, "failed to initialize subinterface: device %s, port %d vtag 0x%X", parent->ca->name, parent->port, vtag); return; } static void ipoib_unconfig_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag) { struct ipoib_dev_priv *parent; struct ipoib_dev_priv *priv; struct ifnet *dev; uint16_t pkey; if (ifp->if_type != IFT_INFINIBAND) return; dev = VLAN_DEVAT(ifp, vtag); if (dev) VLAN_SETCOOKIE(dev, NULL); pkey = vtag | 0x8000; parent = ifp->if_softc; mutex_lock(&parent->vlan_mutex); list_for_each_entry(priv, &parent->child_intfs, list) { if (priv->pkey == pkey) { ipoib_dev_cleanup(priv); list_del(&priv->list); break; } } mutex_unlock(&parent->vlan_mutex); } eventhandler_tag ipoib_vlan_attach; eventhandler_tag ipoib_vlan_detach; static int __init ipoib_init_module(void) { int ret; ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size); ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE); ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE); ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size); ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE); ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE)); #ifdef CONFIG_INFINIBAND_IPOIB_CM ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); #endif ipoib_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, ipoib_config_vlan, NULL, EVENTHANDLER_PRI_FIRST); ipoib_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, ipoib_unconfig_vlan, NULL, EVENTHANDLER_PRI_FIRST); /* * We create our own workqueue mainly because we want to be * able to flush it when devices are being removed. 
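 * Flushing guarantees that no queued work item still references an
 * ib_device that has gone away.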
We can't * use schedule_work()/flush_scheduled_work() because both * unregister_netdev() and linkwatch_event take the rtnl lock, * so flush_scheduled_work() can deadlock during device * removal. */ ipoib_workqueue = create_singlethread_workqueue("ipoib"); if (!ipoib_workqueue) { ret = -ENOMEM; goto err_fs; } ib_sa_register_client(&ipoib_sa_client); ret = ib_register_client(&ipoib_client); if (ret) goto err_sa; return 0; err_sa: ib_sa_unregister_client(&ipoib_sa_client); destroy_workqueue(ipoib_workqueue); err_fs: return ret; } static void __exit ipoib_cleanup_module(void) { EVENTHANDLER_DEREGISTER(vlan_config, ipoib_vlan_attach); EVENTHANDLER_DEREGISTER(vlan_unconfig, ipoib_vlan_detach); ib_unregister_client(&ipoib_client); ib_sa_unregister_client(&ipoib_sa_client); destroy_workqueue(ipoib_workqueue); } /* * Infiniband output routine. */ static int ipoib_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro) { u_char edst[INFINIBAND_ALEN]; #if defined(INET) || defined(INET6) struct llentry *lle = NULL; #endif struct ipoib_header *eh; int error = 0, is_gw = 0; short type; if (ro != NULL) is_gw = (ro->ro_flags & RT_HAS_GW) != 0; #ifdef MAC error = mac_ifnet_check_transmit(ifp, m); if (error) goto bad; #endif M_PROFILE(m); if (ifp->if_flags & IFF_MONITOR) { error = ENETDOWN; goto bad; } if (!((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))) { error = ENETDOWN; goto bad; } switch (dst->sa_family) { #ifdef INET case AF_INET: if (lle != NULL && (lle->la_flags & LLE_VALID)) memcpy(edst, lle->ll_addr, sizeof(edst)); else if (m->m_flags & M_MCAST) ip_ib_mc_map(((struct sockaddr_in *)dst)->sin_addr.s_addr, ifp->if_broadcastaddr, edst); else error = arpresolve(ifp, is_gw, m, dst, edst, NULL, NULL); if (error) return (error == EWOULDBLOCK ? 0 : error); type = htons(ETHERTYPE_IP); break; case AF_ARP: { struct arphdr *ah; ah = mtod(m, struct arphdr *); ah->ar_hrd = htons(ARPHRD_INFINIBAND); switch(ntohs(ah->ar_op)) { case ARPOP_REVREQUEST: case ARPOP_REVREPLY: type = htons(ETHERTYPE_REVARP); break; case ARPOP_REQUEST: case ARPOP_REPLY: default: type = htons(ETHERTYPE_ARP); break; } if (m->m_flags & M_BCAST) bcopy(ifp->if_broadcastaddr, edst, INFINIBAND_ALEN); else bcopy(ar_tha(ah), edst, INFINIBAND_ALEN); } break; #endif #ifdef INET6 case AF_INET6: if (lle != NULL && (lle->la_flags & LLE_VALID)) memcpy(edst, lle->ll_addr, sizeof(edst)); else if (m->m_flags & M_MCAST) ipv6_ib_mc_map(&((struct sockaddr_in6 *)dst)->sin6_addr, ifp->if_broadcastaddr, edst); else error = nd6_resolve(ifp, is_gw, m, dst, edst, NULL, NULL); if (error) return error; type = htons(ETHERTYPE_IPV6); break; #endif default: if_printf(ifp, "can't handle af%d\n", dst->sa_family); error = EAFNOSUPPORT; goto bad; } /* * Add local net header. If no space in first mbuf, * allocate another. */ M_PREPEND(m, IPOIB_HEADER_LEN, M_NOWAIT); if (m == NULL) { error = ENOBUFS; goto bad; } eh = mtod(m, struct ipoib_header *); (void)memcpy(&eh->proto, &type, sizeof(eh->proto)); (void)memcpy(&eh->hwaddr, edst, sizeof (edst)); /* * Queue message on interface, update output statistics if * successful, and start output if interface not yet active. */ return ((ifp->if_transmit)(ifp, m)); bad: if (m != NULL) m_freem(m); return (error); } /* * Upper layer processing for a received Infiniband packet. */ void ipoib_demux(struct ifnet *ifp, struct mbuf *m, u_short proto) { int isr; #ifdef MAC /* * Tag the mbuf with an appropriate MAC label before any other * consumers can get to it. 
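 * Tagging first ensures MAC policy modules get to inspect every frame
 * received over IPoIB.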
*/ mac_ifnet_create_mbuf(ifp, m); #endif /* Allow monitor mode to claim this frame, after stats are updated. */ if (ifp->if_flags & IFF_MONITOR) { if_printf(ifp, "discard frame at IFF_MONITOR\n"); m_freem(m); return; } /* * Dispatch frame to upper layer. */ switch (proto) { #ifdef INET case ETHERTYPE_IP: isr = NETISR_IP; break; case ETHERTYPE_ARP: if (ifp->if_flags & IFF_NOARP) { /* Discard packet if ARP is disabled on interface */ m_freem(m); return; } isr = NETISR_ARP; break; #endif #ifdef INET6 case ETHERTYPE_IPV6: isr = NETISR_IPV6; break; #endif default: goto discard; } netisr_dispatch(isr, m); return; discard: m_freem(m); } /* * Process a received Infiniband packet. */ static void ipoib_input(struct ifnet *ifp, struct mbuf *m) { struct ipoib_header *eh; if ((ifp->if_flags & IFF_UP) == 0) { m_freem(m); return; } CURVNET_SET_QUIET(ifp->if_vnet); /* Let BPF have it before we strip the header. */ IPOIB_MTAP(ifp, m); eh = mtod(m, struct ipoib_header *); /* * Reset layer specific mbuf flags to avoid confusing upper layers. * Strip off Infiniband header. */ m->m_flags &= ~M_VLANTAG; m_clrprotoflags(m); m_adj(m, IPOIB_HEADER_LEN); if (IPOIB_IS_MULTICAST(eh->hwaddr)) { if (memcmp(eh->hwaddr, ifp->if_broadcastaddr, ifp->if_addrlen) == 0) m->m_flags |= M_BCAST; else m->m_flags |= M_MCAST; if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1); } ipoib_demux(ifp, m, ntohs(eh->proto)); CURVNET_RESTORE(); } static int ipoib_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa, struct sockaddr *sa) { struct sockaddr_dl *sdl; #ifdef INET struct sockaddr_in *sin; #endif #ifdef INET6 struct sockaddr_in6 *sin6; #endif u_char *e_addr; switch(sa->sa_family) { case AF_LINK: /* * No mapping needed. Just check that it's a valid MC address. */ sdl = (struct sockaddr_dl *)sa; e_addr = LLADDR(sdl); if (!IPOIB_IS_MULTICAST(e_addr)) return EADDRNOTAVAIL; *llsa = NULL; return 0; #ifdef INET case AF_INET: sin = (struct sockaddr_in *)sa; if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) return EADDRNOTAVAIL; sdl = link_init_sdl(ifp, *llsa, IFT_INFINIBAND); sdl->sdl_alen = INFINIBAND_ALEN; e_addr = LLADDR(sdl); ip_ib_mc_map(sin->sin_addr.s_addr, ifp->if_broadcastaddr, e_addr); *llsa = (struct sockaddr *)sdl; return 0; #endif #ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)sa; /* * An IP6 address of 0 means listen to all * of the multicast address used for IP6. * This has no meaning in ipoib. 
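 * Reject it rather than install a wildcard membership.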
*/ if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) return EADDRNOTAVAIL; if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) return EADDRNOTAVAIL; sdl = link_init_sdl(ifp, *llsa, IFT_INFINIBAND); sdl->sdl_alen = INFINIBAND_ALEN; e_addr = LLADDR(sdl); ipv6_ib_mc_map(&sin6->sin6_addr, ifp->if_broadcastaddr, e_addr); *llsa = (struct sockaddr *)sdl; return 0; #endif default: return EAFNOSUPPORT; } } module_init(ipoib_init_module); module_exit(ipoib_cleanup_module); static int ipoib_evhand(module_t mod, int event, void *arg) { return (0); } static moduledata_t ipoib_mod = { .name = "ipoib", .evhand = ipoib_evhand, }; DECLARE_MODULE(ipoib, ipoib_mod, SI_SUB_LAST, SI_ORDER_ANY); MODULE_DEPEND(ipoib, ibcore, 1, 1, 1); MODULE_DEPEND(ipoib, linuxkpi, 1, 1, 1); Index: user/ngie/bug-237403/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c =================================================================== --- user/ngie/bug-237403/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c (revision 348028) +++ user/ngie/bug-237403/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c (revision 348029) @@ -1,1964 +1,1965 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 * The Regents of the University of California. All rights reserved. * Copyright (c) 2004 The FreeBSD Foundation. All rights reserved. * Copyright (c) 2004-2008 Robert N. M. Watson. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Excerpts taken from tcp_subr.c, tcp_usrreq.c, uipc_socket.c */ /* * * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include +#include #include #include #include "sdp.h" #include #include #include #include uma_zone_t sdp_zone; struct rwlock sdp_lock; LIST_HEAD(, sdp_sock) sdp_list; struct workqueue_struct *rx_comp_wq; RW_SYSINIT(sdplockinit, &sdp_lock, "SDP lock"); #define SDP_LIST_WLOCK() rw_wlock(&sdp_lock) #define SDP_LIST_RLOCK() rw_rlock(&sdp_lock) #define SDP_LIST_WUNLOCK() rw_wunlock(&sdp_lock) #define SDP_LIST_RUNLOCK() rw_runlock(&sdp_lock) #define SDP_LIST_WLOCK_ASSERT() rw_assert(&sdp_lock, RW_WLOCKED) #define SDP_LIST_RLOCK_ASSERT() rw_assert(&sdp_lock, RW_RLOCKED) #define SDP_LIST_LOCK_ASSERT() rw_assert(&sdp_lock, RW_LOCKED) MALLOC_DEFINE(M_SDP, "sdp", "Sockets Direct Protocol"); static void sdp_stop_keepalive_timer(struct socket *so); /* * SDP protocol interface to socket abstraction. */ /* * sdp_sendspace and sdp_recvspace are the default send and receive window * sizes, respectively. */ u_long sdp_sendspace = 1024*32; u_long sdp_recvspace = 1024*64; static int sdp_count; /* * Disable async. CMA events for sockets which are being torn down. */ static void sdp_destroy_cma(struct sdp_sock *ssk) { if (ssk->id == NULL) return; rdma_destroy_id(ssk->id); ssk->id = NULL; } static int sdp_pcbbind(struct sdp_sock *ssk, struct sockaddr *nam, struct ucred *cred) { struct sockaddr_in *sin; struct sockaddr_in null; int error; SDP_WLOCK_ASSERT(ssk); if (ssk->lport != 0 || ssk->laddr != INADDR_ANY) return (EINVAL); /* rdma_bind_addr handles bind races. 
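 * The pcb lock is dropped across the rdma_cm calls below because they
 * may sleep; concurrent binds to the same address are serialized by
 * rdma_bind_addr itself.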
*/ SDP_WUNLOCK(ssk); if (ssk->id == NULL) ssk->id = rdma_create_id(&init_net, sdp_cma_handler, ssk, RDMA_PS_SDP, IB_QPT_RC); if (ssk->id == NULL) { SDP_WLOCK(ssk); return (ENOMEM); } if (nam == NULL) { null.sin_family = AF_INET; null.sin_len = sizeof(null); null.sin_addr.s_addr = INADDR_ANY; null.sin_port = 0; bzero(&null.sin_zero, sizeof(null.sin_zero)); nam = (struct sockaddr *)&null; } error = -rdma_bind_addr(ssk->id, nam); SDP_WLOCK(ssk); if (error == 0) { sin = (struct sockaddr_in *)&ssk->id->route.addr.src_addr; ssk->laddr = sin->sin_addr.s_addr; ssk->lport = sin->sin_port; } else sdp_destroy_cma(ssk); return (error); } static void sdp_pcbfree(struct sdp_sock *ssk) { KASSERT(ssk->socket == NULL, ("ssk %p socket still attached", ssk)); KASSERT((ssk->flags & SDP_DESTROY) == 0, ("ssk %p already destroyed", ssk)); sdp_dbg(ssk->socket, "Freeing pcb"); SDP_WLOCK_ASSERT(ssk); ssk->flags |= SDP_DESTROY; SDP_WUNLOCK(ssk); SDP_LIST_WLOCK(); sdp_count--; LIST_REMOVE(ssk, list); SDP_LIST_WUNLOCK(); crfree(ssk->cred); ssk->qp_active = 0; if (ssk->qp) { ib_destroy_qp(ssk->qp); ssk->qp = NULL; } sdp_tx_ring_destroy(ssk); sdp_rx_ring_destroy(ssk); sdp_destroy_cma(ssk); rw_destroy(&ssk->rx_ring.destroyed_lock); rw_destroy(&ssk->lock); uma_zfree(sdp_zone, ssk); } /* * Common routines to return a socket address. */ static struct sockaddr * sdp_sockaddr(in_port_t port, struct in_addr *addr_p) { struct sockaddr_in *sin; sin = malloc(sizeof *sin, M_SONAME, M_WAITOK | M_ZERO); sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_addr = *addr_p; sin->sin_port = port; return (struct sockaddr *)sin; } static int sdp_getsockaddr(struct socket *so, struct sockaddr **nam) { struct sdp_sock *ssk; struct in_addr addr; in_port_t port; ssk = sdp_sk(so); SDP_RLOCK(ssk); port = ssk->lport; addr.s_addr = ssk->laddr; SDP_RUNLOCK(ssk); *nam = sdp_sockaddr(port, &addr); return 0; } static int sdp_getpeeraddr(struct socket *so, struct sockaddr **nam) { struct sdp_sock *ssk; struct in_addr addr; in_port_t port; ssk = sdp_sk(so); SDP_RLOCK(ssk); port = ssk->fport; addr.s_addr = ssk->faddr; SDP_RUNLOCK(ssk); *nam = sdp_sockaddr(port, &addr); return 0; } static void sdp_pcbnotifyall(struct in_addr faddr, int errno, struct sdp_sock *(*notify)(struct sdp_sock *, int)) { struct sdp_sock *ssk, *ssk_temp; SDP_LIST_WLOCK(); LIST_FOREACH_SAFE(ssk, &sdp_list, list, ssk_temp) { SDP_WLOCK(ssk); if (ssk->faddr != faddr.s_addr || ssk->socket == NULL) { SDP_WUNLOCK(ssk); continue; } if ((ssk->flags & SDP_DESTROY) == 0) if ((*notify)(ssk, errno)) SDP_WUNLOCK(ssk); } SDP_LIST_WUNLOCK(); } #if 0 static void sdp_apply_all(void (*func)(struct sdp_sock *, void *), void *arg) { struct sdp_sock *ssk; SDP_LIST_RLOCK(); LIST_FOREACH(ssk, &sdp_list, list) { SDP_WLOCK(ssk); func(ssk, arg); SDP_WUNLOCK(ssk); } SDP_LIST_RUNLOCK(); } #endif static void sdp_output_reset(struct sdp_sock *ssk) { struct rdma_cm_id *id; SDP_WLOCK_ASSERT(ssk); if (ssk->id) { id = ssk->id; ssk->qp_active = 0; SDP_WUNLOCK(ssk); rdma_disconnect(id); SDP_WLOCK(ssk); } ssk->state = TCPS_CLOSED; } /* * Attempt to close a SDP socket, marking it as dropped, and freeing * the socket if we hold the only reference. 
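 * If SDP_SOCKREF is set, the protocol holds the last reference: clear
 * SS_PROTOREF, hand the socket to sofree(), and return NULL so the
 * caller knows the pcb is no longer usable.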
*/ static struct sdp_sock * sdp_closed(struct sdp_sock *ssk) { struct socket *so; SDP_WLOCK_ASSERT(ssk); ssk->flags |= SDP_DROPPED; so = ssk->socket; soisdisconnected(so); if (ssk->flags & SDP_SOCKREF) { KASSERT(so->so_state & SS_PROTOREF, ("sdp_closed: !SS_PROTOREF")); ssk->flags &= ~SDP_SOCKREF; SDP_WUNLOCK(ssk); SOCK_LOCK(so); so->so_state &= ~SS_PROTOREF; sofree(so); return (NULL); } return (ssk); } /* * Perform timer based shutdowns which can not operate in * callout context. */ static void sdp_shutdown_task(void *data, int pending) { struct sdp_sock *ssk; ssk = data; SDP_WLOCK(ssk); /* * I don't think this can race with another call to pcbfree() * because SDP_TIMEWAIT protects it. SDP_DESTROY may be redundant. */ if (ssk->flags & SDP_DESTROY) panic("sdp_shutdown_task: Racing with pcbfree for ssk %p", ssk); if (ssk->flags & SDP_DISCON) sdp_output_reset(ssk); /* We have to clear this so sdp_detach() will call pcbfree(). */ ssk->flags &= ~(SDP_TIMEWAIT | SDP_DREQWAIT); if ((ssk->flags & SDP_DROPPED) == 0 && sdp_closed(ssk) == NULL) return; if (ssk->socket == NULL) { sdp_pcbfree(ssk); return; } SDP_WUNLOCK(ssk); } /* * 2msl has expired, schedule the shutdown task. */ static void sdp_2msl_timeout(void *data) { struct sdp_sock *ssk; ssk = data; /* Callout canceled. */ if (!callout_active(&ssk->keep2msl)) goto out; callout_deactivate(&ssk->keep2msl); /* Should be impossible, defensive programming. */ if ((ssk->flags & SDP_TIMEWAIT) == 0) goto out; taskqueue_enqueue(taskqueue_thread, &ssk->shutdown_task); out: SDP_WUNLOCK(ssk); return; } /* * Schedule the 2msl wait timer. */ static void sdp_2msl_wait(struct sdp_sock *ssk) { SDP_WLOCK_ASSERT(ssk); ssk->flags |= SDP_TIMEWAIT; ssk->state = TCPS_TIME_WAIT; soisdisconnected(ssk->socket); callout_reset(&ssk->keep2msl, TCPTV_MSL, sdp_2msl_timeout, ssk); } /* * Timed out waiting for the final fin/ack from rdma_disconnect(). */ static void sdp_dreq_timeout(void *data) { struct sdp_sock *ssk; ssk = data; /* Callout canceled. */ if (!callout_active(&ssk->keep2msl)) goto out; /* Callout rescheduled, probably as a different timer. */ if (callout_pending(&ssk->keep2msl)) goto out; callout_deactivate(&ssk->keep2msl); if (ssk->state != TCPS_FIN_WAIT_1 && ssk->state != TCPS_LAST_ACK) goto out; if ((ssk->flags & SDP_DREQWAIT) == 0) goto out; ssk->flags &= ~SDP_DREQWAIT; ssk->flags |= SDP_DISCON; sdp_2msl_wait(ssk); ssk->qp_active = 0; out: SDP_WUNLOCK(ssk); } /* * Received the final fin/ack. Cancel the 2msl. */ void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk) { sdp_dbg(ssk->socket, "cancelling dreq wait timeout\n"); ssk->flags &= ~SDP_DREQWAIT; sdp_2msl_wait(ssk); } static int sdp_init_sock(struct socket *sk) { struct sdp_sock *ssk = sdp_sk(sk); sdp_dbg(sk, "%s\n", __func__); callout_init_rw(&ssk->keep2msl, &ssk->lock, CALLOUT_RETURNUNLOCKED); TASK_INIT(&ssk->shutdown_task, 0, sdp_shutdown_task, ssk); #ifdef SDP_ZCOPY INIT_DELAYED_WORK(&ssk->srcavail_cancel_work, srcavail_cancel_timeout); ssk->zcopy_thresh = -1; /* use global sdp_zcopy_thresh */ ssk->tx_ring.rdma_inflight = NULL; #endif atomic_set(&ssk->mseq_ack, 0); sdp_rx_ring_init(ssk); ssk->tx_ring.buffer = NULL; return 0; } /* * Allocate an sdp_sock for the socket and reserve socket buffer space. 
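 * The reservations default to sdp_sendspace and sdp_recvspace; both
 * buffers are marked SB_AUTOSIZE, so they may grow later.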
*/ static int sdp_attach(struct socket *so, int proto, struct thread *td) { struct sdp_sock *ssk; int error; ssk = sdp_sk(so); KASSERT(ssk == NULL, ("sdp_attach: ssk already set on so %p", so)); if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { error = soreserve(so, sdp_sendspace, sdp_recvspace); if (error) return (error); } so->so_rcv.sb_flags |= SB_AUTOSIZE; so->so_snd.sb_flags |= SB_AUTOSIZE; ssk = uma_zalloc(sdp_zone, M_NOWAIT | M_ZERO); if (ssk == NULL) return (ENOBUFS); rw_init(&ssk->lock, "sdpsock"); ssk->socket = so; ssk->cred = crhold(so->so_cred); so->so_pcb = (caddr_t)ssk; sdp_init_sock(so); ssk->flags = 0; ssk->qp_active = 0; ssk->state = TCPS_CLOSED; mbufq_init(&ssk->rxctlq, INT_MAX); SDP_LIST_WLOCK(); LIST_INSERT_HEAD(&sdp_list, ssk, list); sdp_count++; SDP_LIST_WUNLOCK(); if ((so->so_options & SO_LINGER) && so->so_linger == 0) so->so_linger = TCP_LINGERTIME; return (0); } /* * Detach SDP from the socket, potentially leaving it around for the * timewait to expire. */ static void sdp_detach(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); KASSERT(ssk->socket != NULL, ("sdp_detach: socket is NULL")); ssk->socket->so_pcb = NULL; ssk->socket = NULL; if (ssk->flags & (SDP_TIMEWAIT | SDP_DREQWAIT)) SDP_WUNLOCK(ssk); else if (ssk->flags & SDP_DROPPED || ssk->state < TCPS_SYN_SENT) sdp_pcbfree(ssk); else panic("sdp_detach: Unexpected state, ssk %p.\n", ssk); } /* * Allocate a local address for the socket. */ static int sdp_bind(struct socket *so, struct sockaddr *nam, struct thread *td) { int error = 0; struct sdp_sock *ssk; struct sockaddr_in *sin; sin = (struct sockaddr_in *)nam; if (nam->sa_len != sizeof (*sin)) return (EINVAL); if (sin->sin_family != AF_INET) return (EINVAL); if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) return (EAFNOSUPPORT); ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = EINVAL; goto out; } error = sdp_pcbbind(ssk, nam, td->td_ucred); out: SDP_WUNLOCK(ssk); return (error); } /* * Prepare to accept connections. */ static int sdp_listen(struct socket *so, int backlog, struct thread *td) { int error = 0; struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = EINVAL; goto out; } if (error == 0 && ssk->lport == 0) error = sdp_pcbbind(ssk, (struct sockaddr *)0, td->td_ucred); SOCK_LOCK(so); if (error == 0) error = solisten_proto_check(so); if (error == 0) { solisten_proto(so, backlog); ssk->state = TCPS_LISTEN; } SOCK_UNLOCK(so); out: SDP_WUNLOCK(ssk); if (error == 0) error = -rdma_listen(ssk->id, backlog); return (error); } /* * Initiate a SDP connection to nam. */ static int sdp_start_connect(struct sdp_sock *ssk, struct sockaddr *nam, struct thread *td) { struct sockaddr_in src; struct socket *so; int error; so = ssk->socket; SDP_WLOCK_ASSERT(ssk); if (ssk->lport == 0) { error = sdp_pcbbind(ssk, (struct sockaddr *)0, td->td_ucred); if (error) return error; } src.sin_family = AF_INET; src.sin_len = sizeof(src); bzero(&src.sin_zero, sizeof(src.sin_zero)); src.sin_port = ssk->lport; src.sin_addr.s_addr = ssk->laddr; soisconnecting(so); SDP_WUNLOCK(ssk); error = -rdma_resolve_addr(ssk->id, (struct sockaddr *)&src, nam, SDP_RESOLVE_TIMEOUT); SDP_WLOCK(ssk); if (error == 0) ssk->state = TCPS_SYN_SENT; return 0; } /* * Initiate SDP connection. 
*/ static int sdp_connect(struct socket *so, struct sockaddr *nam, struct thread *td) { int error = 0; struct sdp_sock *ssk; struct sockaddr_in *sin; sin = (struct sockaddr_in *)nam; if (nam->sa_len != sizeof (*sin)) return (EINVAL); if (sin->sin_family != AF_INET) return (EINVAL); if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) return (EAFNOSUPPORT); if ((error = prison_remote_ip4(td->td_ucred, &sin->sin_addr)) != 0) return (error); ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) error = EINVAL; else error = sdp_start_connect(ssk, nam, td); SDP_WUNLOCK(ssk); return (error); } /* * Drop a SDP socket, reporting * the specified error. If connection is synchronized, * then send a RST to peer. */ static struct sdp_sock * sdp_drop(struct sdp_sock *ssk, int errno) { struct socket *so; SDP_WLOCK_ASSERT(ssk); so = ssk->socket; if (TCPS_HAVERCVDSYN(ssk->state)) sdp_output_reset(ssk); if (errno == ETIMEDOUT && ssk->softerror) errno = ssk->softerror; so->so_error = errno; return (sdp_closed(ssk)); } /* * User issued close, and wish to trail through shutdown states: * if never received SYN, just forget it. If got a SYN from peer, * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN. * If already got a FIN from peer, then almost done; go to LAST_ACK * state. In all other cases, have already sent FIN to peer (e.g. * after PRU_SHUTDOWN), and just have to play tedious game waiting * for peer to send FIN or not respond to keep-alives, etc. * We can let the user exit from the close as soon as the FIN is acked. */ static void sdp_usrclosed(struct sdp_sock *ssk) { SDP_WLOCK_ASSERT(ssk); switch (ssk->state) { case TCPS_LISTEN: ssk->state = TCPS_CLOSED; SDP_WUNLOCK(ssk); sdp_destroy_cma(ssk); SDP_WLOCK(ssk); /* FALLTHROUGH */ case TCPS_CLOSED: ssk = sdp_closed(ssk); /* * sdp_closed() should never return NULL here as the socket is * still open. */ KASSERT(ssk != NULL, ("sdp_usrclosed: sdp_closed() returned NULL")); break; case TCPS_SYN_SENT: /* FALLTHROUGH */ case TCPS_SYN_RECEIVED: ssk->flags |= SDP_NEEDFIN; break; case TCPS_ESTABLISHED: ssk->flags |= SDP_NEEDFIN; ssk->state = TCPS_FIN_WAIT_1; break; case TCPS_CLOSE_WAIT: ssk->state = TCPS_LAST_ACK; break; } if (ssk->state >= TCPS_FIN_WAIT_2) { /* Prevent the connection hanging in FIN_WAIT_2 forever. */ if (ssk->state == TCPS_FIN_WAIT_2) sdp_2msl_wait(ssk); else soisdisconnected(ssk->socket); } } static void sdp_output_disconnect(struct sdp_sock *ssk) { SDP_WLOCK_ASSERT(ssk); callout_reset(&ssk->keep2msl, SDP_FIN_WAIT_TIMEOUT, sdp_dreq_timeout, ssk); ssk->flags |= SDP_NEEDFIN | SDP_DREQWAIT; sdp_post_sends(ssk, M_NOWAIT); } /* * Initiate or continue a disconnect. * If embryonic state, just send reset (once). * If in ``let data drain'' option and linger null, just drop. * Otherwise (hard), mark socket disconnecting and drop * current input data; switch states based on user close, and * send segment to peer (with FIN). */ static void sdp_start_disconnect(struct sdp_sock *ssk) { struct socket *so; int unread; so = ssk->socket; SDP_WLOCK_ASSERT(ssk); sdp_stop_keepalive_timer(so); /* * Neither sdp_closed() nor sdp_drop() should return NULL, as the * socket is still open. 
*/ if (ssk->state < TCPS_ESTABLISHED) { ssk = sdp_closed(ssk); KASSERT(ssk != NULL, ("sdp_start_disconnect: sdp_close() returned NULL")); } else if ((so->so_options & SO_LINGER) && so->so_linger == 0) { ssk = sdp_drop(ssk, 0); KASSERT(ssk != NULL, ("sdp_start_disconnect: sdp_drop() returned NULL")); } else { soisdisconnecting(so); unread = sbused(&so->so_rcv); sbflush(&so->so_rcv); sdp_usrclosed(ssk); if (!(ssk->flags & SDP_DROPPED)) { if (unread) sdp_output_reset(ssk); else sdp_output_disconnect(ssk); } } } /* * User initiated disconnect. */ static int sdp_disconnect(struct socket *so) { struct sdp_sock *ssk; int error = 0; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = ECONNRESET; goto out; } sdp_start_disconnect(ssk); out: SDP_WUNLOCK(ssk); return (error); } /* * Accept a connection. Essentially all the work is done at higher levels; * just return the address of the peer, storing through addr. * * * XXX This is broken XXX * * The rationale for acquiring the sdp lock here is somewhat complicated, * and is described in detail in the commit log entry for r175612. Acquiring * it delays an accept(2) racing with sonewconn(), which inserts the socket * before the address/port fields are initialized. A better fix would * prevent the socket from being placed in the listen queue until all fields * are fully initialized. */ static int sdp_accept(struct socket *so, struct sockaddr **nam) { struct sdp_sock *ssk = NULL; struct in_addr addr; in_port_t port; int error; if (so->so_state & SS_ISDISCONNECTED) return (ECONNABORTED); port = 0; addr.s_addr = 0; error = 0; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = ECONNABORTED; goto out; } port = ssk->fport; addr.s_addr = ssk->faddr; out: SDP_WUNLOCK(ssk); if (error == 0) *nam = sdp_sockaddr(port, &addr); return error; } /* * Mark the connection as being incapable of further output. */ static int sdp_shutdown(struct socket *so) { int error = 0; struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = ECONNRESET; goto out; } socantsendmore(so); sdp_usrclosed(ssk); if (!(ssk->flags & SDP_DROPPED)) sdp_output_disconnect(ssk); out: SDP_WUNLOCK(ssk); return (error); } static void sdp_append(struct sdp_sock *ssk, struct sockbuf *sb, struct mbuf *mb, int cnt) { struct mbuf *n; int ncnt; SOCKBUF_LOCK_ASSERT(sb); SBLASTRECORDCHK(sb); KASSERT(mb->m_flags & M_PKTHDR, ("sdp_append: %p Missing packet header.\n", mb)); n = sb->sb_lastrecord; /* * If the queue is empty just set all pointers and proceed. */ if (n == NULL) { sb->sb_lastrecord = sb->sb_mb = sb->sb_sndptr = mb; for (; mb; mb = mb->m_next) { sb->sb_mbtail = mb; sballoc(sb, mb); } return; } /* * Count the number of mbufs in the current tail. */ for (ncnt = 0; n->m_next; n = n->m_next) ncnt++; n = sb->sb_lastrecord; /* * If the two chains can fit in a single sdp packet and * the last record has not been sent yet (WRITABLE) coalesce * them. The lastrecord remains the same but we must strip the * packet header and then let sbcompress do the hard part. */ if (M_WRITABLE(n) && ncnt + cnt < SDP_MAX_SEND_SGES && n->m_pkthdr.len + mb->m_pkthdr.len - SDP_HEAD_SIZE < ssk->xmit_size_goal) { m_adj(mb, SDP_HEAD_SIZE); n->m_pkthdr.len += mb->m_pkthdr.len; n->m_flags |= mb->m_flags & (M_PUSH | M_URG); m_demote(mb, 1, 0); sbcompress(sb, mb, sb->sb_mbtail); return; } /* * Not compressible, just append to the end and adjust counters. 
*/ sb->sb_lastrecord->m_flags |= M_PUSH; sb->sb_lastrecord->m_nextpkt = mb; sb->sb_lastrecord = mb; if (sb->sb_sndptr == NULL) sb->sb_sndptr = mb; for (; mb; mb = mb->m_next) { sb->sb_mbtail = mb; sballoc(sb, mb); } } /* * Do a send by putting data in output queue and updating urgent * marker if URG set. Possibly send more data. Unlike the other * pru_*() routines, the mbuf chains are our responsibility. We * must either enqueue them or free them. The other pru_* routines * generally are caller-frees. * * This comes from sendfile, normal sends will come from sdp_sosend(). */ static int sdp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct thread *td) { struct sdp_sock *ssk; struct mbuf *n; int error; int cnt; error = 0; ssk = sdp_sk(so); KASSERT(m->m_flags & M_PKTHDR, ("sdp_send: %p no packet header", m)); M_PREPEND(m, SDP_HEAD_SIZE, M_WAITOK); mtod(m, struct sdp_bsdh *)->mid = SDP_MID_DATA; for (n = m, cnt = 0; n->m_next; n = n->m_next) cnt++; if (cnt > SDP_MAX_SEND_SGES) { n = m_collapse(m, M_WAITOK, SDP_MAX_SEND_SGES); if (n == NULL) { m_freem(m); return (EMSGSIZE); } m = n; for (cnt = 0; n->m_next; n = n->m_next) cnt++; } SDP_WLOCK(ssk); if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { if (control) m_freem(control); if (m) m_freem(m); error = ECONNRESET; goto out; } if (control) { /* SDP doesn't support control messages. */ if (control->m_len) { m_freem(control); if (m) m_freem(m); error = EINVAL; goto out; } m_freem(control); /* empty control, just free it */ } if (!(flags & PRUS_OOB)) { SOCKBUF_LOCK(&so->so_snd); sdp_append(ssk, &so->so_snd, m, cnt); SOCKBUF_UNLOCK(&so->so_snd); if (nam && ssk->state < TCPS_SYN_SENT) { /* * Do implied connect if not yet connected. */ error = sdp_start_connect(ssk, nam, td); if (error) goto out; } if (flags & PRUS_EOF) { /* * Close the send side of the connection after * the data is sent. */ socantsendmore(so); sdp_usrclosed(ssk); if (!(ssk->flags & SDP_DROPPED)) sdp_output_disconnect(ssk); } else if (!(ssk->flags & SDP_DROPPED) && !(flags & PRUS_MORETOCOME)) sdp_post_sends(ssk, M_NOWAIT); SDP_WUNLOCK(ssk); return (0); } else { SOCKBUF_LOCK(&so->so_snd); if (sbspace(&so->so_snd) < -512) { SOCKBUF_UNLOCK(&so->so_snd); m_freem(m); error = ENOBUFS; goto out; } /* * According to RFC961 (Assigned Protocols), * the urgent pointer points to the last octet * of urgent data. We continue, however, * to consider it to indicate the first octet * of data past the urgent section. * Otherwise, snd_up should be one lower. */ m->m_flags |= M_URG | M_PUSH; sdp_append(ssk, &so->so_snd, m, cnt); SOCKBUF_UNLOCK(&so->so_snd); if (nam && ssk->state < TCPS_SYN_SENT) { /* * Do implied connect if not yet connected. */ error = sdp_start_connect(ssk, nam, td); if (error) goto out; } sdp_post_sends(ssk, M_NOWAIT); SDP_WUNLOCK(ssk); return (0); } out: SDP_WUNLOCK(ssk); return (error); } #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT) /* * Send on a socket. If send must go all at once and message is larger than * send buffering, then hard error. Lock against other senders. If must go * all at once and not enough room now, then inform user that this would * block and do nothing. Otherwise, if nonblocking, send as much as * possible. The data to be sent is described by "uio" if nonzero, otherwise * by the mbuf chain "top" (which must be null if uio is not). Data provided * in mbuf chain must be small enough to send all at once. 
* * Returns nonzero on error, timeout or signal; callers must check for short * counts if EINTR/ERESTART are returned. Data and control buffers are freed * on return. */ static int sdp_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags, struct thread *td) { struct sdp_sock *ssk; long space, resid; int atomic; int error; int copy; if (uio != NULL) resid = uio->uio_resid; else resid = top->m_pkthdr.len; atomic = top != NULL; if (control != NULL) { if (control->m_len) { m_freem(control); if (top) m_freem(top); return (EINVAL); } m_freem(control); control = NULL; } /* * In theory resid should be unsigned. However, space must be * signed, as it might be less than 0 if we over-committed, and we * must use a signed comparison of space and resid. On the other * hand, a negative resid causes us to loop sending 0-length * segments to the protocol. * * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM * type sockets since that's an error. */ if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { error = EINVAL; goto out; } if (td != NULL) td->td_ru.ru_msgsnd++; ssk = sdp_sk(so); error = sblock(&so->so_snd, SBLOCKWAIT(flags)); if (error) goto out; restart: do { SOCKBUF_LOCK(&so->so_snd); if (so->so_snd.sb_state & SBS_CANTSENDMORE) { SOCKBUF_UNLOCK(&so->so_snd); error = EPIPE; goto release; } if (so->so_error) { error = so->so_error; so->so_error = 0; SOCKBUF_UNLOCK(&so->so_snd); goto release; } if ((so->so_state & SS_ISCONNECTED) == 0 && addr == NULL) { SOCKBUF_UNLOCK(&so->so_snd); error = ENOTCONN; goto release; } space = sbspace(&so->so_snd); if (flags & MSG_OOB) space += 1024; if (atomic && resid > ssk->xmit_size_goal - SDP_HEAD_SIZE) { SOCKBUF_UNLOCK(&so->so_snd); error = EMSGSIZE; goto release; } if (space < resid && (atomic || space < so->so_snd.sb_lowat)) { if ((so->so_state & SS_NBIO) || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) { SOCKBUF_UNLOCK(&so->so_snd); error = EWOULDBLOCK; goto release; } error = sbwait(&so->so_snd); SOCKBUF_UNLOCK(&so->so_snd); if (error) goto release; goto restart; } SOCKBUF_UNLOCK(&so->so_snd); do { if (uio == NULL) { resid = 0; if (flags & MSG_EOR) top->m_flags |= M_EOR; } else { /* * Copy the data from userland into a mbuf * chain. If no data is to be copied in, * a single empty mbuf is returned. */ copy = min(space, ssk->xmit_size_goal - SDP_HEAD_SIZE); top = m_uiotombuf(uio, M_WAITOK, copy, 0, M_PKTHDR | ((flags & MSG_EOR) ? M_EOR : 0)); if (top == NULL) { /* only possible error */ error = EFAULT; goto release; } space -= resid - uio->uio_resid; resid = uio->uio_resid; } /* * XXX all the SBS_CANTSENDMORE checks previously * done could be out of date after dropping the * socket lock. */ error = sdp_send(so, (flags & MSG_OOB) ? PRUS_OOB : /* * Set EOF on the last send if the user specified * MSG_EOF. */ ((flags & MSG_EOF) && (resid <= 0)) ? PRUS_EOF : /* If there is more to send set PRUS_MORETOCOME. */ (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0, top, addr, NULL, td); top = NULL; if (error) goto release; } while (resid && space > 0); } while (resid); release: sbunlock(&so->so_snd); out: if (top != NULL) m_freem(top); return (error); } /* * The part of soreceive() that implements reading non-inline out-of-band * data from a socket. For more complete comments, see soreceive(), from * which this code originated. * * Note that soreceive_rcvoob(), unlike the remainder of soreceive(), is * unable to return an mbuf chain to the caller. 
*/ static int soreceive_rcvoob(struct socket *so, struct uio *uio, int flags) { struct protosw *pr = so->so_proto; struct mbuf *m; int error; KASSERT(flags & MSG_OOB, ("soreceive_rcvoob: (flags & MSG_OOB) == 0")); m = m_get(M_WAITOK, MT_DATA); error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); if (error) goto bad; do { error = uiomove(mtod(m, void *), (int) min(uio->uio_resid, m->m_len), uio); m = m_free(m); } while (uio->uio_resid && error == 0 && m); bad: if (m != NULL) m_freem(m); return (error); } /* * Optimized version of soreceive() for stream (TCP) sockets. */ static int sdp_sorecv(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { int len = 0, error = 0, flags, oresid; struct sockbuf *sb; struct mbuf *m, *n = NULL; struct sdp_sock *ssk; /* We only do stream sockets. */ if (so->so_type != SOCK_STREAM) return (EINVAL); if (psa != NULL) *psa = NULL; if (controlp != NULL) return (EINVAL); if (flagsp != NULL) flags = *flagsp &~ MSG_EOR; else flags = 0; if (flags & MSG_OOB) return (soreceive_rcvoob(so, uio, flags)); if (mp0 != NULL) *mp0 = NULL; sb = &so->so_rcv; ssk = sdp_sk(so); /* Prevent other readers from entering the socket. */ error = sblock(sb, SBLOCKWAIT(flags)); if (error) goto out; SOCKBUF_LOCK(sb); /* Easy one, no space to copyout anything. */ if (uio->uio_resid == 0) { error = EINVAL; goto out; } oresid = uio->uio_resid; /* We will never ever get anything unless we are connected. */ if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { /* When disconnecting there may be still some data left. */ if (sbavail(sb)) goto deliver; if (!(so->so_state & SS_ISDISCONNECTED)) error = ENOTCONN; goto out; } /* Socket buffer is empty and we shall not block. */ if (sbavail(sb) == 0 && ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) { error = EAGAIN; goto out; } restart: SOCKBUF_LOCK_ASSERT(&so->so_rcv); /* Abort if socket has reported problems. */ if (so->so_error) { if (sbavail(sb)) goto deliver; if (oresid > uio->uio_resid) goto out; error = so->so_error; if (!(flags & MSG_PEEK)) so->so_error = 0; goto out; } /* Door is closed. Deliver what is left, if any. */ if (sb->sb_state & SBS_CANTRCVMORE) { if (sbavail(sb)) goto deliver; else goto out; } /* Socket buffer got some data that we shall deliver now. */ if (sbavail(sb) && !(flags & MSG_WAITALL) && ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)) || sbavail(sb) >= sb->sb_lowat || sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat) ) { goto deliver; } /* On MSG_WAITALL we must wait until all data or error arrives. */ if ((flags & MSG_WAITALL) && (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_lowat)) goto deliver; /* * Wait and block until (more) data comes in. * NB: Drops the sockbuf lock during wait. */ error = sbwait(sb); if (error) goto out; goto restart; deliver: SOCKBUF_LOCK_ASSERT(&so->so_rcv); KASSERT(sbavail(sb), ("%s: sockbuf empty", __func__)); KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__)); /* Statistics. */ if (uio->uio_td) uio->uio_td->td_ru.ru_msgrcv++; /* Fill uio until full or current end of socket buffer is reached. */ len = min(uio->uio_resid, sbavail(sb)); if (mp0 != NULL) { /* Dequeue as many mbufs as possible. 
*/ if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) { for (*mp0 = m = sb->sb_mb; m != NULL && m->m_len <= len; m = m->m_next) { len -= m->m_len; uio->uio_resid -= m->m_len; sbfree(sb, m); n = m; } sb->sb_mb = m; if (sb->sb_mb == NULL) SB_EMPTY_FIXUP(sb); n->m_next = NULL; } /* Copy the remainder. */ if (len > 0) { KASSERT(sb->sb_mb != NULL, ("%s: len > 0 && sb->sb_mb empty", __func__)); m = m_copym(sb->sb_mb, 0, len, M_NOWAIT); if (m == NULL) len = 0; /* Don't flush data from sockbuf. */ else uio->uio_resid -= m->m_len; if (*mp0 != NULL) n->m_next = m; else *mp0 = m; if (*mp0 == NULL) { error = ENOBUFS; goto out; } } } else { /* NB: Must unlock socket buffer as uiomove may sleep. */ SOCKBUF_UNLOCK(sb); error = m_mbuftouio(uio, sb->sb_mb, len); SOCKBUF_LOCK(sb); if (error) goto out; } SBLASTRECORDCHK(sb); SBLASTMBUFCHK(sb); /* * Remove the delivered data from the socket buffer unless we * were only peeking. */ if (!(flags & MSG_PEEK)) { if (len > 0) sbdrop_locked(sb, len); /* Notify protocol that we drained some data. */ SOCKBUF_UNLOCK(sb); SDP_WLOCK(ssk); sdp_do_posts(ssk); SDP_WUNLOCK(ssk); SOCKBUF_LOCK(sb); } /* * For MSG_WAITALL we may have to loop again and wait for * more data to come in. */ if ((flags & MSG_WAITALL) && uio->uio_resid > 0) goto restart; out: SOCKBUF_LOCK_ASSERT(sb); SBLASTRECORDCHK(sb); SBLASTMBUFCHK(sb); SOCKBUF_UNLOCK(sb); sbunlock(sb); return (error); } /* * Abort is used to teardown a connection typically while sitting in * the accept queue. */ void sdp_abort(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); /* * If we have not yet dropped, do it now. */ if (!(ssk->flags & SDP_TIMEWAIT) && !(ssk->flags & SDP_DROPPED)) sdp_drop(ssk, ECONNABORTED); KASSERT(ssk->flags & SDP_DROPPED, ("sdp_abort: %p not dropped 0x%X", ssk, ssk->flags)); SDP_WUNLOCK(ssk); } /* * Close a SDP socket and initiate a friendly disconnect. */ static void sdp_close(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); /* * If we have not yet dropped, do it now. */ if (!(ssk->flags & SDP_TIMEWAIT) && !(ssk->flags & SDP_DROPPED)) sdp_start_disconnect(ssk); /* * If we've still not dropped let the socket layer know we're * holding on to the socket and pcb for a while. */ if (!(ssk->flags & SDP_DROPPED)) { SOCK_LOCK(so); so->so_state |= SS_PROTOREF; SOCK_UNLOCK(so); ssk->flags |= SDP_SOCKREF; } SDP_WUNLOCK(ssk); } /* * User requests out-of-band data. 
*/ static int sdp_rcvoob(struct socket *so, struct mbuf *m, int flags) { int error = 0; struct sdp_sock *ssk; ssk = sdp_sk(so); SDP_WLOCK(ssk); if (!rx_ring_trylock(&ssk->rx_ring)) { SDP_WUNLOCK(ssk); return (ECONNRESET); } if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) { error = ECONNRESET; goto out; } if ((so->so_oobmark == 0 && (so->so_rcv.sb_state & SBS_RCVATMARK) == 0) || so->so_options & SO_OOBINLINE || ssk->oobflags & SDP_HADOOB) { error = EINVAL; goto out; } if ((ssk->oobflags & SDP_HAVEOOB) == 0) { error = EWOULDBLOCK; goto out; } m->m_len = 1; *mtod(m, caddr_t) = ssk->iobc; if ((flags & MSG_PEEK) == 0) ssk->oobflags ^= (SDP_HAVEOOB | SDP_HADOOB); out: rx_ring_unlock(&ssk->rx_ring); SDP_WUNLOCK(ssk); return (error); } void sdp_urg(struct sdp_sock *ssk, struct mbuf *mb) { struct mbuf *m; struct socket *so; so = ssk->socket; if (so == NULL) return; so->so_oobmark = sbused(&so->so_rcv) + mb->m_pkthdr.len - 1; sohasoutofband(so); ssk->oobflags &= ~(SDP_HAVEOOB | SDP_HADOOB); if (!(so->so_options & SO_OOBINLINE)) { for (m = mb; m->m_next != NULL; m = m->m_next); ssk->iobc = *(mtod(m, char *) + m->m_len - 1); ssk->oobflags |= SDP_HAVEOOB; m->m_len--; mb->m_pkthdr.len--; } } /* * Notify a sdp socket of an asynchronous error. * * Do not wake up user since there currently is no mechanism for * reporting soft errors (yet - a kqueue filter may be added). */ struct sdp_sock * sdp_notify(struct sdp_sock *ssk, int error) { SDP_WLOCK_ASSERT(ssk); if ((ssk->flags & SDP_TIMEWAIT) || (ssk->flags & SDP_DROPPED)) return (ssk); /* * Ignore some errors if we are hooked up. */ if (ssk->state == TCPS_ESTABLISHED && (error == EHOSTUNREACH || error == ENETUNREACH || error == EHOSTDOWN)) return (ssk); ssk->softerror = error; return sdp_drop(ssk, error); } static void sdp_ctlinput(int cmd, struct sockaddr *sa, void *vip) { struct in_addr faddr; faddr = ((struct sockaddr_in *)sa)->sin_addr; if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) return; sdp_pcbnotifyall(faddr, inetctlerrmap[cmd], sdp_notify); } static int sdp_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct thread *td) { return (EOPNOTSUPP); } static void sdp_keepalive_timeout(void *data) { struct sdp_sock *ssk; ssk = data; /* Callout canceled. */ if (!callout_active(&ssk->keep2msl)) return; /* Callout rescheduled as a different kind of timer. */ if (callout_pending(&ssk->keep2msl)) goto out; callout_deactivate(&ssk->keep2msl); if (ssk->flags & SDP_DROPPED || (ssk->socket->so_options & SO_KEEPALIVE) == 0) goto out; sdp_post_keepalive(ssk); callout_reset(&ssk->keep2msl, SDP_KEEPALIVE_TIME, sdp_keepalive_timeout, ssk); out: SDP_WUNLOCK(ssk); } void sdp_start_keepalive_timer(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); if (!callout_pending(&ssk->keep2msl)) callout_reset(&ssk->keep2msl, SDP_KEEPALIVE_TIME, sdp_keepalive_timeout, ssk); } static void sdp_stop_keepalive_timer(struct socket *so) { struct sdp_sock *ssk; ssk = sdp_sk(so); callout_stop(&ssk->keep2msl); } /* * sdp_ctloutput() must drop the inpcb lock before performing copyin on * socket option arguments. When it re-acquires the lock after the copy, it * has to revalidate that the connection is still valid for the socket * option. 
 */
#define SDP_WLOCK_RECHECK(inp) do {					\
	SDP_WLOCK(ssk);							\
	if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) {		\
		SDP_WUNLOCK(ssk);					\
		return (ECONNRESET);					\
	}								\
} while(0)

static int
sdp_ctloutput(struct socket *so, struct sockopt *sopt)
{
	int error, opt, optval;
	struct sdp_sock *ssk;

	error = 0;
	ssk = sdp_sk(so);
	if (sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_KEEPALIVE) {
		SDP_WLOCK(ssk);
		if (so->so_options & SO_KEEPALIVE)
			sdp_start_keepalive_timer(so);
		else
			sdp_stop_keepalive_timer(so);
		SDP_WUNLOCK(ssk);
	}
	if (sopt->sopt_level != IPPROTO_TCP)
		return (error);

	SDP_WLOCK(ssk);
	if (ssk->flags & (SDP_TIMEWAIT | SDP_DROPPED)) {
		SDP_WUNLOCK(ssk);
		return (ECONNRESET);
	}

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case TCP_NODELAY:
			SDP_WUNLOCK(ssk);
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				return (error);

			SDP_WLOCK_RECHECK(ssk);
			opt = SDP_NODELAY;
			if (optval)
				ssk->flags |= opt;
			else
				ssk->flags &= ~opt;
			sdp_do_posts(ssk);
			SDP_WUNLOCK(ssk);
			break;
		default:
			SDP_WUNLOCK(ssk);
			error = ENOPROTOOPT;
			break;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case TCP_NODELAY:
			optval = ssk->flags & SDP_NODELAY;
			SDP_WUNLOCK(ssk);
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;
		default:
			SDP_WUNLOCK(ssk);
			error = ENOPROTOOPT;
			break;
		}
		break;
	}
	return (error);
}
#undef SDP_WLOCK_RECHECK

int sdp_mod_count = 0;
int sdp_mod_usec = 0;

void
sdp_set_default_moderation(struct sdp_sock *ssk)
{

	if (sdp_mod_count <= 0 || sdp_mod_usec <= 0)
		return;
	ib_modify_cq(ssk->rx_ring.cq, sdp_mod_count, sdp_mod_usec);
}

static void
sdp_dev_add(struct ib_device *device)
{
	struct ib_fmr_pool_param param;
	struct sdp_device *sdp_dev;

	sdp_dev = malloc(sizeof(*sdp_dev), M_SDP, M_WAITOK | M_ZERO);
	sdp_dev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(sdp_dev->pd))
		goto out_pd;
	memset(&param, 0, sizeof param);
	param.max_pages_per_fmr = SDP_FMR_SIZE;
	param.page_shift = PAGE_SHIFT;
	param.access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ);
	param.pool_size = SDP_FMR_POOL_SIZE;
	param.dirty_watermark = SDP_FMR_DIRTY_SIZE;
	param.cache = 1;
	sdp_dev->fmr_pool = ib_create_fmr_pool(sdp_dev->pd, &param);
	if (IS_ERR(sdp_dev->fmr_pool))
		goto out_fmr;
	ib_set_client_data(device, &sdp_client, sdp_dev);
	return;

out_fmr:
	ib_dealloc_pd(sdp_dev->pd);
out_pd:
	free(sdp_dev, M_SDP);
}

static void
sdp_dev_rem(struct ib_device *device, void *client_data)
{
	struct sdp_device *sdp_dev;
	struct sdp_sock *ssk;

	SDP_LIST_WLOCK();
	LIST_FOREACH(ssk, &sdp_list, list) {
		if (ssk->ib_device != device)
			continue;
		SDP_WLOCK(ssk);
		if ((ssk->flags & SDP_DESTROY) == 0)
			ssk = sdp_notify(ssk, ECONNRESET);
		if (ssk)
			SDP_WUNLOCK(ssk);
	}
	SDP_LIST_WUNLOCK();
	/*
	 * XXX Do I need to wait between these two?
	 */
	sdp_dev = ib_get_client_data(device, &sdp_client);
	if (!sdp_dev)
		return;
	ib_flush_fmr_pool(sdp_dev->fmr_pool);
	ib_destroy_fmr_pool(sdp_dev->fmr_pool);
	ib_dealloc_pd(sdp_dev->pd);
	free(sdp_dev, M_SDP);
}

struct ib_client sdp_client = {
	.name = "sdp",
	.add = sdp_dev_add,
	.remove = sdp_dev_rem
};

static int
sdp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, n, i;
	struct sdp_sock *ssk;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = sdp_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xtcpcb);
		return (0);
	}

	if (req->newptr != NULL)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
*/ SDP_LIST_RLOCK(); n = sdp_count; SDP_LIST_RUNLOCK(); error = sysctl_wire_old_buffer(req, 2 * (sizeof xig) + n * sizeof(struct xtcpcb)); if (error != 0) return (error); bzero(&xig, sizeof(xig)); xig.xig_len = sizeof xig; xig.xig_count = n; xig.xig_gen = 0; xig.xig_sogen = so_gencnt; error = SYSCTL_OUT(req, &xig, sizeof xig); if (error) return (error); SDP_LIST_RLOCK(); for (ssk = LIST_FIRST(&sdp_list), i = 0; ssk != NULL && i < n; ssk = LIST_NEXT(ssk, list)) { struct xtcpcb xt; SDP_RLOCK(ssk); if (ssk->flags & SDP_TIMEWAIT) { if (ssk->cred != NULL) error = cr_cansee(req->td->td_ucred, ssk->cred); else error = EINVAL; /* Skip this inp. */ } else if (ssk->socket) error = cr_canseesocket(req->td->td_ucred, ssk->socket); else error = EINVAL; if (error) { error = 0; goto next; } bzero(&xt, sizeof(xt)); xt.xt_len = sizeof xt; xt.xt_inp.inp_gencnt = 0; xt.xt_inp.inp_vflag = INP_IPV4; memcpy(&xt.xt_inp.inp_laddr, &ssk->laddr, sizeof(ssk->laddr)); xt.xt_inp.inp_lport = ssk->lport; memcpy(&xt.xt_inp.inp_faddr, &ssk->faddr, sizeof(ssk->faddr)); xt.xt_inp.inp_fport = ssk->fport; xt.t_state = ssk->state; if (ssk->socket != NULL) sotoxsocket(ssk->socket, &xt.xt_inp.xi_socket); xt.xt_inp.xi_socket.xso_protocol = IPPROTO_TCP; SDP_RUNLOCK(ssk); error = SYSCTL_OUT(req, &xt, sizeof xt); if (error) break; i++; continue; next: SDP_RUNLOCK(ssk); } if (!error) { /* * Give the user an updated idea of our state. * If the generation differs from what we told * her before, she knows that something happened * while we were processing this request, and it * might be necessary to retry. */ xig.xig_gen = 0; xig.xig_sogen = so_gencnt; xig.xig_count = sdp_count; error = SYSCTL_OUT(req, &xig, sizeof xig); } SDP_LIST_RUNLOCK(); return (error); } static SYSCTL_NODE(_net_inet, -1, sdp, CTLFLAG_RW, 0, "SDP"); SYSCTL_PROC(_net_inet_sdp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD | CTLTYPE_STRUCT, 0, 0, sdp_pcblist, "S,xtcpcb", "List of active SDP connections"); static void sdp_zone_change(void *tag) { uma_zone_set_max(sdp_zone, maxsockets); } static void sdp_init(void) { LIST_INIT(&sdp_list); sdp_zone = uma_zcreate("sdp_sock", sizeof(struct sdp_sock), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); uma_zone_set_max(sdp_zone, maxsockets); EVENTHANDLER_REGISTER(maxsockets_change, sdp_zone_change, NULL, EVENTHANDLER_PRI_ANY); rx_comp_wq = create_singlethread_workqueue("rx_comp_wq"); ib_register_client(&sdp_client); } extern struct domain sdpdomain; struct pr_usrreqs sdp_usrreqs = { .pru_abort = sdp_abort, .pru_accept = sdp_accept, .pru_attach = sdp_attach, .pru_bind = sdp_bind, .pru_connect = sdp_connect, .pru_control = sdp_control, .pru_detach = sdp_detach, .pru_disconnect = sdp_disconnect, .pru_listen = sdp_listen, .pru_peeraddr = sdp_getpeeraddr, .pru_rcvoob = sdp_rcvoob, .pru_send = sdp_send, .pru_sosend = sdp_sosend, .pru_soreceive = sdp_sorecv, .pru_shutdown = sdp_shutdown, .pru_sockaddr = sdp_getsockaddr, .pru_close = sdp_close, }; struct protosw sdpsw[] = { { .pr_type = SOCK_STREAM, .pr_domain = &sdpdomain, .pr_protocol = IPPROTO_IP, .pr_flags = PR_CONNREQUIRED|PR_IMPLOPCL|PR_WANTRCVD, .pr_ctlinput = sdp_ctlinput, .pr_ctloutput = sdp_ctloutput, .pr_usrreqs = &sdp_usrreqs }, { .pr_type = SOCK_STREAM, .pr_domain = &sdpdomain, .pr_protocol = IPPROTO_TCP, .pr_flags = PR_CONNREQUIRED|PR_IMPLOPCL|PR_WANTRCVD, .pr_ctlinput = sdp_ctlinput, .pr_ctloutput = sdp_ctloutput, .pr_usrreqs = &sdp_usrreqs }, }; struct domain sdpdomain = { .dom_family = AF_INET_SDP, .dom_name = "SDP", .dom_init = sdp_init, .dom_protosw = sdpsw, 
.dom_protoswNPROTOSW = &sdpsw[sizeof(sdpsw)/sizeof(sdpsw[0])], }; DOMAIN_SET(sdp); int sdp_debug_level = 1; int sdp_data_debug_level = 0; Index: user/ngie/bug-237403/tests/sys/opencrypto/cryptotest.py =================================================================== --- user/ngie/bug-237403/tests/sys/opencrypto/cryptotest.py (revision 348028) +++ user/ngie/bug-237403/tests/sys/opencrypto/cryptotest.py (revision 348029) @@ -1,506 +1,506 @@ #!/usr/local/bin/python2 # # Copyright (c) 2014 The FreeBSD Foundation # All rights reserved. # # This software was developed by John-Mark Gurney under # the sponsorship from the FreeBSD Foundation. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# # $FreeBSD$ # from __future__ import print_function import binascii import errno import cryptodev import itertools import os import struct import unittest from cryptodev import * from glob import iglob katdir = '/usr/local/share/nist-kat' def katg(base, glob): assert os.path.exists(katdir), "Please 'pkg install nist-kat'" if not os.path.exists(os.path.join(katdir, base)): raise unittest.SkipTest("Missing %s test vectors" % (base)) return iglob(os.path.join(katdir, base, glob)) aesmodules = [ 'cryptosoft0', 'aesni0', 'ccr0', 'ccp0' ] desmodules = [ 'cryptosoft0', ] shamodules = [ 'cryptosoft0', 'aesni0', 'ccr0', 'ccp0' ] def GenTestCase(cname): try: crid = cryptodev.Crypto.findcrid(cname) except IOError: return None class GendCryptoTestCase(unittest.TestCase): ############### ##### AES ##### ############### @unittest.skipIf(cname not in aesmodules, 'skipping AES-XTS on %s' % (cname)) def test_xts(self): for i in katg('XTSTestVectors/format tweak value input - data unit seq no', '*.rsp'): self.runXTS(i, cryptodev.CRYPTO_AES_XTS) @unittest.skipIf(cname not in aesmodules, 'skipping AES-CBC on %s' % (cname)) def test_cbc(self): for i in katg('KAT_AES', 'CBC[GKV]*.rsp'): self.runCBC(i) @unittest.skipIf(cname not in aesmodules, 'skipping AES-CCM on %s' % (cname)) def test_ccm(self): for i in katg('ccmtestvectors', 'V*.rsp'): self.runCCMEncrypt(i) for i in katg('ccmtestvectors', 'D*.rsp'): self.runCCMDecrypt(i) @unittest.skipIf(cname not in aesmodules, 'skipping AES-GCM on %s' % (cname)) def test_gcm(self): for i in katg('gcmtestvectors', 'gcmEncrypt*'): self.runGCM(i, 'ENCRYPT') for i in katg('gcmtestvectors', 'gcmDecrypt*'): self.runGCM(i, 'DECRYPT') _gmacsizes = { 32: cryptodev.CRYPTO_AES_256_NIST_GMAC, 24: cryptodev.CRYPTO_AES_192_NIST_GMAC, 16: cryptodev.CRYPTO_AES_128_NIST_GMAC, } def runGCM(self, fname, mode): curfun = None if mode == 'ENCRYPT': swapptct = False curfun = Crypto.encrypt elif mode == 'DECRYPT': swapptct = True curfun = Crypto.decrypt else: raise RuntimeError('unknown mode: %r' % repr(mode)) columns = [ 'Count', 'Key', 'IV', 'CT', 'AAD', 'Tag', 'PT', ] with cryptodev.KATParser(fname, columns) as parser: self.runGCMWithParser(parser, mode) def runGCMWithParser(self, parser, mode): for _, lines in next(parser): for data in lines: curcnt = int(data['Count']) cipherkey = binascii.unhexlify(data['Key']) iv = binascii.unhexlify(data['IV']) aad = binascii.unhexlify(data['AAD']) tag = binascii.unhexlify(data['Tag']) if 'FAIL' not in data: pt = binascii.unhexlify(data['PT']) ct = binascii.unhexlify(data['CT']) if len(iv) != 12: # XXX - isn't supported continue try: c = Crypto(cryptodev.CRYPTO_AES_NIST_GCM_16, cipherkey, mac=self._gmacsizes[len(cipherkey)], mackey=cipherkey, crid=crid, maclen=16) except EnvironmentError as e: # Can't test algorithms the driver does not support. if e.errno != errno.EOPNOTSUPP: raise continue if mode == 'ENCRYPT': try: rct, rtag = c.encrypt(pt, iv, aad) except EnvironmentError as e: # Can't test inputs the driver does not support. if e.errno != errno.EINVAL: raise continue rtag = rtag[:len(tag)] data['rct'] = binascii.hexlify(rct) data['rtag'] = binascii.hexlify(rtag) self.assertEqual(rct, ct, repr(data)) self.assertEqual(rtag, tag, repr(data)) else: if len(tag) != 16: continue args = (ct, iv, aad, tag) if 'FAIL' in data: self.assertRaises(IOError, c.decrypt, *args) else: try: rpt, rtag = c.decrypt(*args) except EnvironmentError as e: # Can't test inputs the driver does not support. 
                                if e.errno != errno.EINVAL:
                                    raise
                                continue

                            data['rpt'] = binascii.hexlify(rpt)
                            data['rtag'] = binascii.hexlify(rtag)
                            self.assertEqual(rpt, pt, repr(data))

        def runCBC(self, fname):
            columns = [ 'COUNT', 'KEY', 'IV', 'PLAINTEXT', 'CIPHERTEXT', ]
            with cryptodev.KATParser(fname, columns) as parser:
                self.runCBCWithParser(parser)

        def runCBCWithParser(self, parser):
            curfun = None
            for mode, lines in next(parser):
                if mode == 'ENCRYPT':
                    swapptct = False
                    curfun = Crypto.encrypt
                elif mode == 'DECRYPT':
                    swapptct = True
                    curfun = Crypto.decrypt
                else:
                    raise RuntimeError('unknown mode: %r' % repr(mode))

                for data in lines:
                    curcnt = int(data['COUNT'])
                    cipherkey = binascii.unhexlify(data['KEY'])
                    iv = binascii.unhexlify(data['IV'])
                    pt = binascii.unhexlify(data['PLAINTEXT'])
                    ct = binascii.unhexlify(data['CIPHERTEXT'])
                    if swapptct:
                        pt, ct = ct, pt
                    # run the fun
                    c = Crypto(cryptodev.CRYPTO_AES_CBC, cipherkey, crid=crid)
                    r = curfun(c, pt, iv)
                    self.assertEqual(r, ct)

        def runXTS(self, fname, meth):
            columns = [ 'COUNT', 'DataUnitLen', 'Key', 'DataUnitSeqNumber',
                'PT', 'CT']
            with cryptodev.KATParser(fname, columns) as parser:
                self.runXTSWithParser(parser, meth)

        def runXTSWithParser(self, parser, meth):
            curfun = None
            for mode, lines in next(parser):
                if mode == 'ENCRYPT':
                    swapptct = False
                    curfun = Crypto.encrypt
                elif mode == 'DECRYPT':
                    swapptct = True
                    curfun = Crypto.decrypt
                else:
                    raise RuntimeError('unknown mode: %r' % repr(mode))

                for data in lines:
                    curcnt = int(data['COUNT'])
                    nbits = int(data['DataUnitLen'])
                    cipherkey = binascii.unhexlify(data['Key'])
                    iv = struct.pack('QQ', int(data['DataUnitSeqNumber']), 0)
                    pt = binascii.unhexlify(data['PT'])
                    ct = binascii.unhexlify(data['CT'])
                    if nbits % 128 != 0:
                        # XXX - mark as skipped
                        continue
                    if swapptct:
                        pt, ct = ct, pt
                    # run the fun
                    try:
                        c = Crypto(meth, cipherkey, crid=crid)
                        r = curfun(c, pt, iv)
                    except EnvironmentError as e:
                        # Can't test ciphers the driver does not support.
                        if e.errno != errno.EOPNOTSUPP:
                            raise
                        continue
                    self.assertEqual(r, ct)

        def runCCMEncrypt(self, fname):
            with cryptodev.KATCCMParser(fname) as parser:
                self.runCCMEncryptWithParser(parser)

        def runCCMEncryptWithParser(self, parser):
            for data in next(parser):
                Nlen = int(data['Nlen'])
                if Nlen != 12:
                    # OCF only supports 12 byte IVs
                    continue
                key = binascii.unhexlify(data['Key'])
                nonce = binascii.unhexlify(data['Nonce'])
                Alen = int(data['Alen'])
                if Alen != 0:
                    aad = binascii.unhexlify(data['Adata'])
                else:
                    aad = None
                payload = binascii.unhexlify(data['Payload'])
                ct = binascii.unhexlify(data['CT'])

                try:
                    c = Crypto(crid=crid,
                        cipher=cryptodev.CRYPTO_AES_CCM_16, key=key,
                        mac=cryptodev.CRYPTO_AES_CCM_CBC_MAC, mackey=key,
                        maclen=16)
                    r, tag = Crypto.encrypt(c, payload, nonce, aad)
                except EnvironmentError as e:
                    if e.errno != errno.EOPNOTSUPP:
                        raise
                    continue

                out = r + tag
                self.assertEqual(out, ct, "Count " + data['Count'] + " Actual: " + \
-                    repr(out.encode("hex")) + " Expected: " + \
+                    repr(binascii.hexlify(out)) + " Expected: " + \
                    repr(data) + " on " + cname)

        def runCCMDecrypt(self, fname):
            with cryptodev.KATCCMParser(fname) as parser:
                self.runCCMDecryptWithParser(parser)

        def runCCMDecryptWithParser(self, parser):
            # XXX: Note that all of the current CCM decryption test
            # vectors use IV and tag sizes that aren't supported by OCF,
            # so none of the tests are actually run.
            for data in next(parser):
                Nlen = int(data['Nlen'])
                if Nlen != 12:
                    # OCF only supports 12 byte IVs
                    continue
                Tlen = int(data['Tlen'])
                if Tlen != 16:
                    # OCF only supports 16 byte tags
                    continue
                key = binascii.unhexlify(data['Key'])
                nonce = binascii.unhexlify(data['Nonce'])
                Alen = int(data['Alen'])
                if Alen != 0:
                    aad = binascii.unhexlify(data['Adata'])
                else:
                    aad = None
                ct = binascii.unhexlify(data['CT'])
                tag = ct[-16:]
                ct = ct[:-16]

                try:
                    c = Crypto(crid=crid,
                        cipher=cryptodev.CRYPTO_AES_CCM_16, key=key,
                        mac=cryptodev.CRYPTO_AES_CCM_CBC_MAC, mackey=key,
                        maclen=16)
                except EnvironmentError as e:
                    if e.errno != errno.EOPNOTSUPP:
                        raise
                    continue

                if data['Result'] == 'Fail':
                    self.assertRaises(IOError,
                        c.decrypt, ct, nonce, aad, tag)
                else:
                    r = Crypto.decrypt(c, ct, nonce, aad, tag)
                    payload = binascii.unhexlify(data['Payload'])
                    plen = int(data['Plen'])
                    payload = payload[:plen]
                    self.assertEqual(r, payload, "Count " + data['Count'] + \
-                        " Actual: " + repr(r.encode("hex")) + \
+                        " Actual: " + repr(binascii.hexlify(r)) + \
                        " Expected: " + repr(data) + \
                        " on " + cname)

        ###############
        ##### DES #####
        ###############

        @unittest.skipIf(cname not in desmodules, 'skipping DES on %s' % (cname))
        def test_tdes(self):
            for i in katg('KAT_TDES', 'TCBC[a-z]*.rsp'):
                self.runTDES(i)

        def runTDES(self, fname):
            columns = [ 'COUNT', 'KEYs', 'IV', 'PLAINTEXT', 'CIPHERTEXT', ]
            with cryptodev.KATParser(fname, columns) as parser:
                self.runTDESWithParser(parser)

        def runTDESWithParser(self, parser):
            curfun = None
            for mode, lines in next(parser):
                if mode == 'ENCRYPT':
                    swapptct = False
                    curfun = Crypto.encrypt
                elif mode == 'DECRYPT':
                    swapptct = True
                    curfun = Crypto.decrypt
                else:
                    raise RuntimeError('unknown mode: %r' % repr(mode))

                for data in lines:
                    curcnt = int(data['COUNT'])
                    key = data['KEYs'] * 3
                    cipherkey = binascii.unhexlify(key)
                    iv = binascii.unhexlify(data['IV'])
                    pt = binascii.unhexlify(data['PLAINTEXT'])
                    ct = binascii.unhexlify(data['CIPHERTEXT'])
                    if swapptct:
                        pt, ct = ct, pt
                    # run the fun
                    c = Crypto(cryptodev.CRYPTO_3DES_CBC, cipherkey, crid=crid)
                    r = curfun(c, pt, iv)
                    self.assertEqual(r, ct)

        ###############
        ##### SHA #####
        ###############

        @unittest.skipIf(cname not in shamodules, 'skipping SHA on %s' % str(cname))
        def test_sha(self):
            for i in katg('shabytetestvectors', 'SHA*Msg.rsp'):
                self.runSHA(i)

        def runSHA(self, fname):
            # Skip SHA512_(224|256) tests
            if fname.find('SHA512_') != -1:
                return
            columns = [ 'Len', 'Msg', 'MD' ]
            with cryptodev.KATParser(fname, columns) as parser:
                self.runSHAWithParser(parser)

        def runSHAWithParser(self, parser):
            for hashlength, lines in next(parser):
                # E.g., hashlength will be "L=20" (bytes)
                hashlen = int(hashlength.split("=")[1])
                if hashlen == 20:
                    alg = cryptodev.CRYPTO_SHA1
                elif hashlen == 28:
                    alg = cryptodev.CRYPTO_SHA2_224
                elif hashlen == 32:
                    alg = cryptodev.CRYPTO_SHA2_256
                elif hashlen == 48:
                    alg = cryptodev.CRYPTO_SHA2_384
                elif hashlen == 64:
                    alg = cryptodev.CRYPTO_SHA2_512
                else:
                    # Skip unsupported hashes
                    # Slurp remaining input in section
                    for data in lines:
                        continue
                    continue

                for data in lines:
                    msg = binascii.unhexlify(data['Msg'])
                    msg = msg[:int(data['Len'])]
                    md = binascii.unhexlify(data['MD'])

                    try:
                        c = Crypto(mac=alg, crid=crid, maclen=hashlen)
                    except EnvironmentError as e:
                        # Can't test hashes the driver does not support.
if e.errno != errno.EOPNOTSUPP: raise continue _, r = c.encrypt(msg, iv="") self.assertEqual(r, md, "Actual: " + \ - repr(r.encode("hex")) + " Expected: " + repr(data) + " on " + cname) + repr(binascii.hexlify(r)) + " Expected: " + repr(data) + " on " + cname) @unittest.skipIf(cname not in shamodules, 'skipping SHA-HMAC on %s' % str(cname)) def test_sha1hmac(self): for i in katg('hmactestvectors', 'HMAC.rsp'): self.runSHA1HMAC(i) def runSHA1HMAC(self, fname): columns = [ 'Count', 'Klen', 'Tlen', 'Key', 'Msg', 'Mac' ] with cryptodev.KATParser(fname, columns) as parser: self.runSHA1HMACWithParser(parser) def runSHA1HMACWithParser(self, parser): for hashlength, lines in next(parser): # E.g., hashlength will be "L=20" (bytes) hashlen = int(hashlength.split("=")[1]) blocksize = None if hashlen == 20: alg = cryptodev.CRYPTO_SHA1_HMAC blocksize = 64 elif hashlen == 28: alg = cryptodev.CRYPTO_SHA2_224_HMAC blocksize = 64 elif hashlen == 32: alg = cryptodev.CRYPTO_SHA2_256_HMAC blocksize = 64 elif hashlen == 48: alg = cryptodev.CRYPTO_SHA2_384_HMAC blocksize = 128 elif hashlen == 64: alg = cryptodev.CRYPTO_SHA2_512_HMAC blocksize = 128 else: # Skip unsupported hashes # Slurp remaining input in section for data in lines: continue continue for data in lines: key = binascii.unhexlify(data['Key']) msg = binascii.unhexlify(data['Msg']) mac = binascii.unhexlify(data['Mac']) tlen = int(data['Tlen']) if len(key) > blocksize: continue try: c = Crypto(mac=alg, mackey=key, crid=crid, maclen=hashlen) except EnvironmentError as e: # Can't test hashes the driver does not support. if e.errno != errno.EOPNOTSUPP: raise continue _, r = c.encrypt(msg, iv="") self.assertEqual(r[:tlen], mac, "Actual: " + \ - repr(r.encode("hex")) + " Expected: " + repr(data)) + repr(binascii.hexlify(r)) + " Expected: " + repr(data)) return GendCryptoTestCase cryptosoft = GenTestCase('cryptosoft0') aesni = GenTestCase('aesni0') ccr = GenTestCase('ccr0') ccp = GenTestCase('ccp0') if __name__ == '__main__': unittest.main() Index: user/ngie/bug-237403 =================================================================== --- user/ngie/bug-237403 (revision 348028) +++ user/ngie/bug-237403 (revision 348029) Property changes on: user/ngie/bug-237403 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r348023-348028
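
For reference, a minimal sketch of driving the generated test classes above directly, outside of unittest's normal discovery. This is illustrative only and not part of the change: the file name runsoft.py is hypothetical, and it assumes cryptotest.py and cryptodev.py are importable from the current directory on a FreeBSD host with /dev/crypto available.

#!/usr/local/bin/python2
# runsoft.py (hypothetical): run only the cryptosoft0 test class produced
# by GenTestCase('cryptosoft0').  GenTestCase() returns None when the
# named driver is not present, so guard before building a suite from it.
from __future__ import print_function

import unittest

import cryptotest

if __name__ == '__main__':
    if cryptotest.cryptosoft is None:
        print('cryptosoft0 not available; nothing to run')
    else:
        suite = unittest.TestLoader().loadTestsFromTestCase(
            cryptotest.cryptosoft)
        unittest.TextTestRunner(verbosity=2).run(suite)

Because GenTestCase() captures crid at class-creation time, each generated class exercises exactly one crypto driver; the same pattern should work for the aesni, ccr, and ccp classes defined at the bottom of cryptotest.py.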