Index: projects/make-check-sandbox/share/misc/organization.dot =================================================================== --- projects/make-check-sandbox/share/misc/organization.dot (revision 321993) +++ projects/make-check-sandbox/share/misc/organization.dot (revision 321994) @@ -1,103 +1,103 @@ # $FreeBSD$ # This file is meant to show the infrastructural organization of the # FreeBSD Project; what kind of teams we have and how they relate to # each other. # For a detailed description of the responsibilities and duties of the listed # teams, please see our Administration page at # http://www.freebsd.org/administration.html . # # The graphical output can be generated from this file with the following # command: # $ dot -T png -o file.png organization.dot # # The dot binary is part of the graphics/graphviz port. digraph org { node [color=lightblue2, style=filled, bgcolor=black]; # Meta-categories go here _devel [label="FreeBSD Developers"] _admin [label="FreeBSD Infrastructure Administrators"] _misc [label="Miscellaneous Hats"] # Development teams go here alphabetically sorted core [label="Core Team\ncore@FreeBSD.org\nallanjude, bapt, bcr,\nbenno, emaste, gnn,\nhrs, jhb, kmoore"] coresecretary [label="Core Team Secretary\ncore-secretary@FreeBSD.org\nmatthew"] doccommitters [label="Doc/www Committers\ndoc-committers@FreeBSD.org"] doceng [label="Documentation Engineering Team\ndoceng@FreeBSD.org\ngjb, blackend,\ngabor, hrs,\nwblock"] portscommitters [label="Ports Committers\nports-committers@FreeBSD.org"] portmgr [label="Port Management Team\nportmgr@FreeBSD.org\nadamw, antoine, bapt, bdrewery\nfeld, mat, rene, swills"] portmgrsecretary [label="Port Management Team Secretary\nportmgr-secretary@FreeBSD.org\nrene"] re [label="Primary Release Engineering Team\nre@FreeBSD.org\ngjb, kib,\nbdrewery, blackend,\nrgrimes, delphij,\nhrs, glebius,\nmarius, rwatson"] secteam [label="Security Team\nsecteam@FreeBSD.org\ndelphij,\ndes, gavin, gjb,\nglebius, remko"] portssecteam [label="Ports Security Team\nports-secteam@FreeBSD.org\ndelphij, amdmi3, eadler, feld, jgh, junovitch, rea, sbz, simon, swills, zi"] secteamsecretary [label="Security Team Secretary\nsecteam-secretary@FreeBSD.org\nremko"] securityofficer [label="Security Officer Team\nsecurity-officer@FreeBSD.org\ndelphij, des,\ngavin, gjb,\nglebius, remko"] srccommitters [label="Src Committers\nsrc-committers@FreeBSD.org"] # Admin teams go here alphabetically sorted accounts [label="Accounts Team\naccounts@FreeBSD.org\nmarkm, simon, kensmith,\ndhw"] backups [label="Backup Administrators\nbackups@FreeBSD.org\nsimon, kensmith,\ndhw"] bugmeister [label="Bugmeister Team\nbugmeister@FreeBSD.org\neadler, gavin, gonzo"] clusteradm [label="Cluster Administrators\nclusteradm@FreeBSD.org\nallanjude, brd,\ndhw, gavin,\ngjb, peter,\nsbruno, simon,\nzi"] cvsupmaster [label="CVSup Mirror Site Coordinators\ncvsup-master@FreeBSD.org\nkuriyama, jdp,\nkensmith"] dnsadm [label="DNS Administrators\ndnsadm@FreeBSD.org\nbillf, dg, ps,\nkensmith, peter"] mirroradmin [label="FTP/WWW Mirror Site Coordinators\nmirror-admin@FreeBSD.org\nkuriyama, kensmith"] ncvs [label="CVS src Repository Managers\nncvs@FreeBSD.org\njoe, kuriyama, markm,\nsimon, peter"] perforceadmin [label="Perforce Repository Administrators\nperforce-admin@FreeBSD.org\nscottl, kensmith, gordon,\nrwatson, peter, dhw"] -postmaster [label="Postmaster Team\npostmaster@FreeBSD.org\njmb, brd, sahil, dhw"] +postmaster [label="Postmaster Team\npostmaster@FreeBSD.org\ndhw, ler, pi, rea, remko, zi"] refadm 
[label="Reference Systems Administrators\nrefadm@FreeBSD.org\njake, billf, markm, simon,\nobrien, ps, kensmith,\npeter, dhw"] webmaster [label="Webmaster Team\nwebmaster@FreeBSD.org\ngjb, wblock, blackend,\ngabor, hrs, wosch"] # Misc hats go here alphabetically sorted donations [label="Donations Team\ndonations@FreeBSD.org\nwilko, gahr, pgolluci,\nobrien, trhodes, ds,\nrwatson"] marketing [label="Marketing Team\nmarketing@FreeBSD.org\nSteven Beedle, Denise Ebery, deb,\njkoshy, dru, mwlucas, imp,\nKris Moore, murray, mattt,\nJeremy C. Reed, rwatson"] vendorrelations [label="Vendor Relations\nvendor-relations@FreeBSD.org\ncore, FreeBSD Foundation"] # Here are the team relationships. # Group together all the entries for the superior team. # Keep the list sorted by the superior team entry. _admin -> accounts _admin -> backups _admin -> bugmeister _admin -> clusteradm _admin -> ncvs _admin -> cvsupmaster _admin -> dnsadm _admin -> mirroradmin _admin -> perforceadmin _admin -> refadm _admin -> postmaster _admin -> webmaster _devel -> core _misc -> donations _misc -> marketing _misc -> vendorrelations core -> coresecretary core -> doceng core -> portmgr core -> re core -> securityofficer core -> srccommitters doceng -> doccommitters portmgr -> portmgrsecretary portmgr -> portscommitters securityofficer -> secteam securityofficer -> portssecteam secteam -> secteamsecretary } Index: projects/make-check-sandbox/sys/boot/arm/at91/libat91/lib.h =================================================================== --- projects/make-check-sandbox/sys/boot/arm/at91/libat91/lib.h (revision 321993) +++ projects/make-check-sandbox/sys/boot/arm/at91/libat91/lib.h (revision 321994) @@ -1,65 +1,65 @@ /*- * Copyright (c) 2006 M. Warner Losh. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef ARM_BOOT_LIB_H #define ARM_BOOT_LIB_H int getc(int); -int putchar(int); -int xputchar(int); -int printf(const char *fmt,...); +void putchar(int); +void xputchar(int); +void printf(const char *fmt,...); /* The following function write eeprom at ee_addr using data */ /* from data_add for size bytes. 
*/ int ReadEEPROM(unsigned eeoff, unsigned char *data_addr, unsigned size); void WriteEEPROM(unsigned eeoff, char *data_addr, unsigned size); void InitEEPROM(void); /* XMODEM protocol */ int xmodem_rx(char *dst); /* */ void start_wdog(int n); void reset(void); /* Delay us */ void Delay(int us); #define ToASCII(x) ((x > 9) ? (x + 'A' - 0xa) : (x + '0')) int p_IsWhiteSpace(char cValue); unsigned p_HexCharValue(char cValue); unsigned p_ASCIIToHex(const char *buf); unsigned p_ASCIIToDec(const char *buf); void p_memset(char *buffer, char value, int size); int p_strlen(const char *buffer); char *strcpy(char *to, const char *from); void memcpy(void *to, const void *from, unsigned size); int p_memcmp(const char *to, const char *from, unsigned size); int strcmp(const char *to, const char *from); #endif Index: projects/make-check-sandbox/sys/boot/arm/at91/libat91/printf.c =================================================================== --- projects/make-check-sandbox/sys/boot/arm/at91/libat91/printf.c (revision 321993) +++ projects/make-check-sandbox/sys/boot/arm/at91/libat91/printf.c (revision 321994) @@ -1,71 +1,70 @@ /*- * Copyright (c) 1998 Robert Nordier * All rights reserved. * Copyright (c) 2006 M. Warner Losh * All rights reserved. * * Redistribution and use in source and binary forms are freely * permitted provided that the above copyright notice and this * paragraph and the following disclaimer are duplicated in all * such forms. * * This software is provided "AS IS" and without any express or * implied warranties, including, without limitation, the implied * warranties of merchantability and fitness for a particular * purpose. * * $FreeBSD$ */ #include #include "lib.h" -int +void printf(const char *fmt,...) { va_list ap; const char *hex = "0123456789abcdef"; char buf[10]; - const char *fmt_orig = fmt; char *s; unsigned u; int c; va_start(ap, fmt); while ((c = *fmt++)) { if (c == '%') { c = *fmt++; switch (c) { case 'c': xputchar(va_arg(ap, int)); continue; case 's': for (s = va_arg(ap, char *); *s; s++) xputchar(*s); continue; case 'd': /* A lie, always prints unsigned */ case 'u': u = va_arg(ap, unsigned); s = buf; do *s++ = '0' + u % 10U; while (u /= 10U); dumpbuf:; while (--s >= buf) xputchar(*s); continue; case 'x': u = va_arg(ap, unsigned); s = buf; do *s++ = hex[u & 0xfu]; while (u >>= 4); goto dumpbuf; } } xputchar(c); } va_end(ap); - return (int)(fmt - fmt_orig); + return; } Index: projects/make-check-sandbox/sys/boot/arm/at91/libat91/putchar.c =================================================================== --- projects/make-check-sandbox/sys/boot/arm/at91/libat91/putchar.c (revision 321993) +++ projects/make-check-sandbox/sys/boot/arm/at91/libat91/putchar.c (revision 321994) @@ -1,64 +1,62 @@ /*- * Copyright (c) 2006 M. Warner Losh. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
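The %u and %x cases in the printf() above build the number least-significant digit first into a small buffer and then emit the buffer backwards, sharing the emit loop through the dumpbuf label. A minimal hosted sketch of the same reverse-buffer technique (put_unsigned is an illustrative name, and stdio's putchar stands in for the boot loader's xputchar):

#include <stdio.h>

/* Sketch of the reverse-buffer digit emitter used by the boot printf
 * above: generate digits least-significant first, then walk the
 * buffer backwards to print them in the right order. */
static void
put_unsigned(unsigned u, unsigned base)
{
	const char *hex = "0123456789abcdef";
	char buf[sizeof(u) * 8 + 1];	/* sized for the base-2 worst case */
	char *s = buf;

	do
		*s++ = hex[u % base];
	while (u /= base);
	while (--s >= buf)
		putchar(*s);
}

int
main(void)
{
	put_unsigned(54321, 10);	/* prints 54321 */
	putchar('\n');
	put_unsigned(0xdeadU, 16);	/* prints dead */
	putchar('\n');
	return (0);
}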
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This software is derived from software provided by kwikbyte without * copyright as follows: * * No warranty, expressed or implied, is included with this software. It is * provided "AS IS" and no warranty of any kind including statutory or aspects * relating to merchantability or fitness for any purpose is provided. All * intellectual property rights of others is maintained with the respective * owners. This software is not copyrighted and is intended for reference * only. * * $FreeBSD$ */ #include "at91rm9200.h" #include "at91rm9200_lowlevel.h" #include "lib.h" /* - * int putchar(int ch) + * void putchar(int ch) * Writes a character to the DBGU port. It assumes that DBGU has * already been initialized. */ -int +void putchar(int ch) { AT91PS_USART pUSART = (AT91PS_USART)AT91C_BASE_DBGU; while (!(pUSART->US_CSR & AT91C_US_TXRDY)) continue; pUSART->US_THR = (ch & 0xFF); - return (1); } -int +void xputchar(int ch) { - if (ch == '\n') - putchar('\r'); - putchar(ch); - return (ch == '\n' ? 2 : 1); + if (ch == '\n') + putchar('\r'); + putchar(ch); } Index: projects/make-check-sandbox/sys/boot/arm/ixp425/boot2/ixp425_board.c =================================================================== --- projects/make-check-sandbox/sys/boot/arm/ixp425/boot2/ixp425_board.c (revision 321993) +++ projects/make-check-sandbox/sys/boot/arm/ixp425/boot2/ixp425_board.c (revision 321994) @@ -1,773 +1,771 @@ /*- * Copyright (c) 2008 John Hay. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
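The xputchar() wrappers in these boot libraries (the at91 one above and its ixp425 twin below) exist for one reason: on a raw serial console a bare '\n' only moves down a line, so a '\r' must be sent first. A self-contained sketch of that translation, with a hypothetical uart_tx() standing in for the wait-for-TXRDY-then-write-US_THR sequence:

/* Sketch of the newline fixup done by xputchar().  uart_tx() is a
 * hypothetical blocking byte transmitter; the real code polls the
 * UART status register until the transmitter is ready. */
void uart_tx(int ch);	/* assumed primitive: emit one raw byte */

void
serial_putchar(int ch)
{
	if (ch == '\n')
		uart_tx('\r');	/* carriage return before line feed */
	uart_tx(ch);
}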
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include "lib.h" #include "cf_ata.h" #include #include #include struct board_config { const char *desc; int (*probe)(int boardtype_hint); void (*init)(void); }; /* set of registered boards */ SET_DECLARE(boards, struct board_config); #define BOARD_CONFIG(name, _desc) \ static struct board_config name##config = { \ .desc = _desc, \ .probe = name##_probe, \ .init = name##_init, \ }; \ DATA_SET(boards, name##config) static u_int cputype; #define cpu_is_ixp43x() (cputype == CPU_ID_IXP435) static u_int8_t *ubase; static u_int8_t uart_getreg(u_int8_t *, int); static void uart_setreg(u_int8_t *, int, u_int8_t); static void cf_init(void); static void cf_clr(void); #ifdef DEBUG #define DPRINTF(fmt, ...) printf(fmt, __VA_ARGS__) #else #define DPRINTF(fmt, ...) #endif const char * board_init(void) { struct board_config **pbp; cputype = cpu_id() & CPU_ID_CPU_MASK; SET_FOREACH(pbp, boards) /* XXX pass down redboot board type */ if ((*pbp)->probe(0)) { (*pbp)->init(); return (*pbp)->desc; } /* XXX panic, unknown board type */ return "???"; } /* * This should be called just before starting the kernel. This is so * that one can undo incompatible hardware settings. */ void clr_board(void) { cf_clr(); } /* * General support functions. */ /* * DELAY should delay for the number of microseconds. * The idea is that the inner loop should take 1us, so val is the * number of usecs to delay. */ void DELAY(int val) { volatile int sub; volatile int subsub; sub = val; while(sub) { subsub = 3; while(subsub) subsub--; sub--; } } u_int32_t swap32(u_int32_t a) { return (((a & 0xff) << 24) | ((a & 0xff00) << 8) | ((a & 0xff0000) >> 8) | ((a & 0xff000000) >> 24)); } u_int16_t swap16(u_int16_t val) { return (val << 8) | (val >> 8); } /* * uart related funcs */ static u_int8_t uart_getreg(u_int8_t *bas, int off) { return *((volatile u_int32_t *)(bas + (off << 2))) & 0xff; } static void uart_setreg(u_int8_t *bas, int off, u_int8_t val) { *((volatile u_int32_t *)(bas + (off << 2))) = (u_int32_t)val; } int getc(int seconds) { int c, delay, limit; c = 0; delay = 10000; limit = seconds * 1000000/10000; while ((uart_getreg(ubase, REG_LSR) & LSR_RXRDY) == 0 && --limit) DELAY(delay); if ((uart_getreg(ubase, REG_LSR) & LSR_RXRDY) == LSR_RXRDY) c = uart_getreg(ubase, REG_DATA); return c; } -int +void putchar(int ch) { int delay, limit; delay = 500; limit = 20; while ((uart_getreg(ubase, REG_LSR) & LSR_THRE) == 0 && --limit) DELAY(delay); uart_setreg(ubase, REG_DATA, ch); limit = 40; while ((uart_getreg(ubase, REG_LSR) & LSR_TEMT) == 0 && --limit) DELAY(delay); - return (1); } -int +void xputchar(int ch) { if (ch == '\n') putchar('\r'); putchar(ch); - return (ch == '\n' ? 2 : 1); } void putstr(const char *str) { while(*str) xputchar(*str++); } void puthex8(u_int8_t ch) { const char *hex = "0123456789abcdef"; putchar(hex[ch >> 4]); putchar(hex[ch & 0xf]); } void puthexlist(const u_int8_t *str, int length) { while(length) { puthex8(*str); putchar(' '); str++; length--; } } /* * * CF/IDE functions. 
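The BOARD_CONFIG()/SET_FOREACH() pairing above is FreeBSD's linker-set idiom: DATA_SET() drops each board's struct board_config into a dedicated ELF section at link time, and board_init() walks that section at run time, so a new board registers itself without any central table to edit. A small self-contained illustration of the idiom (FreeBSD-specific, via <sys/linker_set.h>; the hook names here are invented):

#include <sys/linker_set.h>
#include <stdio.h>

/* Self-registration via a linker set: every DATA_SET() entry lands
 * in the same named ELF section, and SET_FOREACH() iterates it. */
struct hook {
	const char *name;
	int (*probe)(void);
};

SET_DECLARE(hooks, struct hook);

static int always(void) { return (1); }

static struct hook demo_hook = { "demo", always };
DATA_SET(hooks, demo_hook);

int
main(void)
{
	struct hook **hp;

	SET_FOREACH(hp, hooks)
		if ((*hp)->probe())
			printf("probed: %s\n", (*hp)->name);
	return (0);
}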
* */ struct { u_int64_t dsize; u_int64_t total_secs; u_int8_t heads; u_int8_t sectors; u_int32_t cylinders; u_int32_t *cs1to; u_int32_t *cs2to; u_int8_t *cs1; u_int8_t *cs2; u_int32_t use_lba; u_int32_t use_stream8; u_int32_t debug; u_int8_t status; u_int8_t error; } dskinf; static void cfenable16(void); static void cfdisable16(void); static u_int8_t cfread8(u_int32_t off); static u_int16_t cfread16(u_int32_t off); static void cfreadstream8(void *buf, int length); static void cfreadstream16(void *buf, int length); static void cfwrite8(u_int32_t off, u_int8_t val); static u_int8_t cfaltread8(u_int32_t off); static void cfaltwrite8(u_int32_t off, u_int8_t val); static int cfwait(u_int8_t mask); static int cfaltwait(u_int8_t mask); static int cfcmd(u_int32_t cmd, u_int32_t cylinder, u_int32_t head, u_int32_t sector, u_int32_t count, u_int32_t feature); static void cfreset(void); #ifdef DEBUG static int cfgetparams(void); #endif static void cfprintregs(void); static void cf_init(void) { u_int8_t status; #ifdef DEBUG int rval; #endif /* NB: board init routines setup other parts of dskinf */ dskinf.use_stream8 = 0; dskinf.use_lba = 0; dskinf.debug = 1; DPRINTF("cs1 %x, cs2 %x\n", dskinf.cs1, dskinf.cs2); /* Setup the CF window */ *dskinf.cs1to |= (EXP_BYTE_EN | EXP_WR_EN | EXP_BYTE_RD16 | EXP_CS_EN); DPRINTF("t1 %x, ", *dskinf.cs1to); *dskinf.cs2to |= (EXP_BYTE_EN | EXP_WR_EN | EXP_BYTE_RD16 | EXP_CS_EN); DPRINTF("t2 %x\n", *dskinf.cs2to); /* Detect if there is a disk. */ cfwrite8(CF_DRV_HEAD, CF_D_IBM); DELAY(1000); status = cfread8(CF_STATUS); if (status != 0x50) printf("cf-ata0 %x\n", (u_int32_t)status); if (status == 0xff) { printf("cf_ata0: No disk!\n"); return; } cfreset(); if (dskinf.use_stream8) { DPRINTF("setting %d bit mode.\n", 8); cfwrite8(CF_FEATURE, 0x01); /* Enable 8 bit transfers */ cfwrite8(CF_COMMAND, ATA_SETFEATURES); cfaltwait(CF_S_READY); } #ifdef DEBUG rval = cfgetparams(); if (rval) return; #endif dskinf.use_lba = 1; dskinf.debug = 0; } static void cf_clr(void) { cfwrite8(CF_DRV_HEAD, CF_D_IBM); cfaltwait(CF_S_READY); cfwrite8(CF_FEATURE, 0x81); /* Enable 8 bit transfers */ cfwrite8(CF_COMMAND, ATA_SETFEATURES); cfaltwait(CF_S_READY); } static void cfenable16(void) { u_int32_t val; val = *dskinf.cs1to; *dskinf.cs1to = val &~ EXP_BYTE_EN; DELAY(100); #if 0 DPRINTF("%s: cs1 timing reg %x\n", *dskinf.cs1to, __func__); #endif } static void cfdisable16(void) { u_int32_t val; DELAY(100); val = *dskinf.cs1to; *dskinf.cs1to = val | EXP_BYTE_EN; #if 0 DPRINTF("%s: cs1 timing reg %x\n", *dskinf.cs1to, __func__); #endif } static u_int8_t cfread8(u_int32_t off) { volatile u_int8_t *vp; vp = (volatile u_int8_t *)(dskinf.cs1 + off); return *vp; } static void cfreadstream8(void *buf, int length) { u_int8_t *lbuf; u_int8_t tmp; lbuf = buf; while (length) { tmp = cfread8(CF_DATA); *lbuf = tmp; #ifdef DEBUG if (dskinf.debug && (length > (512 - 32))) { if ((length % 16) == 0) xputchar('\n'); puthex8(tmp); putchar(' '); } #endif lbuf++; length--; } #ifdef DEBUG if (dskinf.debug) xputchar('\n'); #endif } static u_int16_t cfread16(u_int32_t off) { volatile u_int16_t *vp; vp = (volatile u_int16_t *)(dskinf.cs1 + off); return swap16(*vp); } static void cfreadstream16(void *buf, int length) { u_int16_t *lbuf; length = length / 2; cfenable16(); lbuf = buf; while (length--) { *lbuf = cfread16(CF_DATA); lbuf++; } cfdisable16(); } static void cfwrite8(u_int32_t off, u_int8_t val) { volatile u_int8_t *vp; vp = (volatile u_int8_t *)(dskinf.cs1 + off); *vp = val; } #if 0 static void cfwrite16(u_int32_t 
off, u_int16_t val) { volatile u_int16_t *vp; vp = (volatile u_int16_t *)(dskinf.cs1 + off); *vp = val; } #endif static u_int8_t cfaltread8(u_int32_t off) { volatile u_int8_t *vp; off &= 0x0f; vp = (volatile u_int8_t *)(dskinf.cs2 + off); return *vp; } static void cfaltwrite8(u_int32_t off, u_int8_t val) { volatile u_int8_t *vp; /* * This is documented in the Intel appnote 302456. */ off &= 0x0f; vp = (volatile u_int8_t *)(dskinf.cs2 + off); *vp = val; } static int cfwait(u_int8_t mask) { u_int8_t status; u_int32_t tout; tout = 0; while (tout <= 5000000) { status = cfread8(CF_STATUS); if (status == 0xff) { printf("%s: master: no status, reselecting\n", __func__); cfwrite8(CF_DRV_HEAD, CF_D_IBM); DELAY(1); status = cfread8(CF_STATUS); } if (status == 0xff) return -1; dskinf.status = status; if (!(status & CF_S_BUSY)) { if (status & CF_S_ERROR) { dskinf.error = cfread8(CF_ERROR); printf("%s: error, status 0x%x error 0x%x\n", __func__, status, dskinf.error); } if ((status & mask) == mask) { DPRINTF("%s: status 0x%x mask 0x%x tout %u\n", __func__, status, mask, tout); return (status & CF_S_ERROR); } } if (tout > 1000) { tout += 1000; DELAY(1000); } else { tout += 10; DELAY(10); } } return -1; } static int cfaltwait(u_int8_t mask) { u_int8_t status; u_int32_t tout; tout = 0; while (tout <= 5000000) { status = cfaltread8(CF_ALT_STATUS); if (status == 0xff) { printf("cfaltwait: master: no status, reselecting\n"); cfwrite8(CF_DRV_HEAD, CF_D_IBM); DELAY(1); status = cfread8(CF_STATUS); } if (status == 0xff) return -1; dskinf.status = status; if (!(status & CF_S_BUSY)) { if (status & CF_S_ERROR) dskinf.error = cfread8(CF_ERROR); if ((status & mask) == mask) { DPRINTF("cfaltwait: tout %u\n", tout); return (status & CF_S_ERROR); } } if (tout > 1000) { tout += 1000; DELAY(1000); } else { tout += 10; DELAY(10); } } return -1; } static int cfcmd(u_int32_t cmd, u_int32_t cylinder, u_int32_t head, u_int32_t sector, u_int32_t count, u_int32_t feature) { if (cfwait(0) < 0) { printf("cfcmd: timeout\n"); return -1; } cfwrite8(CF_FEATURE, feature); cfwrite8(CF_CYL_L, cylinder); cfwrite8(CF_CYL_H, cylinder >> 8); if (dskinf.use_lba) cfwrite8(CF_DRV_HEAD, CF_D_IBM | CF_D_LBA | head); else cfwrite8(CF_DRV_HEAD, CF_D_IBM | head); cfwrite8(CF_SECT_NUM, sector); cfwrite8(CF_SECT_CNT, count); cfwrite8(CF_COMMAND, cmd); return 0; } static void cfreset(void) { u_int8_t status; u_int32_t tout; cfwrite8(CF_DRV_HEAD, CF_D_IBM); DELAY(1); #ifdef DEBUG cfprintregs(); #endif cfread8(CF_STATUS); cfaltwrite8(CF_ALT_DEV_CTR, CF_A_IDS | CF_A_RESET); DELAY(10000); cfaltwrite8(CF_ALT_DEV_CTR, CF_A_IDS); DELAY(10000); cfread8(CF_ERROR); DELAY(3000); for (tout = 0; tout < 310000; tout++) { cfwrite8(CF_DRV_HEAD, CF_D_IBM); DELAY(1); status = cfread8(CF_STATUS); if (!(status & CF_S_BUSY)) break; DELAY(100); } DELAY(1); if (status & CF_S_BUSY) { cfprintregs(); printf("cfreset: Status stayed busy after reset.\n"); } DPRINTF("cfreset: finished, tout %u\n", tout); } #ifdef DEBUG static int cfgetparams(void) { u_int8_t *buf; buf = (u_int8_t *)(0x170000); p_memset((char *)buf, 0, 1024); /* Select the drive. 
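cfwait() and cfaltwait() above share a two-speed polling scheme: the first millisecond is polled in 10 microsecond steps so a fast device returns almost immediately, after which the loop drops to coarse 1 millisecond steps until a 5 second ceiling. The timing skeleton on its own, with a hypothetical device_ready() in place of the status-register reads:

/* Skeleton of the escalating-backoff poll used by cfwait() above.
 * device_ready() is a stand-in status check; DELAY() busy-waits the
 * given number of microseconds, as in the boot code. */
extern int device_ready(void);
extern void DELAY(int us);

static int
poll_ready(void)
{
	unsigned tout = 0;		/* elapsed time, microseconds */

	while (tout <= 5000000) {	/* give up after ~5 seconds */
		if (device_ready())
			return (0);
		if (tout > 1000) {	/* past 1ms: coarse 1ms steps */
			tout += 1000;
			DELAY(1000);
		} else {		/* first 1ms: fine 10us steps */
			tout += 10;
			DELAY(10);
		}
	}
	return (-1);			/* timed out */
}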
*/ cfwrite8(CF_DRV_HEAD, CF_D_IBM); DELAY(1); cfcmd(ATA_ATA_IDENTIFY, 0, 0, 0, 0, 0); if (cfaltwait(CF_S_READY | CF_S_DSC | CF_S_DRQ)) { printf("cfgetparams: ATA_IDENTIFY failed.\n"); return -1; } if (dskinf.use_stream8) cfreadstream8(buf, 512); else cfreadstream16(buf, 512); if (dskinf.debug) cfprintregs(); #if 0 memcpy(&dskinf.ata_params, buf, sizeof(struct ata_params)); dskinf.cylinders = dskinf.ata_params.cylinders; dskinf.heads = dskinf.ata_params.heads; dskinf.sectors = dskinf.ata_params.sectors; printf("dsk0: sec %x, hd %x, cyl %x, stat %x, err %x\n", (u_int32_t)dskinf.ata_params.sectors, (u_int32_t)dskinf.ata_params.heads, (u_int32_t)dskinf.ata_params.cylinders, (u_int32_t)dskinf.status, (u_int32_t)dskinf.error); #endif dskinf.status = cfread8(CF_STATUS); if (dskinf.debug) printf("cfgetparams: ata_params * %x, stat %x\n", (u_int32_t)buf, (u_int32_t)dskinf.status); return 0; } #endif /* DEBUG */ static void cfprintregs(void) { u_int8_t rv; putstr("cfprintregs: regs error "); rv = cfread8(CF_ERROR); puthex8(rv); putstr(", count "); rv = cfread8(CF_SECT_CNT); puthex8(rv); putstr(", sect "); rv = cfread8(CF_SECT_NUM); puthex8(rv); putstr(", cyl low "); rv = cfread8(CF_CYL_L); puthex8(rv); putstr(", cyl high "); rv = cfread8(CF_CYL_H); puthex8(rv); putstr(", drv head "); rv = cfread8(CF_DRV_HEAD); puthex8(rv); putstr(", status "); rv = cfread8(CF_STATUS); puthex8(rv); putstr("\n"); } int avila_read(char *dest, unsigned source, unsigned length) { if (dskinf.use_lba == 0 && source == 0) source++; if (dskinf.debug) printf("avila_read: 0x%x, sect %d num secs %d\n", (u_int32_t)dest, source, length); while (length) { cfwait(CF_S_READY); /* cmd, cyl, head, sect, count, feature */ cfcmd(ATA_READ, (source >> 8) & 0xffff, source >> 24, source & 0xff, 1, 0); cfwait(CF_S_READY | CF_S_DRQ | CF_S_DSC); if (dskinf.use_stream8) cfreadstream8(dest, 512); else cfreadstream16(dest, 512); length--; source++; dest += 512; } return 0; } /* * Gateworks Avila Support. */ static int avila_probe(int boardtype_hint) { volatile u_int32_t *cs; /* * Redboot only configures the chip selects that are needed, so * use that to figure out if it is an Avila or ADI board. The * Avila boards use CS2 and ADI does not. */ cs = (u_int32_t *)(IXP425_EXP_HWBASE + EXP_TIMING_CS2_OFFSET); return (*cs != 0); } static void avila_init(void) { /* Config the serial port. RedBoot should do the rest. */ ubase = (u_int8_t *)(IXP425_UART0_HWBASE); dskinf.cs1to = (u_int32_t *)(IXP425_EXP_HWBASE + EXP_TIMING_CS1_OFFSET); dskinf.cs2to = (u_int32_t *)(IXP425_EXP_HWBASE + EXP_TIMING_CS2_OFFSET); dskinf.cs1 = (u_int8_t *)IXP425_EXP_BUS_CS1_HWBASE; dskinf.cs2 = (u_int8_t *)IXP425_EXP_BUS_CS2_HWBASE; cf_init(); } BOARD_CONFIG(avila, "Gateworks Avila"); /* * Gateworks Cambria Support. */ static int cambria_probe(int boardtype_hint) { return cpu_is_ixp43x(); } static void cambria_init(void) { /* Config the serial port. RedBoot should do the rest. */ ubase = (u_int8_t *)(IXP425_UART0_HWBASE); dskinf.cs1to = (u_int32_t *)(IXP425_EXP_HWBASE + EXP_TIMING_CS3_OFFSET); dskinf.cs2to = (u_int32_t *)(IXP425_EXP_HWBASE + EXP_TIMING_CS4_OFFSET); dskinf.cs1 = (u_int8_t *)CAMBRIA_CFSEL0_HWBASE; dskinf.cs2 = (u_int8_t *)CAMBRIA_CFSEL1_HWBASE; cf_init(); } BOARD_CONFIG(cambria, "Gateworks Cambria"); /* * Pronghorn Metro Support. */ static int pronghorn_probe(int boardtype_hint) { volatile u_int32_t *cs; /* * Redboot only configures the chip selects that are needed, so * use that to figure out if it is an Avila or ADI board.
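avila_read() above performs the standard LBA28 split when it calls cfcmd(): bits 0-7 of the sector number land in the sector-number register, bits 8-23 in the cylinder low/high pair, and bits 24-27 in the low nibble of the drive/head register next to the LBA flag. Worked out for an arbitrary example LBA:

#include <stdio.h>

/* Worked example of the LBA28 register split done by avila_read()
 * above, for lba = 0x01234567.  cfcmd() receives these as its
 * cylinder, head and sector arguments and ORs CF_D_LBA into the
 * drive/head register. */
int
main(void)
{
	unsigned lba = 0x01234567;

	printf("sector   = 0x%02x\n", lba & 0xff);		/* 0x67 */
	printf("cyl low  = 0x%02x\n", (lba >> 8) & 0xff);	/* 0x45 */
	printf("cyl high = 0x%02x\n", (lba >> 16) & 0xff);	/* 0x23 */
	printf("head     = 0x%01x\n", (lba >> 24) & 0x0f);	/* 0x1  */
	return (0);
}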
The * Avila boards use CS2 and ADI does not. */ cs = (u_int32_t *)(IXP425_EXP_HWBASE + EXP_TIMING_CS2_OFFSET); return (*cs == 0); } static void pronghorn_init(void) { /* Config the serial port. RedBoot should do the rest. */ ubase = (u_int8_t *)(IXP425_UART1_HWBASE); dskinf.cs1to = (u_int32_t *)(IXP425_EXP_HWBASE + EXP_TIMING_CS3_OFFSET); dskinf.cs2to = (u_int32_t *)(IXP425_EXP_HWBASE + EXP_TIMING_CS4_OFFSET); dskinf.cs1 = (u_int8_t *)IXP425_EXP_BUS_CS3_HWBASE; dskinf.cs2 = (u_int8_t *)IXP425_EXP_BUS_CS4_HWBASE; cf_init(); } BOARD_CONFIG(pronghorn, "Pronghorn Metro"); Index: projects/make-check-sandbox/sys/boot/arm/ixp425/boot2/lib.h =================================================================== --- projects/make-check-sandbox/sys/boot/arm/ixp425/boot2/lib.h (revision 321993) +++ projects/make-check-sandbox/sys/boot/arm/ixp425/boot2/lib.h (revision 321994) @@ -1,67 +1,67 @@ /*- * Copyright (c) 2008 John Hay. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef ARM_BOOT_LIB_H #define ARM_BOOT_LIB_H #include #include int main(void); void DELAY(int); int getc(int); -int putchar(int); -int xputchar(int); +void putchar(int); +void xputchar(int); void putstr(const char *); void puthex8(u_int8_t); void puthexlist(const u_int8_t *, int); -int printf(const char *fmt,...); +void printf(const char *fmt,...); void bzero(void *, size_t); char *strcpy(char *to, const char *from); int strcmp(const char *to, const char *from); int p_strlen(const char *); int p_memcmp(const char *, const char *, unsigned); void *memchr(const void *, int, size_t); void memcpy(void *to, const void *from, unsigned size); void *memmem(const void *, size_t, const void *, size_t); void p_memset(char *buffer, char value, int size); #define strlen p_strlen #define memcmp p_memcmp #define memset p_memset u_int16_t swap16(u_int16_t); u_int32_t swap32(u_int32_t); const char *board_init(void); void clr_board(void); int avila_read(char*, unsigned, unsigned); u_int cpu_id(void); #endif /* !ARM_BOOT_LIB_H */ Index: projects/make-check-sandbox/sys/boot/i386/boot2/boot2.c =================================================================== --- projects/make-check-sandbox/sys/boot/i386/boot2/boot2.c (revision 321993) +++ projects/make-check-sandbox/sys/boot/i386/boot2/boot2.c (revision 321994) @@ -1,648 +1,646 @@ /*- * Copyright (c) 1998 Robert Nordier * All rights reserved. * * Redistribution and use in source and binary forms are freely * permitted provided that the above copyright notice and this * paragraph and the following disclaimer are duplicated in all * such forms. * * This software is provided "AS IS" and without any express or * implied warranties, including, without limitation, the implied * warranties of merchantability and fitness for a particular * purpose. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "boot2.h" #include "lib.h" #include "paths.h" #include "rbx.h" /* Define to 0 to omit serial support */ #ifndef SERIAL #define SERIAL 1 #endif #define IO_KEYBOARD 1 #define IO_SERIAL 2 #if SERIAL #define DO_KBD (ioctrl & IO_KEYBOARD) #define DO_SIO (ioctrl & IO_SERIAL) #else #define DO_KBD (1) #define DO_SIO (0) #endif #define SECOND 18 /* Circa that many ticks in a second. 
*/ #define ARGS 0x900 #define NOPT 14 #define NDEV 3 #define MEM_BASE 0x12 #define MEM_EXT 0x15 #define DRV_HARD 0x80 #define DRV_MASK 0x7f #define TYPE_AD 0 #define TYPE_DA 1 #define TYPE_MAXHARD TYPE_DA #define TYPE_FD 2 extern uint32_t _end; static const char optstr[NOPT] = "DhaCcdgmnpqrsv"; /* Also 'P', 'S' */ static const unsigned char flags[NOPT] = { RBX_DUAL, RBX_SERIAL, RBX_ASKNAME, RBX_CDROM, RBX_CONFIG, RBX_KDB, RBX_GDB, RBX_MUTE, RBX_NOINTR, RBX_PAUSE, RBX_QUIET, RBX_DFLTROOT, RBX_SINGLE, RBX_VERBOSE }; static const char *const dev_nm[NDEV] = {"ad", "da", "fd"}; static const unsigned char dev_maj[NDEV] = {30, 4, 2}; static struct dsk { unsigned drive; unsigned type; unsigned unit; uint8_t slice; uint8_t part; unsigned start; int init; } dsk; static char cmd[512], cmddup[512], knamebuf[1024]; static const char *kname; uint32_t opts; static struct bootinfo bootinfo; #if SERIAL static int comspeed = SIOSPD; static uint8_t ioctrl = IO_KEYBOARD; #endif int main(void); void exit(int); static void load(void); static int parse(void); static int dskread(void *, unsigned, unsigned); -static int printf(const char *,...); -static int putchar(int); +static void printf(const char *,...); +static void putchar(int); static int drvread(void *, unsigned, unsigned); static int keyhit(unsigned); static int xputc(int); static int xgetc(int); static inline int getc(int); static void memcpy(void *, const void *, int); static void memcpy(void *dst, const void *src, int len) { const char *s = src; char *d = dst; while (len--) *d++ = *s++; } static inline int strcmp(const char *s1, const char *s2) { for (; *s1 == *s2 && *s1; s1++, s2++); return (unsigned char)*s1 - (unsigned char)*s2; } #define UFS_SMALL_CGBASE #include "ufsread.c" static int xfsread(ufs_ino_t inode, void *buf, size_t nbyte) { if ((size_t)fsread(inode, buf, nbyte) != nbyte) { printf("Invalid %s\n", "format"); return -1; } return 0; } static inline void getstr(void) { char *s; int c; s = cmd; for (;;) { switch (c = xgetc(0)) { case 0: break; case '\177': case '\b': if (s > cmd) { s--; printf("\b \b"); } break; case '\n': case '\r': *s = 0; return; default: if (s - cmd < sizeof(cmd) - 1) *s++ = c; putchar(c); } } } static inline void putc(int c) { v86.addr = 0x10; v86.eax = 0xe00 | (c & 0xff); v86.ebx = 0x7; v86int(); } int main(void) { uint8_t autoboot; ufs_ino_t ino; size_t nbyte; dmadat = (void *)(roundup2(__base + (int32_t)&_end, 0x10000) - __base); v86.ctl = V86_FLAGS; v86.efl = PSL_RESERVED_DEFAULT | PSL_I; dsk.drive = *(uint8_t *)PTOV(ARGS); dsk.type = dsk.drive & DRV_HARD ? TYPE_AD : TYPE_FD; dsk.unit = dsk.drive & DRV_MASK; dsk.slice = *(uint8_t *)PTOV(ARGS + 1) + 1; bootinfo.bi_version = BOOTINFO_VERSION; bootinfo.bi_size = sizeof(bootinfo); /* Process configuration file */ autoboot = 1; if ((ino = lookup(PATH_CONFIG)) || (ino = lookup(PATH_DOTCONFIG))) { nbyte = fsread(ino, cmd, sizeof(cmd) - 1); cmd[nbyte] = '\0'; } if (*cmd) { memcpy(cmddup, cmd, sizeof(cmd)); if (parse()) autoboot = 0; if (!OPT_CHECK(RBX_QUIET)) printf("%s: %s", PATH_CONFIG, cmddup); /* Do not process this command twice */ *cmd = 0; } /* * Try to exec stage 3 boot loader. If interrupted by a keypress, * or in case of failure, try to load a kernel directly instead. */ if (!kname) { kname = PATH_LOADER; if (autoboot && !keyhit(3*SECOND)) { load(); kname = PATH_KERNEL; } } /* Present the user with the boot2 prompt. 
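getstr() above uses the classic erase echo for backspace and DEL: printing "\b \b" steps the cursor back, blanks the echoed character on screen, and steps back again. A hosted sketch of the same line editor, with a hypothetical raw (unechoed) read_key() in place of xgetc():

#include <stdio.h>

/* Sketch of the "\b \b" erase idiom used by getstr() above.
 * read_key() is a hypothetical raw key reader; terminal echo is
 * assumed off, so the editor echoes characters itself. */
extern int read_key(void);

static void
get_line(char *buf, int len)
{
	int c, n = 0;

	for (;;) {
		c = read_key();
		if (c == '\r' || c == '\n') {
			buf[n] = '\0';
			return;
		}
		if (c == '\b' || c == '\177') {	/* backspace or DEL */
			if (n > 0) {
				n--;
				printf("\b \b");	/* erase echoed char */
			}
		} else if (n < len - 1) {
			buf[n++] = c;
			putchar(c);		/* echo */
		}
	}
}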
*/ for (;;) { if (!autoboot || !OPT_CHECK(RBX_QUIET)) printf("\nFreeBSD/x86 boot\n" "Default: %u:%s(%u,%c)%s\n" "boot: ", dsk.drive & DRV_MASK, dev_nm[dsk.type], dsk.unit, 'a' + dsk.part, kname); if (DO_SIO) sio_flush(); if (!autoboot || keyhit(3*SECOND)) getstr(); else if (!autoboot || !OPT_CHECK(RBX_QUIET)) putchar('\n'); autoboot = 0; if (parse()) putchar('\a'); else load(); } } /* XXX - Needed for btxld to link the boot2 binary; do not remove. */ void exit(int x) { } static void load(void) { union { struct exec ex; Elf32_Ehdr eh; } hdr; static Elf32_Phdr ep[2]; static Elf32_Shdr es[2]; caddr_t p; ufs_ino_t ino; uint32_t addr; int k; uint8_t i, j; if (!(ino = lookup(kname))) { if (!ls) printf("No %s\n", kname); return; } if (xfsread(ino, &hdr, sizeof(hdr))) return; if (N_GETMAGIC(hdr.ex) == ZMAGIC) { addr = hdr.ex.a_entry & 0xffffff; p = PTOV(addr); fs_off = PAGE_SIZE; if (xfsread(ino, p, hdr.ex.a_text)) return; p += roundup2(hdr.ex.a_text, PAGE_SIZE); if (xfsread(ino, p, hdr.ex.a_data)) return; } else if (IS_ELF(hdr.eh)) { fs_off = hdr.eh.e_phoff; for (j = k = 0; k < hdr.eh.e_phnum && j < 2; k++) { if (xfsread(ino, ep + j, sizeof(ep[0]))) return; if (ep[j].p_type == PT_LOAD) j++; } for (i = 0; i < 2; i++) { p = PTOV(ep[i].p_paddr & 0xffffff); fs_off = ep[i].p_offset; if (xfsread(ino, p, ep[i].p_filesz)) return; } p += roundup2(ep[1].p_memsz, PAGE_SIZE); bootinfo.bi_symtab = VTOP(p); if (hdr.eh.e_shnum == hdr.eh.e_shstrndx + 3) { fs_off = hdr.eh.e_shoff + sizeof(es[0]) * (hdr.eh.e_shstrndx + 1); if (xfsread(ino, &es, sizeof(es))) return; for (i = 0; i < 2; i++) { *(Elf32_Word *)p = es[i].sh_size; p += sizeof(es[i].sh_size); fs_off = es[i].sh_offset; if (xfsread(ino, p, es[i].sh_size)) return; p += es[i].sh_size; } } addr = hdr.eh.e_entry & 0xffffff; bootinfo.bi_esymtab = VTOP(p); } else { printf("Invalid %s\n", "format"); return; } bootinfo.bi_kernelname = VTOP(kname); bootinfo.bi_bios_dev = dsk.drive; __exec((caddr_t)addr, RB_BOOTINFO | (opts & RBX_MASK), MAKEBOOTDEV(dev_maj[dsk.type], dsk.slice, dsk.unit, dsk.part), 0, 0, 0, VTOP(&bootinfo)); } static int parse() { char *arg = cmd; char *ep, *p, *q; const char *cp; unsigned int drv; int c, i, j; size_t k; while ((c = *arg++)) { if (c == ' ' || c == '\t' || c == '\n') continue; for (p = arg; *p && *p != '\n' && *p != ' ' && *p != '\t'; p++); ep = p; if (*p) *p++ = 0; if (c == '-') { while ((c = *arg++)) { if (c == 'P') { if (*(uint8_t *)PTOV(0x496) & 0x10) { cp = "yes"; } else { opts |= OPT_SET(RBX_DUAL) | OPT_SET(RBX_SERIAL); cp = "no"; } printf("Keyboard: %s\n", cp); continue; #if SERIAL } else if (c == 'S') { j = 0; while ((unsigned int)(i = *arg++ - '0') <= 9) j = j * 10 + i; if (j > 0 && i == -'0') { comspeed = j; break; } /* Fall through to error below ('S' not in optstr[]). */ #endif } for (i = 0; c != optstr[i]; i++) if (i == NOPT - 1) return -1; opts ^= OPT_SET(flags[i]); } #if SERIAL ioctrl = OPT_CHECK(RBX_DUAL) ? (IO_SERIAL|IO_KEYBOARD) : OPT_CHECK(RBX_SERIAL) ? 
IO_SERIAL : IO_KEYBOARD; if (DO_SIO) { if (sio_init(115200 / comspeed) != 0) ioctrl &= ~IO_SERIAL; } #endif } else { for (q = arg--; *q && *q != '('; q++); if (*q) { drv = -1; if (arg[1] == ':') { drv = *arg - '0'; if (drv > 9) return (-1); arg += 2; } if (q - arg != 2) return -1; for (i = 0; arg[0] != dev_nm[i][0] || arg[1] != dev_nm[i][1]; i++) if (i == NDEV - 1) return -1; dsk.type = i; arg += 3; dsk.unit = *arg - '0'; if (arg[1] != ',' || dsk.unit > 9) return -1; arg += 2; dsk.slice = WHOLE_DISK_SLICE; if (arg[1] == ',') { dsk.slice = *arg - '0' + 1; if (dsk.slice > NDOSPART + 1) return -1; arg += 2; } if (arg[1] != ')') return -1; dsk.part = *arg - 'a'; if (dsk.part > 7) return (-1); arg += 2; if (drv == -1) drv = dsk.unit; dsk.drive = (dsk.type <= TYPE_MAXHARD ? DRV_HARD : 0) + drv; dsk_meta = 0; } k = ep - arg; if (k > 0) { if (k >= sizeof(knamebuf)) return -1; memcpy(knamebuf, arg, k + 1); kname = knamebuf; } } arg = p; } return 0; } static int dskread(void *buf, unsigned lba, unsigned nblk) { struct dos_partition *dp; struct disklabel *d; char *sec; unsigned i; uint8_t sl; const char *reason; if (!dsk_meta) { sec = dmadat->secbuf; dsk.start = 0; if (drvread(sec, DOSBBSECTOR, 1)) return -1; dp = (void *)(sec + DOSPARTOFF); sl = dsk.slice; if (sl < BASE_SLICE) { for (i = 0; i < NDOSPART; i++) if (dp[i].dp_typ == DOSPTYP_386BSD && (dp[i].dp_flag & 0x80 || sl < BASE_SLICE)) { sl = BASE_SLICE + i; if (dp[i].dp_flag & 0x80 || dsk.slice == COMPATIBILITY_SLICE) break; } if (dsk.slice == WHOLE_DISK_SLICE) dsk.slice = sl; } if (sl != WHOLE_DISK_SLICE) { if (sl != COMPATIBILITY_SLICE) dp += sl - BASE_SLICE; if (dp->dp_typ != DOSPTYP_386BSD) { reason = "slice"; goto error; } dsk.start = dp->dp_start; } if (drvread(sec, dsk.start + LABELSECTOR, 1)) return -1; d = (void *)(sec + LABELOFFSET); if (d->d_magic != DISKMAGIC || d->d_magic2 != DISKMAGIC) { if (dsk.part != RAW_PART) { reason = "label"; goto error; } } else { if (!dsk.init) { if (d->d_type == DTYPE_SCSI) dsk.type = TYPE_DA; dsk.init++; } if (dsk.part >= d->d_npartitions || !d->d_partitions[dsk.part].p_size) { reason = "partition"; goto error; } dsk.start += d->d_partitions[dsk.part].p_offset; dsk.start -= d->d_partitions[RAW_PART].p_offset; } } return drvread(buf, dsk.start + lba, nblk); error: printf("Invalid %s\n", reason); return -1; } -static int +static void printf(const char *fmt,...) { va_list ap; static char buf[10]; - const char *fmt_orig = fmt; char *s; unsigned u; int c; va_start(ap, fmt); while ((c = *fmt++)) { if (c == '%') { c = *fmt++; switch (c) { case 'c': putchar(va_arg(ap, int)); continue; case 's': for (s = va_arg(ap, char *); *s; s++) putchar(*s); continue; case 'u': u = va_arg(ap, unsigned); s = buf; do *s++ = '0' + u % 10U; while (u /= 10U); while (--s >= buf) putchar(*s); continue; } } putchar(c); } va_end(ap); - return (int)(fmt - fmt_orig); + return; } -static int +static void putchar(int c) { if (c == '\n') xputc('\r'); xputc(c); - return (c == '\n' ? 
2 : 1); } static int drvread(void *buf, unsigned lba, unsigned nblk) { static unsigned c = 0x2d5c7c2f; if (!OPT_CHECK(RBX_QUIET)) { xputc(c = c << 8 | c >> 24); xputc('\b'); } v86.ctl = V86_ADDR | V86_CALLF | V86_FLAGS; v86.addr = XREADORG; /* call to xread in boot1 */ v86.es = VTOPSEG(buf); v86.eax = lba; v86.ebx = VTOPOFF(buf); v86.ecx = lba >> 16; v86.edx = nblk << 8 | dsk.drive; v86int(); v86.ctl = V86_FLAGS; if (V86_CY(v86.efl)) { printf("error %u lba %u\n", v86.eax >> 8 & 0xff, lba); return -1; } return 0; } static int keyhit(unsigned ticks) { uint32_t t0, t1; if (OPT_CHECK(RBX_NOINTR)) return 0; t0 = 0; for (;;) { if (xgetc(1)) return 1; t1 = *(uint32_t *)PTOV(0x46c); if (!t0) t0 = t1; if ((uint32_t)(t1 - t0) >= ticks) return 0; } } static int xputc(int c) { if (DO_KBD) putc(c); if (DO_SIO) sio_putc(c); return c; } static int getc(int fn) { v86.addr = 0x16; v86.eax = fn << 8; v86int(); return fn == 0 ? v86.eax & 0xff : !V86_ZR(v86.efl); } static int xgetc(int fn) { if (OPT_CHECK(RBX_NOINTR)) return 0; for (;;) { if (DO_KBD && getc(1)) return fn ? 1 : getc(0); if (DO_SIO && sio_ischar()) return fn ? 1 : sio_getc(); if (fn) return 0; } } Index: projects/make-check-sandbox/sys/dev/ksyms/ksyms.c =================================================================== --- projects/make-check-sandbox/sys/dev/ksyms/ksyms.c (revision 321993) +++ projects/make-check-sandbox/sys/dev/ksyms/ksyms.c (revision 321994) @@ -1,512 +1,512 @@ /*- * Copyright (c) 2008-2009, Stacey Son * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
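keyhit() above times out against the BIOS tick counter at 0040:006Ch (linear 0x46c), which the timer interrupt advances at 1193182/65536, roughly 18.2 Hz; that is where the earlier SECOND = 18 approximation comes from. Its timing skeleton, with unsigned subtraction keeping the comparison valid across counter wrap:

#include <stdint.h>

/* Timing skeleton of keyhit() above.  bios_ticks() stands in for
 * reading the BIOS tick word at 0x46c (~18.2 Hz). */
extern uint32_t bios_ticks(void);
extern int key_available(void);

static int
wait_key(unsigned ticks)
{
	uint32_t t0 = 0, t1;

	for (;;) {
		if (key_available())
			return (1);
		t1 = bios_ticks();
		if (!t0)
			t0 = t1;	/* latch start; 0 means "unset", as in boot2 */
		if ((uint32_t)(t1 - t0) >= ticks)
			return (0);	/* timed out */
	}
}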
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "linker_if.h" #define SHDR_NULL 0 #define SHDR_SYMTAB 1 #define SHDR_STRTAB 2 #define SHDR_SHSTRTAB 3 #define SHDR_NUM 4 #define STR_SYMTAB ".symtab" #define STR_STRTAB ".strtab" #define STR_SHSTRTAB ".shstrtab" #define KSYMS_DNAME "ksyms" static d_open_t ksyms_open; static d_read_t ksyms_read; static d_mmap_single_t ksyms_mmap_single; static struct cdevsw ksyms_cdevsw = { .d_version = D_VERSION, - .d_flags = D_TRACKCLOSE, + .d_flags = 0, .d_open = ksyms_open, .d_read = ksyms_read, .d_mmap_single = ksyms_mmap_single, .d_name = KSYMS_DNAME }; struct ksyms_softc { LIST_ENTRY(ksyms_softc) sc_list; vm_offset_t sc_uaddr; size_t sc_usize; vm_object_t sc_obj; vm_size_t sc_objsz; struct proc *sc_proc; }; static struct sx ksyms_mtx; static struct cdev *ksyms_dev; static LIST_HEAD(, ksyms_softc) ksyms_list = LIST_HEAD_INITIALIZER(ksyms_list); static const char ksyms_shstrtab[] = "\0" STR_SYMTAB "\0" STR_STRTAB "\0" STR_SHSTRTAB "\0"; struct ksyms_hdr { Elf_Ehdr kh_ehdr; Elf_Phdr kh_txtphdr; Elf_Phdr kh_datphdr; Elf_Shdr kh_shdr[SHDR_NUM]; char kh_shstrtab[sizeof(ksyms_shstrtab)]; }; struct tsizes { size_t ts_symsz; size_t ts_strsz; }; struct toffsets { struct ksyms_softc *to_sc; vm_offset_t to_symoff; vm_offset_t to_stroff; unsigned to_stridx; size_t to_resid; }; static MALLOC_DEFINE(M_KSYMS, "KSYMS", "Kernel Symbol Table"); /* * Get the symbol and string table sizes for a kernel module. Add it to the * running total. */ static int ksyms_size_permod(linker_file_t lf, void *arg) { struct tsizes *ts; const Elf_Sym *symtab; caddr_t strtab; long syms; ts = arg; syms = LINKER_SYMTAB_GET(lf, &symtab); ts->ts_symsz += syms * sizeof(Elf_Sym); ts->ts_strsz += LINKER_STRTAB_GET(lf, &strtab); return (0); } /* * For kernel module get the symbol and string table sizes, returning the * totals in *ts. */ static void ksyms_size_calc(struct tsizes *ts) { ts->ts_symsz = 0; ts->ts_strsz = 0; (void)linker_file_foreach(ksyms_size_permod, ts); } static int ksyms_emit(struct ksyms_softc *sc, void *buf, off_t off, size_t sz) { struct iovec iov; struct uio uio; iov.iov_base = buf; iov.iov_len = sz; uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_offset = off; uio.uio_resid = (ssize_t)sz; uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = UIO_WRITE; uio.uio_td = curthread; return (uiomove_object(sc->sc_obj, sc->sc_objsz, &uio)); } #define SYMBLKSZ (256 * sizeof(Elf_Sym)) /* * For a kernel module, add the symbol and string tables into the * snapshot buffer. Fix up the offsets in the tables. 
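The fixup described in that comment is mostly string-table arithmetic: as each module's tables are appended to the snapshot, every symbol's st_name must be shifted by the number of string bytes already merged (and, for modules, st_value replaced by the resolved absolute address). A toy illustration of the offset rebasing, with invented names and sizes:

#include <stdio.h>
#include <string.h>

/* Toy sketch of the st_name rebasing done by ksyms_add() below: when
 * two string tables are concatenated, every symbol of the second
 * table needs the first table's size added to its name offset. */
struct sym { unsigned st_name; };

int
main(void)
{
	char strtab[32] = "\0alpha\0";		/* table 1, 7 bytes */
	const char *strtab2 = "\0beta\0";	/* table 2, 6 bytes */
	struct sym s = { 1 };			/* "beta" within table 2 */
	unsigned stridx = 7;			/* bytes already merged */

	memcpy(strtab + stridx, strtab2, 6);	/* concatenate tables */
	s.st_name += stridx;			/* rebase offset: 1 -> 8 */
	printf("symbol name: %s\n", strtab + s.st_name);	/* beta */
	return (0);
}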
*/ static int ksyms_add(linker_file_t lf, void *arg) { char *buf; struct ksyms_softc *sc; struct toffsets *to; const Elf_Sym *symtab; Elf_Sym *symp; caddr_t strtab; size_t len, numsyms, strsz, symsz; linker_symval_t symval; int error, i, nsyms; buf = malloc(SYMBLKSZ, M_KSYMS, M_WAITOK); to = arg; sc = to->to_sc; MOD_SLOCK; numsyms = LINKER_SYMTAB_GET(lf, &symtab); strsz = LINKER_STRTAB_GET(lf, &strtab); symsz = numsyms * sizeof(Elf_Sym); while (symsz > 0) { len = min(SYMBLKSZ, symsz); bcopy(symtab, buf, len); /* * Fix up symbol table for kernel modules: * string offsets need adjusted * symbol values made absolute */ symp = (Elf_Sym *) buf; nsyms = len / sizeof(Elf_Sym); for (i = 0; i < nsyms; i++) { symp[i].st_name += to->to_stridx; if (lf->id > 1 && LINKER_SYMBOL_VALUES(lf, (c_linker_sym_t)&symtab[i], &symval) == 0) { symp[i].st_value = (uintptr_t)symval.value; } } if (len > to->to_resid) { MOD_SUNLOCK; free(buf, M_KSYMS); return (ENXIO); } to->to_resid -= len; error = ksyms_emit(sc, buf, to->to_symoff, len); to->to_symoff += len; if (error != 0) { MOD_SUNLOCK; free(buf, M_KSYMS); return (error); } symtab += nsyms; symsz -= len; } free(buf, M_KSYMS); MOD_SUNLOCK; if (strsz > to->to_resid) return (ENXIO); to->to_resid -= strsz; error = ksyms_emit(sc, strtab, to->to_stroff, strsz); to->to_stroff += strsz; to->to_stridx += strsz; return (error); } /* * Create a single ELF symbol table for the kernel and kernel modules loaded * at this time. Write this snapshot out in the process address space. Return * 0 on success, otherwise error. */ static int ksyms_snapshot(struct ksyms_softc *sc, struct tsizes *ts) { struct toffsets to; struct ksyms_hdr *hdr; int error; hdr = malloc(sizeof(*hdr), M_KSYMS, M_WAITOK | M_ZERO); /* * Create the ELF header. */ hdr->kh_ehdr.e_ident[EI_PAD] = 0; hdr->kh_ehdr.e_ident[EI_MAG0] = ELFMAG0; hdr->kh_ehdr.e_ident[EI_MAG1] = ELFMAG1; hdr->kh_ehdr.e_ident[EI_MAG2] = ELFMAG2; hdr->kh_ehdr.e_ident[EI_MAG3] = ELFMAG3; hdr->kh_ehdr.e_ident[EI_DATA] = ELF_DATA; hdr->kh_ehdr.e_ident[EI_OSABI] = ELFOSABI_FREEBSD; hdr->kh_ehdr.e_ident[EI_CLASS] = ELF_CLASS; hdr->kh_ehdr.e_ident[EI_VERSION] = EV_CURRENT; hdr->kh_ehdr.e_ident[EI_ABIVERSION] = 0; hdr->kh_ehdr.e_type = ET_EXEC; hdr->kh_ehdr.e_machine = ELF_ARCH; hdr->kh_ehdr.e_version = EV_CURRENT; hdr->kh_ehdr.e_entry = 0; hdr->kh_ehdr.e_phoff = offsetof(struct ksyms_hdr, kh_txtphdr); hdr->kh_ehdr.e_shoff = offsetof(struct ksyms_hdr, kh_shdr); hdr->kh_ehdr.e_flags = 0; hdr->kh_ehdr.e_ehsize = sizeof(Elf_Ehdr); hdr->kh_ehdr.e_phentsize = sizeof(Elf_Phdr); hdr->kh_ehdr.e_phnum = 2; /* Text and Data */ hdr->kh_ehdr.e_shentsize = sizeof(Elf_Shdr); hdr->kh_ehdr.e_shnum = SHDR_NUM; hdr->kh_ehdr.e_shstrndx = SHDR_SHSTRTAB; /* * Add both the text and data program headers. */ hdr->kh_txtphdr.p_type = PT_LOAD; /* XXX - is there a way to put the actual .text addr/size here? */ hdr->kh_txtphdr.p_vaddr = 0; hdr->kh_txtphdr.p_memsz = 0; hdr->kh_txtphdr.p_flags = PF_R | PF_X; hdr->kh_datphdr.p_type = PT_LOAD; /* XXX - is there a way to put the actual .data addr/size here? */ hdr->kh_datphdr.p_vaddr = 0; hdr->kh_datphdr.p_memsz = 0; hdr->kh_datphdr.p_flags = PF_R | PF_W | PF_X; /* * Add the section headers: null, symtab, strtab, shstrtab. 
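The object assembled here is exactly what userland sees: reading /dev/ksyms yields a complete ELF image whose symbol and string tables cover the kernel plus every loaded module. A hedged sketch of a minimal consumer that only verifies the magic (a real consumer would go on to parse the section headers built below):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hedged sketch of a /dev/ksyms consumer.  The device reads back as
 * an ordinary ELF image, so any ELF symbol-table reader can be
 * pointed at it; only the 4-byte magic check is shown. */
int
main(void)
{
	unsigned char ident[4];
	int fd;

	fd = open("/dev/ksyms", O_RDONLY);
	if (fd < 0)
		return (1);
	if (read(fd, ident, sizeof(ident)) == (ssize_t)sizeof(ident) &&
	    ident[0] == 0x7f && ident[1] == 'E' &&
	    ident[2] == 'L' && ident[3] == 'F')
		printf("/dev/ksyms: ELF snapshot present\n");
	close(fd);
	return (0);
}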
*/ /* First section header - null */ /* Second section header - symtab */ hdr->kh_shdr[SHDR_SYMTAB].sh_name = 1; /* String offset (skip null) */ hdr->kh_shdr[SHDR_SYMTAB].sh_type = SHT_SYMTAB; hdr->kh_shdr[SHDR_SYMTAB].sh_flags = 0; hdr->kh_shdr[SHDR_SYMTAB].sh_addr = 0; hdr->kh_shdr[SHDR_SYMTAB].sh_offset = sizeof(*hdr); hdr->kh_shdr[SHDR_SYMTAB].sh_size = ts->ts_symsz; hdr->kh_shdr[SHDR_SYMTAB].sh_link = SHDR_STRTAB; hdr->kh_shdr[SHDR_SYMTAB].sh_info = ts->ts_symsz / sizeof(Elf_Sym); hdr->kh_shdr[SHDR_SYMTAB].sh_addralign = sizeof(long); hdr->kh_shdr[SHDR_SYMTAB].sh_entsize = sizeof(Elf_Sym); /* Third section header - strtab */ hdr->kh_shdr[SHDR_STRTAB].sh_name = 1 + sizeof(STR_SYMTAB); hdr->kh_shdr[SHDR_STRTAB].sh_type = SHT_STRTAB; hdr->kh_shdr[SHDR_STRTAB].sh_flags = 0; hdr->kh_shdr[SHDR_STRTAB].sh_addr = 0; hdr->kh_shdr[SHDR_STRTAB].sh_offset = hdr->kh_shdr[SHDR_SYMTAB].sh_offset + ts->ts_symsz; hdr->kh_shdr[SHDR_STRTAB].sh_size = ts->ts_strsz; hdr->kh_shdr[SHDR_STRTAB].sh_link = 0; hdr->kh_shdr[SHDR_STRTAB].sh_info = 0; hdr->kh_shdr[SHDR_STRTAB].sh_addralign = sizeof(char); hdr->kh_shdr[SHDR_STRTAB].sh_entsize = 0; /* Fourth section - shstrtab */ hdr->kh_shdr[SHDR_SHSTRTAB].sh_name = 1 + sizeof(STR_SYMTAB) + sizeof(STR_STRTAB); hdr->kh_shdr[SHDR_SHSTRTAB].sh_type = SHT_STRTAB; hdr->kh_shdr[SHDR_SHSTRTAB].sh_flags = 0; hdr->kh_shdr[SHDR_SHSTRTAB].sh_addr = 0; hdr->kh_shdr[SHDR_SHSTRTAB].sh_offset = offsetof(struct ksyms_hdr, kh_shstrtab); hdr->kh_shdr[SHDR_SHSTRTAB].sh_size = sizeof(ksyms_shstrtab); hdr->kh_shdr[SHDR_SHSTRTAB].sh_link = 0; hdr->kh_shdr[SHDR_SHSTRTAB].sh_info = 0; hdr->kh_shdr[SHDR_SHSTRTAB].sh_addralign = 0 /* sizeof(char) */; hdr->kh_shdr[SHDR_SHSTRTAB].sh_entsize = 0; /* Copy shstrtab into the header. */ bcopy(ksyms_shstrtab, hdr->kh_shstrtab, sizeof(ksyms_shstrtab)); to.to_sc = sc; to.to_symoff = hdr->kh_shdr[SHDR_SYMTAB].sh_offset; to.to_stroff = hdr->kh_shdr[SHDR_STRTAB].sh_offset; to.to_stridx = 0; to.to_resid = sc->sc_objsz - sizeof(struct ksyms_hdr); /* emit header */ error = ksyms_emit(sc, hdr, 0, sizeof(*hdr)); free(hdr, M_KSYMS); if (error != 0) return (error); /* Add symbol and string tables for each kernel module. */ error = linker_file_foreach(ksyms_add, &to); if (error != 0) return (error); if (to.to_resid != 0) return (ENXIO); return (0); } static void ksyms_cdevpriv_dtr(void *data) { struct ksyms_softc *sc; vm_object_t obj; sc = (struct ksyms_softc *)data; sx_xlock(&ksyms_mtx); LIST_REMOVE(sc, sc_list); sx_xunlock(&ksyms_mtx); obj = sc->sc_obj; if (obj != NULL) vm_object_deallocate(obj); free(sc, M_KSYMS); } static int ksyms_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td) { struct tsizes ts; struct ksyms_softc *sc; vm_size_t elfsz; int error, try; /* * Limit one open() per process. The process must close() * before open()'ing again. */ sx_xlock(&ksyms_mtx); LIST_FOREACH(sc, &ksyms_list, sc_list) { if (sc->sc_proc == td->td_proc) { sx_xunlock(&ksyms_mtx); return (EBUSY); } } sc = malloc(sizeof(*sc), M_KSYMS, M_WAITOK | M_ZERO); sc->sc_proc = td->td_proc; LIST_INSERT_HEAD(&ksyms_list, sc, sc_list); sx_xunlock(&ksyms_mtx); error = devfs_set_cdevpriv(sc, ksyms_cdevpriv_dtr); if (error != 0) { ksyms_cdevpriv_dtr(sc); return (error); } /* * MOD_SLOCK doesn't work here (because of a lock reversal with * KLD_SLOCK). Therefore, simply try up to 3 times to get a "clean" * snapshot of the kernel symbol table. This should work fine in the * rare case of a kernel module being loaded/unloaded at the same * time. 
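The sh_name values above fall out of plain sizeof arithmetic: sizeof on a string literal counts the terminating NUL, so each name starts right after the previous one inside ksyms_shstrtab ("\0.symtab\0.strtab\0.shstrtab\0"). Worked out:

#include <stdio.h>

#define STR_SYMTAB   ".symtab"
#define STR_STRTAB   ".strtab"
#define STR_SHSTRTAB ".shstrtab"

/* Worked example of the sh_name offsets computed above: each entry
 * begins 1 byte (the leading null) plus the NUL-inclusive sizes of
 * the names before it into the shstrtab. */
int
main(void)
{
	printf(".symtab   at 1\n");
	printf(".strtab   at %u\n",
	    (unsigned)(1 + sizeof(STR_SYMTAB)));			/* 9  */
	printf(".shstrtab at %u\n",
	    (unsigned)(1 + sizeof(STR_SYMTAB) + sizeof(STR_STRTAB)));	/* 17 */
	return (0);
}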
*/ for (try = 0; try < 3; try++) { ksyms_size_calc(&ts); elfsz = sizeof(struct ksyms_hdr) + ts.ts_symsz + ts.ts_strsz; sc->sc_obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(round_page(elfsz))); sc->sc_objsz = elfsz; error = ksyms_snapshot(sc, &ts); if (error == 0) break; vm_object_deallocate(sc->sc_obj); sc->sc_obj = NULL; } return (error); } static int ksyms_read(struct cdev *dev, struct uio *uio, int flags __unused) { struct ksyms_softc *sc; int error; error = devfs_get_cdevpriv((void **)&sc); if (error != 0) return (error); return (uiomove_object(sc->sc_obj, sc->sc_objsz, uio)); } static int ksyms_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, vm_object_t *objp, int nprot) { struct ksyms_softc *sc; vm_object_t obj; int error; error = devfs_get_cdevpriv((void **)&sc); if (error != 0) return (error); if (*offset < 0 || *offset >= round_page(sc->sc_objsz) || size > round_page(sc->sc_objsz) - *offset || (nprot & ~PROT_READ) != 0) return (EINVAL); obj = sc->sc_obj; vm_object_reference(obj); *objp = obj; return (0); } static int ksyms_modevent(module_t mod __unused, int type, void *data __unused) { int error; error = 0; switch (type) { case MOD_LOAD: sx_init(&ksyms_mtx, "KSyms mtx"); ksyms_dev = make_dev(&ksyms_cdevsw, 0, UID_ROOT, GID_WHEEL, 0400, KSYMS_DNAME); break; case MOD_UNLOAD: if (!LIST_EMPTY(&ksyms_list)) return (EBUSY); destroy_dev(ksyms_dev); sx_destroy(&ksyms_mtx); break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } DEV_MODULE(ksyms, ksyms_modevent, NULL); MODULE_VERSION(ksyms, 1); Index: projects/make-check-sandbox/sys/dev/mlx5/mlx5_core/mlx5_cmd.c =================================================================== --- projects/make-check-sandbox/sys/dev/mlx5/mlx5_core/mlx5_cmd.c (revision 321993) +++ projects/make-check-sandbox/sys/dev/mlx5/mlx5_core/mlx5_cmd.c (revision 321994) @@ -1,1816 +1,1815 @@ /*- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include "mlx5_core.h" static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size); static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); enum { CMD_IF_REV = 5, }; enum { CMD_MODE_POLLING, CMD_MODE_EVENTS }; enum { NUM_LONG_LISTS = 2, NUM_MED_LISTS = 64, LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + MLX5_CMD_DATA_BLOCK_SIZE, MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, }; enum { MLX5_CMD_DELIVERY_STAT_OK = 0x0, MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, }; static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, int uin_size, struct mlx5_cmd_msg *out, void *uout, int uout_size, mlx5_cmd_cbk_t cbk, void *context, int page_queue) { gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; struct mlx5_cmd_work_ent *ent; ent = kzalloc(sizeof(*ent), alloc_flags); if (!ent) return ERR_PTR(-ENOMEM); ent->in = in; ent->uin_size = uin_size; ent->out = out; ent->uout = uout; ent->uout_size = uout_size; ent->callback = cbk; ent->context = context; ent->cmd = cmd; ent->page_queue = page_queue; return ent; } static u8 alloc_token(struct mlx5_cmd *cmd) { u8 token; spin_lock(&cmd->token_lock); cmd->token++; if (cmd->token == 0) cmd->token++; token = cmd->token; spin_unlock(&cmd->token_lock); return token; } static int alloc_ent(struct mlx5_cmd_work_ent *ent) { unsigned long flags; struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); int ret = cmd->max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); if (!ent->page_queue) { ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); if (ret >= cmd->max_reg_cmds) ret = -1; } if (dev->state != MLX5_DEVICE_STATE_UP) ret = -1; if (ret != -1) { ent->busy = 1; ent->idx = ret; clear_bit(ent->idx, &cmd->bitmask); cmd->ent_arr[ent->idx] = ent; } spin_unlock_irqrestore(&cmd->alloc_lock, flags); return ret; } static void free_ent(struct mlx5_cmd *cmd, int idx) { unsigned long flags; spin_lock_irqsave(&cmd->alloc_lock, flags); set_bit(idx, &cmd->bitmask); spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) { return cmd->cmd_buf + (idx << cmd->log_stride); } static u8 xor8_buf(void *buf, int len) { u8 *ptr = buf; u8 sum = 0; int i; for (i = 0; i < len; i++) sum ^= ptr[i]; return sum; } static int verify_block_sig(struct mlx5_cmd_prot_block *block) { if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) return -EINVAL; if (xor8_buf(block, sizeof(*block)) != 0xff) return -EINVAL; return 0; } static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, int csum) { block->token = token; if (csum) { block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); block->sig = ~xor8_buf(block, sizeof(*block) - 1); } } static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) { size_t i; for (i = 0; i != 
(msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { struct mlx5_cmd_prot_block *block; block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); /* compute signature */ calc_block_sig(block, token, csum); /* check for last block */ if (block->next == 0) break; } /* make sure data gets written to RAM */ mlx5_fwp_flush(msg); } static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) { ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); calc_chain_sig(ent->in, ent->token, csum); calc_chain_sig(ent->out, ent->token, csum); } static void poll_timeout(struct mlx5_cmd_work_ent *ent) { struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd); int poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); u8 own; do { own = ent->lay->status_own; if (!(own & CMD_OWNER_HW) || dev->state != MLX5_DEVICE_STATE_UP) { ent->ret = 0; return; } usleep_range(5000, 10000); } while (time_before(jiffies, poll_end)); ent->ret = -ETIMEDOUT; } static void free_cmd(struct mlx5_cmd_work_ent *ent) { kfree(ent); } static int verify_signature(struct mlx5_cmd_work_ent *ent) { struct mlx5_cmd_msg *msg = ent->out; size_t i; int err; u8 sig; sig = xor8_buf(ent->lay, sizeof(*ent->lay)); if (sig != 0xff) return -EINVAL; for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { struct mlx5_cmd_prot_block *block; block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); /* compute signature */ err = verify_block_sig(block); if (err != 0) return (err); /* check for last block */ if (block->next == 0) break; } return (0); } static void dump_buf(void *buf, int size, int data_only, int offset) { __be32 *p = buf; int i; for (i = 0; i < size; i += 16) { pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3])); p += 4; offset += 16; } if (!data_only) pr_debug("\n"); } const char *mlx5_command_str(int command) { switch (command) { case MLX5_CMD_OP_QUERY_HCA_CAP: return "QUERY_HCA_CAP"; case MLX5_CMD_OP_SET_HCA_CAP: return "SET_HCA_CAP"; case MLX5_CMD_OP_QUERY_ADAPTER: return "QUERY_ADAPTER"; case MLX5_CMD_OP_INIT_HCA: return "INIT_HCA"; case MLX5_CMD_OP_TEARDOWN_HCA: return "TEARDOWN_HCA"; case MLX5_CMD_OP_ENABLE_HCA: return "MLX5_CMD_OP_ENABLE_HCA"; case MLX5_CMD_OP_DISABLE_HCA: return "MLX5_CMD_OP_DISABLE_HCA"; case MLX5_CMD_OP_QUERY_PAGES: return "QUERY_PAGES"; case MLX5_CMD_OP_MANAGE_PAGES: return "MANAGE_PAGES"; case MLX5_CMD_OP_QUERY_ISSI: return "QUERY_ISSI"; case MLX5_CMD_OP_SET_ISSI: return "SET_ISSI"; case MLX5_CMD_OP_CREATE_MKEY: return "CREATE_MKEY"; case MLX5_CMD_OP_QUERY_MKEY: return "QUERY_MKEY"; case MLX5_CMD_OP_DESTROY_MKEY: return "DESTROY_MKEY"; case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: return "QUERY_SPECIAL_CONTEXTS"; case MLX5_CMD_OP_PAGE_FAULT_RESUME: return "PAGE_FAULT_RESUME"; case MLX5_CMD_OP_CREATE_EQ: return "CREATE_EQ"; case MLX5_CMD_OP_DESTROY_EQ: return "DESTROY_EQ"; case MLX5_CMD_OP_QUERY_EQ: return "QUERY_EQ"; case MLX5_CMD_OP_GEN_EQE: return "GEN_EQE"; case MLX5_CMD_OP_CREATE_CQ: return "CREATE_CQ"; case MLX5_CMD_OP_DESTROY_CQ: return "DESTROY_CQ"; case MLX5_CMD_OP_QUERY_CQ: return "QUERY_CQ"; case MLX5_CMD_OP_MODIFY_CQ: return "MODIFY_CQ"; case MLX5_CMD_OP_CREATE_QP: return "CREATE_QP"; case MLX5_CMD_OP_DESTROY_QP: return "DESTROY_QP"; case MLX5_CMD_OP_RST2INIT_QP: return "RST2INIT_QP"; case MLX5_CMD_OP_INIT2RTR_QP: return "INIT2RTR_QP"; case MLX5_CMD_OP_RTR2RTS_QP: return "RTR2RTS_QP"; case MLX5_CMD_OP_RTS2RTS_QP: return "RTS2RTS_QP"; case MLX5_CMD_OP_SQERR2RTS_QP: return 
"SQERR2RTS_QP"; case MLX5_CMD_OP_2ERR_QP: return "2ERR_QP"; case MLX5_CMD_OP_2RST_QP: return "2RST_QP"; case MLX5_CMD_OP_QUERY_QP: return "QUERY_QP"; case MLX5_CMD_OP_SQD_RTS_QP: return "SQD_RTS_QP"; case MLX5_CMD_OP_MAD_IFC: return "MAD_IFC"; case MLX5_CMD_OP_INIT2INIT_QP: return "INIT2INIT_QP"; case MLX5_CMD_OP_CREATE_PSV: return "CREATE_PSV"; case MLX5_CMD_OP_DESTROY_PSV: return "DESTROY_PSV"; case MLX5_CMD_OP_CREATE_SRQ: return "CREATE_SRQ"; case MLX5_CMD_OP_DESTROY_SRQ: return "DESTROY_SRQ"; case MLX5_CMD_OP_QUERY_SRQ: return "QUERY_SRQ"; case MLX5_CMD_OP_ARM_RQ: return "ARM_RQ"; case MLX5_CMD_OP_CREATE_XRC_SRQ: return "CREATE_XRC_SRQ"; case MLX5_CMD_OP_DESTROY_XRC_SRQ: return "DESTROY_XRC_SRQ"; case MLX5_CMD_OP_QUERY_XRC_SRQ: return "QUERY_XRC_SRQ"; case MLX5_CMD_OP_ARM_XRC_SRQ: return "ARM_XRC_SRQ"; case MLX5_CMD_OP_CREATE_DCT: return "CREATE_DCT"; case MLX5_CMD_OP_SET_DC_CNAK_TRACE: return "SET_DC_CNAK_TRACE"; case MLX5_CMD_OP_DESTROY_DCT: return "DESTROY_DCT"; case MLX5_CMD_OP_DRAIN_DCT: return "DRAIN_DCT"; case MLX5_CMD_OP_QUERY_DCT: return "QUERY_DCT"; case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: return "ARM_DCT_FOR_KEY_VIOLATION"; case MLX5_CMD_OP_QUERY_VPORT_STATE: return "QUERY_VPORT_STATE"; case MLX5_CMD_OP_MODIFY_VPORT_STATE: return "MODIFY_VPORT_STATE"; case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: return "QUERY_ESW_VPORT_CONTEXT"; case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: return "MODIFY_ESW_VPORT_CONTEXT"; case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: return "QUERY_NIC_VPORT_CONTEXT"; case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: return "MODIFY_NIC_VPORT_CONTEXT"; case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: return "QUERY_ROCE_ADDRESS"; case MLX5_CMD_OP_SET_ROCE_ADDRESS: return "SET_ROCE_ADDRESS"; case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: return "QUERY_HCA_VPORT_CONTEXT"; case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: return "MODIFY_HCA_VPORT_CONTEXT"; case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: return "QUERY_HCA_VPORT_GID"; case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: return "QUERY_HCA_VPORT_PKEY"; case MLX5_CMD_OP_QUERY_VPORT_COUNTER: return "QUERY_VPORT_COUNTER"; case MLX5_CMD_OP_SET_WOL_ROL: return "SET_WOL_ROL"; case MLX5_CMD_OP_QUERY_WOL_ROL: return "QUERY_WOL_ROL"; case MLX5_CMD_OP_ALLOC_Q_COUNTER: return "ALLOC_Q_COUNTER"; case MLX5_CMD_OP_DEALLOC_Q_COUNTER: return "DEALLOC_Q_COUNTER"; case MLX5_CMD_OP_QUERY_Q_COUNTER: return "QUERY_Q_COUNTER"; case MLX5_CMD_OP_ALLOC_PD: return "ALLOC_PD"; case MLX5_CMD_OP_DEALLOC_PD: return "DEALLOC_PD"; case MLX5_CMD_OP_ALLOC_UAR: return "ALLOC_UAR"; case MLX5_CMD_OP_DEALLOC_UAR: return "DEALLOC_UAR"; case MLX5_CMD_OP_CONFIG_INT_MODERATION: return "CONFIG_INT_MODERATION"; case MLX5_CMD_OP_ATTACH_TO_MCG: return "ATTACH_TO_MCG"; case MLX5_CMD_OP_DETACH_FROM_MCG: return "DETACH_FROM_MCG"; case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: return "GET_DROPPED_PACKET_LOG"; case MLX5_CMD_OP_QUERY_MAD_DEMUX: return "QUERY_MAD_DEMUX"; case MLX5_CMD_OP_SET_MAD_DEMUX: return "SET_MAD_DEMUX"; case MLX5_CMD_OP_NOP: return "NOP"; case MLX5_CMD_OP_ALLOC_XRCD: return "ALLOC_XRCD"; case MLX5_CMD_OP_DEALLOC_XRCD: return "DEALLOC_XRCD"; case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: return "ALLOC_TRANSPORT_DOMAIN"; case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: return "DEALLOC_TRANSPORT_DOMAIN"; case MLX5_CMD_OP_QUERY_CONG_STATUS: return "QUERY_CONG_STATUS"; case MLX5_CMD_OP_MODIFY_CONG_STATUS: return "MODIFY_CONG_STATUS"; case MLX5_CMD_OP_QUERY_CONG_PARAMS: return "QUERY_CONG_PARAMS"; case MLX5_CMD_OP_MODIFY_CONG_PARAMS: return "MODIFY_CONG_PARAMS"; case MLX5_CMD_OP_QUERY_CONG_STATISTICS: return 
"QUERY_CONG_STATISTICS"; case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: return "ADD_VXLAN_UDP_DPORT"; case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: return "DELETE_VXLAN_UDP_DPORT"; case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: return "SET_L2_TABLE_ENTRY"; case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: return "QUERY_L2_TABLE_ENTRY"; case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: return "DELETE_L2_TABLE_ENTRY"; case MLX5_CMD_OP_CREATE_RMP: return "CREATE_RMP"; case MLX5_CMD_OP_MODIFY_RMP: return "MODIFY_RMP"; case MLX5_CMD_OP_DESTROY_RMP: return "DESTROY_RMP"; case MLX5_CMD_OP_QUERY_RMP: return "QUERY_RMP"; case MLX5_CMD_OP_CREATE_RQT: return "CREATE_RQT"; case MLX5_CMD_OP_MODIFY_RQT: return "MODIFY_RQT"; case MLX5_CMD_OP_DESTROY_RQT: return "DESTROY_RQT"; case MLX5_CMD_OP_QUERY_RQT: return "QUERY_RQT"; case MLX5_CMD_OP_ACCESS_REG: return "MLX5_CMD_OP_ACCESS_REG"; case MLX5_CMD_OP_CREATE_SQ: return "CREATE_SQ"; case MLX5_CMD_OP_MODIFY_SQ: return "MODIFY_SQ"; case MLX5_CMD_OP_DESTROY_SQ: return "DESTROY_SQ"; case MLX5_CMD_OP_QUERY_SQ: return "QUERY_SQ"; case MLX5_CMD_OP_CREATE_RQ: return "CREATE_RQ"; case MLX5_CMD_OP_MODIFY_RQ: return "MODIFY_RQ"; case MLX5_CMD_OP_DESTROY_RQ: return "DESTROY_RQ"; case MLX5_CMD_OP_QUERY_RQ: return "QUERY_RQ"; case MLX5_CMD_OP_CREATE_TIR: return "CREATE_TIR"; case MLX5_CMD_OP_MODIFY_TIR: return "MODIFY_TIR"; case MLX5_CMD_OP_DESTROY_TIR: return "DESTROY_TIR"; case MLX5_CMD_OP_QUERY_TIR: return "QUERY_TIR"; case MLX5_CMD_OP_CREATE_TIS: return "CREATE_TIS"; case MLX5_CMD_OP_MODIFY_TIS: return "MODIFY_TIS"; case MLX5_CMD_OP_DESTROY_TIS: return "DESTROY_TIS"; case MLX5_CMD_OP_QUERY_TIS: return "QUERY_TIS"; case MLX5_CMD_OP_CREATE_FLOW_TABLE: return "CREATE_FLOW_TABLE"; case MLX5_CMD_OP_DESTROY_FLOW_TABLE: return "DESTROY_FLOW_TABLE"; case MLX5_CMD_OP_QUERY_FLOW_TABLE: return "QUERY_FLOW_TABLE"; case MLX5_CMD_OP_CREATE_FLOW_GROUP: return "CREATE_FLOW_GROUP"; case MLX5_CMD_OP_DESTROY_FLOW_GROUP: return "DESTROY_FLOW_GROUP"; case MLX5_CMD_OP_QUERY_FLOW_GROUP: return "QUERY_FLOW_GROUP"; case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: return "SET_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: return "QUERY_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: return "DELETE_FLOW_TABLE_ENTRY"; case MLX5_CMD_OP_SET_DIAGNOSTICS: return "MLX5_CMD_OP_SET_DIAGNOSTICS"; case MLX5_CMD_OP_QUERY_DIAGNOSTICS: return "MLX5_CMD_OP_QUERY_DIAGNOSTICS"; default: return "unknown command opcode"; } } static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) { u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; size_t i; int data_only; int offset = 0; int msg_len = input ? ent->uin_size : ent->uout_size; int dump_len; data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); if (data_only) mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, "dump command data %s(0x%x) %s\n", mlx5_command_str(op), op, input ? "INPUT" : "OUTPUT"); else mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", mlx5_command_str(op), op, input ? 
"INPUT" : "OUTPUT"); if (data_only) { if (input) { dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); offset += sizeof(ent->lay->in); } else { dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); offset += sizeof(ent->lay->out); } } else { dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); offset += sizeof(*ent->lay); } for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { struct mlx5_cmd_prot_block *block; block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); if (data_only) { if (offset >= msg_len) break; dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset); dump_buf(block->data, dump_len, 1, offset); offset += MLX5_CMD_DATA_BLOCK_SIZE; } else { mlx5_core_dbg(dev, "command block:\n"); dump_buf(block, sizeof(*block), 0, offset); offset += sizeof(*block); } /* check for last block */ if (block->next == 0) break; } if (data_only) pr_debug("\n"); } static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode, struct mlx5_outbox_hdr *hdr) { hdr->status = 0; hdr->syndrome = 0; switch (opcode) { case MLX5_CMD_OP_TEARDOWN_HCA: case MLX5_CMD_OP_DISABLE_HCA: case MLX5_CMD_OP_MANAGE_PAGES: case MLX5_CMD_OP_DESTROY_MKEY: case MLX5_CMD_OP_DESTROY_EQ: case MLX5_CMD_OP_DESTROY_CQ: case MLX5_CMD_OP_DESTROY_QP: case MLX5_CMD_OP_DESTROY_PSV: case MLX5_CMD_OP_DESTROY_SRQ: case MLX5_CMD_OP_DESTROY_XRC_SRQ: case MLX5_CMD_OP_DESTROY_DCT: case MLX5_CMD_OP_DEALLOC_Q_COUNTER: case MLX5_CMD_OP_DEALLOC_PD: case MLX5_CMD_OP_DEALLOC_UAR: case MLX5_CMD_OP_DETACH_FROM_MCG: case MLX5_CMD_OP_DEALLOC_XRCD: case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: case MLX5_CMD_OP_DESTROY_LAG: case MLX5_CMD_OP_DESTROY_VPORT_LAG: case MLX5_CMD_OP_DESTROY_TIR: case MLX5_CMD_OP_DESTROY_SQ: case MLX5_CMD_OP_DESTROY_RQ: case MLX5_CMD_OP_DESTROY_RMP: case MLX5_CMD_OP_DESTROY_TIS: case MLX5_CMD_OP_DESTROY_RQT: case MLX5_CMD_OP_DESTROY_FLOW_TABLE: case MLX5_CMD_OP_DESTROY_FLOW_GROUP: case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: case MLX5_CMD_OP_2ERR_QP: case MLX5_CMD_OP_2RST_QP: case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: case MLX5_CMD_OP_MODIFY_FLOW_TABLE: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: case MLX5_CMD_OP_MODIFY_VPORT_STATE: case MLX5_CMD_OP_MODIFY_SQ: case MLX5_CMD_OP_MODIFY_RQ: case MLX5_CMD_OP_MODIFY_TIS: case MLX5_CMD_OP_MODIFY_LAG: case MLX5_CMD_OP_MODIFY_TIR: case MLX5_CMD_OP_MODIFY_RMP: case MLX5_CMD_OP_MODIFY_RQT: case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: case MLX5_CMD_OP_MODIFY_CONG_PARAMS: case MLX5_CMD_OP_MODIFY_CONG_STATUS: case MLX5_CMD_OP_MODIFY_CQ: case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP: case MLX5_CMD_OP_ACCESS_REG: case MLX5_CMD_OP_DRAIN_DCT: return 0; case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_ALLOC_PD: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: case MLX5_CMD_OP_ALLOC_UAR: case MLX5_CMD_OP_ALLOC_XRCD: case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: case MLX5_CMD_OP_ARM_RQ: case MLX5_CMD_OP_ARM_XRC_SRQ: case MLX5_CMD_OP_ATTACH_TO_MCG: case MLX5_CMD_OP_CONFIG_INT_MODERATION: case MLX5_CMD_OP_CREATE_CQ: case MLX5_CMD_OP_CREATE_DCT: case MLX5_CMD_OP_CREATE_EQ: case MLX5_CMD_OP_CREATE_FLOW_GROUP: 
case MLX5_CMD_OP_CREATE_FLOW_TABLE: case MLX5_CMD_OP_CREATE_LAG: case MLX5_CMD_OP_CREATE_MKEY: case MLX5_CMD_OP_CREATE_PSV: case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: case MLX5_CMD_OP_CREATE_QP: case MLX5_CMD_OP_CREATE_RMP: case MLX5_CMD_OP_CREATE_RQ: case MLX5_CMD_OP_CREATE_RQT: case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: case MLX5_CMD_OP_CREATE_SQ: case MLX5_CMD_OP_CREATE_SRQ: case MLX5_CMD_OP_CREATE_TIR: case MLX5_CMD_OP_CREATE_TIS: case MLX5_CMD_OP_CREATE_VPORT_LAG: case MLX5_CMD_OP_CREATE_XRC_SRQ: case MLX5_CMD_OP_ENABLE_HCA: case MLX5_CMD_OP_GEN_EQE: case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: case MLX5_CMD_OP_INIT2INIT_QP: case MLX5_CMD_OP_INIT2RTR_QP: case MLX5_CMD_OP_INIT_HCA: case MLX5_CMD_OP_MAD_IFC: case MLX5_CMD_OP_NOP: case MLX5_CMD_OP_PAGE_FAULT_RESUME: case MLX5_CMD_OP_QUERY_ADAPTER: case MLX5_CMD_OP_QUERY_CONG_PARAMS: case MLX5_CMD_OP_QUERY_CONG_STATISTICS: case MLX5_CMD_OP_QUERY_CONG_STATUS: case MLX5_CMD_OP_QUERY_CQ: case MLX5_CMD_OP_QUERY_DCT: case MLX5_CMD_OP_QUERY_EQ: case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_GROUP: case MLX5_CMD_OP_QUERY_FLOW_TABLE: case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_HCA_CAP: case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: case MLX5_CMD_OP_QUERY_ISSI: case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_LAG: case MLX5_CMD_OP_QUERY_MAD_DEMUX: case MLX5_CMD_OP_QUERY_MKEY: case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP: case MLX5_CMD_OP_QUERY_PAGES: case MLX5_CMD_OP_QUERY_QP: case MLX5_CMD_OP_QUERY_Q_COUNTER: case MLX5_CMD_OP_QUERY_RMP: case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: case MLX5_CMD_OP_QUERY_RQ: case MLX5_CMD_OP_QUERY_RQT: case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: case MLX5_CMD_OP_QUERY_SQ: case MLX5_CMD_OP_QUERY_SRQ: case MLX5_CMD_OP_QUERY_TIR: case MLX5_CMD_OP_QUERY_TIS: case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_QUERY_VPORT_STATE: case MLX5_CMD_OP_QUERY_XRC_SRQ: case MLX5_CMD_OP_RST2INIT_QP: case MLX5_CMD_OP_RTR2RTS_QP: case MLX5_CMD_OP_RTS2RTS_QP: case MLX5_CMD_OP_SET_DC_CNAK_TRACE: case MLX5_CMD_OP_SET_HCA_CAP: case MLX5_CMD_OP_SET_ISSI: case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: case MLX5_CMD_OP_SET_MAD_DEMUX: case MLX5_CMD_OP_SET_ROCE_ADDRESS: case MLX5_CMD_OP_SQD_RTS_QP: case MLX5_CMD_OP_SQERR2RTS_QP: hdr->status = MLX5_CMD_STAT_INT_ERR; hdr->syndrome = 0xFFFFFFFF; return -ECANCELED; default: mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode); return -EINVAL; } } static void complete_command(struct mlx5_cmd_work_ent *ent) { struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); mlx5_cmd_cbk_t callback; void *context; s64 ds; struct mlx5_cmd_stats *stats; unsigned long flags; int err; struct semaphore *sem; if (ent->page_queue) sem = &cmd->pages_sem; else sem = &cmd->sem; if (dev->state != MLX5_DEVICE_STATE_UP) { struct mlx5_outbox_hdr *out_hdr = (struct mlx5_outbox_hdr *)ent->out; struct mlx5_inbox_hdr *in_hdr = (struct mlx5_inbox_hdr *)(ent->in->first.data); u16 opcode = be16_to_cpu(in_hdr->opcode); ent->ret = set_internal_err_outbox(dev, opcode, out_hdr); } if (ent->callback) { ds = ent->ts2 - ent->ts1; if (ent->op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[ent->op]; spin_lock_irqsave(&stats->lock, flags); stats->sum += ds; ++stats->n; spin_unlock_irqrestore(&stats->lock, flags); } callback = ent->callback; context = ent->context; err 
= ent->ret; if (!err) err = mlx5_copy_from_msg(ent->uout, ent->out, ent->uout_size); mlx5_free_cmd_msg(dev, ent->out); free_msg(dev, ent->in); free_cmd(ent); callback(err, context); } else { complete(&ent->done); } up(sem); } static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); struct mlx5_cmd *cmd = ent->cmd; struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); struct mlx5_cmd_layout *lay; struct semaphore *sem; sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; if (cmd->moving_to_polling) { mlx5_core_warn(dev, "not expecting command execution, ignoring...\n"); return; } down(sem); if (alloc_ent(ent) < 0) { complete_command(ent); return; } ent->token = alloc_token(cmd); lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); ent->op = be32_to_cpu(lay->in[0]) >> 16; if (ent->in->numpages != 0) lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0)); if (ent->out->numpages != 0) lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0)); lay->inlen = cpu_to_be32(ent->uin_size); lay->outlen = cpu_to_be32(ent->uout_size); lay->type = MLX5_PCI_CMD_XPORT; lay->token = ent->token; lay->status_own = CMD_OWNER_HW; set_signature(ent, !cmd->checksum_disabled); dump_command(dev, ent, 1); ent->ts1 = ktime_get_ns(); ent->busy = 0; /* ring doorbell after the descriptor is valid */ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); /* make sure data is written to RAM */ mlx5_fwp_flush(cmd->cmd_page); iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); mmiowb(); /* if not in polling, don't use ent after this point */ if (cmd->mode == CMD_MODE_POLLING) { poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ mlx5_cmd_comp_handler(dev, 1U << ent->idx); } } static const char *deliv_status_to_str(u8 status) { switch (status) { case MLX5_CMD_DELIVERY_STAT_OK: return "no errors"; case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: return "signature error"; case MLX5_CMD_DELIVERY_STAT_TOK_ERR: return "token error"; case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: return "bad block number"; case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: return "output pointer not aligned to block size"; case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: return "input pointer not aligned to block size"; case MLX5_CMD_DELIVERY_STAT_FW_ERR: return "firmware internal error"; case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: return "command input length error"; case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: return "command output length error"; case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: return "reserved fields not cleared"; case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: return "bad command descriptor type"; default: return "unknown status code"; } } static u16 msg_to_opcode(struct mlx5_cmd_msg *in) { struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); return be16_to_cpu(hdr->opcode); } static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) { int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); struct mlx5_cmd *cmd = &dev->cmd; int err; if (cmd->mode == CMD_MODE_POLLING) { wait_for_completion(&ent->done); err = ent->ret; } else { if (!wait_for_completion_timeout(&ent->done, timeout)) err = -ETIMEDOUT; else err = 0; } if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); return err; } /* Notes: * 1. Callback functions may not sleep * 2. Page queue commands do not support asynchronous completion */ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, int uin_size, struct mlx5_cmd_msg *out, void *uout, int uout_size, mlx5_cmd_cbk_t callback, void *context, int page_queue, u8 *status) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; struct mlx5_cmd_stats *stats; int err = 0; s64 ds; u16 op; if (callback && page_queue) return -EINVAL; ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback, context, page_queue); if (IS_ERR(ent)) return PTR_ERR(ent); if (!callback) init_completion(&ent->done); INIT_WORK(&ent->work, cmd_work_handler); if (page_queue) { cmd_work_handler(&ent->work); } else if (!queue_work(cmd->wq, &ent->work)) { mlx5_core_warn(dev, "failed to queue work\n"); err = -ENOMEM; goto out_free; } if (!callback) { err = wait_func(dev, ent); if (err == -ETIMEDOUT) goto out; ds = ent->ts2 - ent->ts1; op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); if (op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[op]; spin_lock_irq(&stats->lock); stats->sum += ds; ++stats->n; spin_unlock_irq(&stats->lock); } mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, "fw exec time for %s is %lld nsec\n", mlx5_command_str(op), (long long)ds); *status = ent->status; free_cmd(ent); } return err; out_free: free_cmd(ent); out: return err; } static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size) { size_t delta; size_t i; if (to == NULL || from == NULL) return (-ENOMEM); delta = min_t(size_t, size, sizeof(to->first.data)); memcpy(to->first.data, from, delta); from = (char *)from + delta; size -= delta; for (i = 0; size != 0; i++) { struct mlx5_cmd_prot_block *block; block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE); delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); memcpy(block->data, from, delta); from = (char *)from + delta; size -= delta; } return (0); } static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) { size_t delta; size_t i; if (to == NULL || from == NULL) return (-ENOMEM); delta = min_t(size_t, size, sizeof(from->first.data)); memcpy(to, from->first.data, delta); to = (char *)to + delta; size -= delta; for (i = 0; size != 0; i++) { struct mlx5_cmd_prot_block *block; block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE); delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); memcpy(to, block->data, delta); to = (char *)to + delta; size -= delta; } return (0); } static struct mlx5_cmd_msg * mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size) { struct mlx5_cmd_msg *msg; size_t blen; size_t n; size_t i; blen = size - min_t(size_t, sizeof(msg->first.data), size); n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE); msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE)); if (msg == NULL) return (ERR_PTR(-ENOMEM)); for (i = 0; i != n; i++) { struct mlx5_cmd_prot_block *block; block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); memset(block, 0, MLX5_CMD_MBOX_SIZE); if (i != (n - 1)) { u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE); block->next = cpu_to_be64(dma); } block->block_num = cpu_to_be32(i); } /* make sure initial data is written to RAM */ mlx5_fwp_flush(msg); return (msg); } static 
void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) { mlx5_fwp_free(msg); } static void set_wqname(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", dev_name(&dev->pdev->dev)); } static void clean_debug_files(struct mlx5_core_dev *dev) { } void mlx5_cmd_use_events(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; int i; for (i = 0; i < cmd->max_reg_cmds; i++) down(&cmd->sem); down(&cmd->pages_sem); flush_workqueue(cmd->wq); cmd->mode = CMD_MODE_EVENTS; up(&cmd->pages_sem); for (i = 0; i < cmd->max_reg_cmds; i++) up(&cmd->sem); } void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; synchronize_irq(dev->priv.eq_table.pages_eq.irqn); flush_workqueue(dev->priv.pg_wq); cmd->moving_to_polling = 1; flush_workqueue(cmd->wq); cmd->mode = CMD_MODE_POLLING; cmd->moving_to_polling = 0; } static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) { unsigned long flags; if (msg->cache) { spin_lock_irqsave(&msg->cache->lock, flags); list_add_tail(&msg->list, &msg->cache->head); spin_unlock_irqrestore(&msg->cache->lock, flags); } else { mlx5_free_cmd_msg(dev, msg); } } void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; int i; /* make sure data gets read from RAM */ mlx5_fwp_invalidate(cmd->cmd_page); while (vector != 0) { i = ffs(vector) - 1; vector &= ~(1U << i); ent = cmd->ent_arr[i]; ent->ts2 = ktime_get_ns(); memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); /* make sure data gets read from RAM */ mlx5_fwp_invalidate(ent->out); dump_command(dev, ent, 0); if (!ent->ret) { if (!cmd->checksum_disabled) ent->ret = verify_signature(ent); else ent->ret = 0; ent->status = ent->lay->status_own >> 1; mlx5_core_dbg(dev, "FW command ret 0x%x, status %s(0x%x)\n", ent->ret, deliv_status_to_str(ent->status), ent->status); } free_ent(cmd, ent->idx); complete_command(ent); } } EXPORT_SYMBOL(mlx5_cmd_comp_handler); void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev) { unsigned long vector; int i = 0; unsigned long flags; synchronize_irq(dev->priv.eq_table.cmd_eq.irqn); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); if (!vector) return; for (i = 0; i < (1 << dev->cmd.log_sz); i++) { struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i]; if (!test_bit(i, &vector)) continue; while (ent->busy) usleep_range(1000, 1100); free_ent(&dev->cmd, i); complete_command(ent); } } EXPORT_SYMBOL(mlx5_trigger_cmd_completions); static int status_to_err(u8 status) { return status ? 
-1 : 0; /* TBD more meaningful codes */ } static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, gfp_t gfp) { struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); struct mlx5_cmd *cmd = &dev->cmd; struct cache_ent *ent = NULL; if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) ent = &cmd->cache.large; else if (in_size > 16 && in_size <= MED_LIST_SIZE) ent = &cmd->cache.med; if (ent) { spin_lock_irq(&ent->lock); if (!list_empty(&ent->head)) { msg = list_entry(ent->head.next, struct mlx5_cmd_msg, list); list_del(&msg->list); } spin_unlock_irq(&ent->lock); } if (IS_ERR(msg)) msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); return msg; } static int is_manage_pages(struct mlx5_inbox_hdr *in) { return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; } static int cmd_exec_helper(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context) { struct mlx5_cmd_msg *inb; struct mlx5_cmd_msg *outb; int pages_queue; - gfp_t gfp; + const gfp_t gfp = GFP_KERNEL; int err; u8 status = 0; pages_queue = is_manage_pages(in); - gfp = callback ? GFP_ATOMIC : GFP_KERNEL; inb = alloc_msg(dev, in_size, gfp); if (IS_ERR(inb)) { err = PTR_ERR(inb); return err; } err = mlx5_copy_to_msg(inb, in, in_size); if (err) { mlx5_core_warn(dev, "err %d\n", err); goto out_in; } outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); if (IS_ERR(outb)) { err = PTR_ERR(outb); goto out_in; } err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback, context, pages_queue, &status); if (err) { if (err == -ETIMEDOUT) return err; goto out_out; } mlx5_core_dbg(dev, "err %d, status %d\n", err, status); if (status) { err = status_to_err(status); goto out_out; } if (callback) return err; err = mlx5_copy_from_msg(out, outb, out_size); out_out: mlx5_free_cmd_msg(dev, outb); out_in: free_msg(dev, inb); return err; } int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL); } EXPORT_SYMBOL(mlx5_cmd_exec); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context) { return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context); } EXPORT_SYMBOL(mlx5_cmd_exec_cb); static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_msg *msg; struct mlx5_cmd_msg *n; list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { list_del(&msg->list); mlx5_free_cmd_msg(dev, msg); } list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { list_del(&msg->list); mlx5_free_cmd_msg(dev, msg); } } static int create_msg_cache(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_msg *msg; int err; int i; spin_lock_init(&cmd->cache.large.lock); INIT_LIST_HEAD(&cmd->cache.large.head); spin_lock_init(&cmd->cache.med.lock); INIT_LIST_HEAD(&cmd->cache.med.head); for (i = 0; i < NUM_LONG_LISTS; i++) { msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto ex_err; } msg->cache = &cmd->cache.large; list_add_tail(&msg->list, &cmd->cache.large.head); } for (i = 0; i < NUM_MED_LISTS; i++) { msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto ex_err; } msg->cache = &cmd->cache.med; list_add_tail(&msg->list, &cmd->cache.med.head); } return 0; ex_err: destroy_msg_cache(dev); return err; } static int alloc_cmd_page(struct 
mlx5_core_dev *dev, struct mlx5_cmd *cmd) { int err; sx_init(&cmd->dma_sx, "MLX5-DMA-SX"); mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF); cv_init(&cmd->dma_cv, "MLX5-DMA-CV"); /* * Create global DMA descriptor tag for allocating * 4K firmware pages: */ err = -bus_dma_tag_create( bus_get_dma_tag(dev->pdev->dev.bsddev), MLX5_ADAPTER_PAGE_SIZE, /* alignment */ 0, /* no boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MLX5_ADAPTER_PAGE_SIZE, /* maxsize */ 1, /* nsegments */ MLX5_ADAPTER_PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &cmd->dma_tag); if (err != 0) goto failure_destroy_sx; cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1); if (cmd->cmd_page == NULL) { err = -ENOMEM; goto failure_alloc_page; } cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0); cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0); return (0); failure_alloc_page: bus_dma_tag_destroy(cmd->dma_tag); failure_destroy_sx: cv_destroy(&cmd->dma_cv); mtx_destroy(&cmd->dma_mtx); sx_destroy(&cmd->dma_sx); return (err); } static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) { mlx5_fwp_free(cmd->cmd_page); bus_dma_tag_destroy(cmd->dma_tag); cv_destroy(&cmd->dma_cv); mtx_destroy(&cmd->dma_mtx); sx_destroy(&cmd->dma_sx); } int mlx5_cmd_init(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; u32 cmd_h, cmd_l; u16 cmd_if_rev; int err; int i; cmd_if_rev = cmdif_rev_get(dev); if (cmd_if_rev != CMD_IF_REV) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev); return -EINVAL; } err = alloc_cmd_page(dev, cmd); if (err) goto err_free_pool; cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; cmd->log_sz = cmd_l >> 4 & 0xf; cmd->log_stride = cmd_l & 0xf; if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz); err = -EINVAL; goto err_free_page; } if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n"); err = -EINVAL; goto err_free_page; } cmd->checksum_disabled = 1; cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; if (cmd->cmdif_rev > CMD_IF_REV) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. 
driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev); err = -ENOTSUPP; goto err_free_page; } spin_lock_init(&cmd->alloc_lock); spin_lock_init(&cmd->token_lock); for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) spin_lock_init(&cmd->stats[i].lock); sema_init(&cmd->sem, cmd->max_reg_cmds); sema_init(&cmd->pages_sem, 1); cmd_h = (u32)((u64)(cmd->dma) >> 32); cmd_l = (u32)(cmd->dma); if (cmd_l & 0xfff) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n"); err = -ENOMEM; goto err_free_page; } iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); /* Make sure firmware sees the complete address before we proceed */ wmb(); mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); cmd->mode = CMD_MODE_POLLING; err = create_msg_cache(dev); if (err) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n"); goto err_free_page; } set_wqname(dev); cmd->wq = create_singlethread_workqueue(cmd->wq_name); if (!cmd->wq) { device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n"); err = -ENOMEM; goto err_cache; } return 0; err_cache: destroy_msg_cache(dev); err_free_page: free_cmd_page(dev, cmd); err_free_pool: return err; } EXPORT_SYMBOL(mlx5_cmd_init); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) { struct mlx5_cmd *cmd = &dev->cmd; clean_debug_files(dev); destroy_workqueue(cmd->wq); destroy_msg_cache(dev); free_cmd_page(dev, cmd); } EXPORT_SYMBOL(mlx5_cmd_cleanup); static const char *cmd_status_str(u8 status) { switch (status) { case MLX5_CMD_STAT_OK: return "OK"; case MLX5_CMD_STAT_INT_ERR: return "internal error"; case MLX5_CMD_STAT_BAD_OP_ERR: return "bad operation"; case MLX5_CMD_STAT_BAD_PARAM_ERR: return "bad parameter"; case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return "bad system state"; case MLX5_CMD_STAT_BAD_RES_ERR: return "bad resource"; case MLX5_CMD_STAT_RES_BUSY: return "resource busy"; case MLX5_CMD_STAT_LIM_ERR: return "limits exceeded"; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return "bad resource state"; case MLX5_CMD_STAT_IX_ERR: return "bad index"; case MLX5_CMD_STAT_NO_RES_ERR: return "no resources"; case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return "bad input length"; case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return "bad output length"; case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return "bad QP state"; case MLX5_CMD_STAT_BAD_PKT_ERR: return "bad packet (discarded)"; case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return "bad size too many outstanding CQEs"; default: return "unknown status"; } } static int cmd_status_to_err_helper(u8 status) { switch (status) { case MLX5_CMD_STAT_OK: return 0; case MLX5_CMD_STAT_INT_ERR: return -EIO; case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; case MLX5_CMD_STAT_IX_ERR: return -EINVAL; case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; default: return -EIO; } } /* this will be available till all the commands use set/get macros */ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) { if 
(!hdr->status) return 0; printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome)); return cmd_status_to_err_helper(hdr->status); } int mlx5_cmd_status_to_err_v2(void *ptr) { u32 syndrome; u8 status; status = be32_to_cpu(*(__be32 *)ptr) >> 24; if (!status) return 0; syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome); return cmd_status_to_err_helper(status); } Index: projects/make-check-sandbox/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c =================================================================== --- projects/make-check-sandbox/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c (revision 321993) +++ projects/make-check-sandbox/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c (revision 321994) @@ -1,622 +1,619 @@ /*- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include "mlx5_core.h" CTASSERT((uintptr_t)PAGE_MASK > (uintptr_t)PAGE_SIZE); struct mlx5_pages_req { struct mlx5_core_dev *dev; u16 func_id; s32 npages; struct work_struct work; }; struct mlx5_manage_pages_inbox { struct mlx5_inbox_hdr hdr; __be16 rsvd; __be16 func_id; __be32 num_entries; __be64 pas[0]; }; struct mlx5_manage_pages_outbox { struct mlx5_outbox_hdr hdr; __be32 num_entries; u8 rsvd[4]; __be64 pas[0]; }; enum { MAX_RECLAIM_TIME_MSECS = 5000, }; static void mlx5_fwp_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mlx5_fw_page *fwp; uint8_t owned; fwp = (struct mlx5_fw_page *)arg; owned = MLX5_DMA_OWNED(fwp->dev); if (!owned) MLX5_DMA_LOCK(fwp->dev); if (error == 0) { KASSERT(nseg == 1, ("Number of segments is different from 1")); fwp->dma_addr = segs->ds_addr; fwp->load_done = MLX5_LOAD_ST_SUCCESS; } else { fwp->load_done = MLX5_LOAD_ST_FAILURE; } MLX5_DMA_DONE(fwp->dev); if (!owned) MLX5_DMA_UNLOCK(fwp->dev); } void mlx5_fwp_flush(struct mlx5_fw_page *fwp) { unsigned num = fwp->numpages; while (num--) bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREWRITE); } void mlx5_fwp_invalidate(struct mlx5_fw_page *fwp) { unsigned num = fwp->numpages; while (num--) { bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREREAD); } } struct mlx5_fw_page * mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num) { struct mlx5_fw_page *fwp; unsigned x; int err; /* check for special case */ if (num == 0) { fwp = kzalloc(sizeof(*fwp), flags); if (fwp != NULL) fwp->dev = dev; return (fwp); } /* we need sleeping context for this function */ if (flags & M_NOWAIT) return (NULL); fwp = kzalloc(sizeof(*fwp) * num, flags); /* serialize loading the DMA map(s) */ sx_xlock(&dev->cmd.dma_sx); for (x = 0; x != num; x++) { /* store pointer to MLX5 core device */ fwp[x].dev = dev; /* store number of pages left from the array */ fwp[x].numpages = num - x; /* allocate memory */ err = bus_dmamem_alloc(dev->cmd.dma_tag, &fwp[x].virt_addr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &fwp[x].dma_map); if (err != 0) goto failure; /* load memory into DMA */ MLX5_DMA_LOCK(dev); err = bus_dmamap_load( dev->cmd.dma_tag, fwp[x].dma_map, fwp[x].virt_addr, MLX5_ADAPTER_PAGE_SIZE, &mlx5_fwp_load_mem_cb, fwp + x, BUS_DMA_WAITOK | BUS_DMA_COHERENT); while (fwp[x].load_done == MLX5_LOAD_ST_NONE) MLX5_DMA_WAIT(dev); MLX5_DMA_UNLOCK(dev); /* check for error */ if (fwp[x].load_done != MLX5_LOAD_ST_SUCCESS) { bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr, fwp[x].dma_map); goto failure; } } sx_xunlock(&dev->cmd.dma_sx); return (fwp); failure: while (x--) { bus_dmamap_unload(dev->cmd.dma_tag, fwp[x].dma_map); bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr, fwp[x].dma_map); } sx_xunlock(&dev->cmd.dma_sx); return (NULL); } void mlx5_fwp_free(struct mlx5_fw_page *fwp) { struct mlx5_core_dev *dev; unsigned num; /* be NULL safe */ if (fwp == NULL) return; /* check for special case */ if (fwp->numpages == 0) { kfree(fwp); return; } num = fwp->numpages; dev = fwp->dev; - /* serialize unloading the DMA maps */ - sx_xlock(&dev->cmd.dma_sx); while (num--) { bus_dmamap_unload(dev->cmd.dma_tag, fwp[num].dma_map); bus_dmamem_free(dev->cmd.dma_tag, fwp[num].virt_addr, fwp[num].dma_map); } - sx_xunlock(&dev->cmd.dma_sx); kfree(fwp); } u64 mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset) { size_t index = (offset / 
MLX5_ADAPTER_PAGE_SIZE); KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset)); return ((fwp + index)->dma_addr + (offset % MLX5_ADAPTER_PAGE_SIZE)); } void * mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset) { size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE); KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset)); return ((char *)(fwp + index)->virt_addr + (offset % MLX5_ADAPTER_PAGE_SIZE)); } static int mlx5_insert_fw_page_locked(struct mlx5_core_dev *dev, struct mlx5_fw_page *nfp) { struct rb_root *root = &dev->priv.page_root; struct rb_node **new = &root->rb_node; struct rb_node *parent = NULL; struct mlx5_fw_page *tfp; while (*new) { parent = *new; tfp = rb_entry(parent, struct mlx5_fw_page, rb_node); if (tfp->dma_addr < nfp->dma_addr) new = &parent->rb_left; else if (tfp->dma_addr > nfp->dma_addr) new = &parent->rb_right; else return (-EEXIST); } rb_link_node(&nfp->rb_node, parent, new); rb_insert_color(&nfp->rb_node, root); return (0); } static struct mlx5_fw_page * mlx5_remove_fw_page_locked(struct mlx5_core_dev *dev, bus_addr_t addr) { struct rb_root *root = &dev->priv.page_root; struct rb_node *tmp = root->rb_node; struct mlx5_fw_page *result = NULL; struct mlx5_fw_page *tfp; while (tmp) { tfp = rb_entry(tmp, struct mlx5_fw_page, rb_node); if (tfp->dma_addr < addr) { tmp = tmp->rb_left; } else if (tfp->dma_addr > addr) { tmp = tmp->rb_right; } else { rb_erase(&tfp->rb_node, &dev->priv.page_root); result = tfp; break; } } return (result); } static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id) { struct mlx5_fw_page *fwp; int err; fwp = mlx5_fwp_alloc(dev, GFP_KERNEL, 1); if (fwp == NULL) return (-ENOMEM); fwp->func_id = func_id; MLX5_DMA_LOCK(dev); err = mlx5_insert_fw_page_locked(dev, fwp); MLX5_DMA_UNLOCK(dev); if (err != 0) { mlx5_fwp_free(fwp); } else { /* make sure cached data is cleaned */ mlx5_fwp_invalidate(fwp); /* store DMA address */ *addr = fwp->dma_addr; } return (err); } static void free_4k(struct mlx5_core_dev *dev, u64 addr) { struct mlx5_fw_page *fwp; MLX5_DMA_LOCK(dev); fwp = mlx5_remove_fw_page_locked(dev, addr); MLX5_DMA_UNLOCK(dev); if (fwp == NULL) { mlx5_core_warn(dev, "Cannot free 4K page at 0x%llx\n", (long long)addr); return; } mlx5_fwp_free(fwp); } static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot) { u32 in[MLX5_ST_SZ_DW(query_pages_in)]; u32 out[MLX5_ST_SZ_DW(query_pages_out)]; int err; memset(in, 0, sizeof(in)); MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES); MLX5_SET(query_pages_in, in, op_mod, boot ? 
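/* op_mod selects the boot-time or post-init page quota */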
MLX5_BOOT_PAGES : MLX5_INIT_PAGES); memset(out, 0, sizeof(out)); err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; *npages = MLX5_GET(query_pages_out, out, num_pages); *func_id = MLX5_GET(query_pages_out, out, function_id); return 0; } static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail) { struct mlx5_manage_pages_inbox *in; struct mlx5_manage_pages_outbox out; struct mlx5_manage_pages_inbox *nin; int inlen; u64 addr; int err; int i = 0; inlen = sizeof(*in) + npages * sizeof(in->pas[0]); in = mlx5_vzalloc(inlen); if (!in) { mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); err = -ENOMEM; goto out_alloc; } memset(&out, 0, sizeof(out)); for (i = 0; i < npages; i++) { err = alloc_4k(dev, &addr, func_id); if (err) goto out_alloc; in->pas[i] = cpu_to_be64(addr); } in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); in->func_id = cpu_to_be16(func_id); in->num_entries = cpu_to_be32(npages); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); goto out_alloc; } dev->priv.fw_pages += npages; dev->priv.pages_per_func[func_id] += npages; if (out.hdr.status) { err = mlx5_cmd_status_to_err(&out.hdr); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); goto out_alloc; } } mlx5_core_dbg(dev, "err %d\n", err); goto out_free; out_alloc: if (notify_fail) { nin = kzalloc(sizeof(*nin), GFP_KERNEL); if (!nin) goto out_4k; memset(&out, 0, sizeof(out)); nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); nin->func_id = cpu_to_be16(func_id); if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out))) mlx5_core_warn(dev, "page notify failed\n"); kfree(nin); } out_4k: for (i--; i >= 0; i--) free_4k(dev, be64_to_cpu(in->pas[i])); out_free: kvfree(in); return err; } static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed) { struct mlx5_manage_pages_inbox in; struct mlx5_manage_pages_outbox *out; int num_claimed; int outlen; u64 addr; int err; int i; if (nclaimed) *nclaimed = 0; memset(&in, 0, sizeof(in)); outlen = sizeof(*out) + npages * sizeof(out->pas[0]); out = mlx5_vzalloc(outlen); if (!out) return -ENOMEM; in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); in.func_id = cpu_to_be16(func_id); in.num_entries = cpu_to_be32(npages); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); if (err) { mlx5_core_err(dev, "failed reclaiming pages\n"); goto out_free; } if (out->hdr.status) { err = mlx5_cmd_status_to_err(&out->hdr); goto out_free; } num_claimed = be32_to_cpu(out->num_entries); if (nclaimed) *nclaimed = num_claimed; dev->priv.fw_pages -= num_claimed; dev->priv.pages_per_func[func_id] -= num_claimed; for (i = 0; i < num_claimed; i++) { addr = be64_to_cpu(out->pas[i]); free_4k(dev, addr); } out_free: kvfree(out); return err; } static void pages_work_handler(struct work_struct *work) { struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); struct mlx5_core_dev *dev = req->dev; int err = 0; if (req->npages < 0) err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); else if (req->npages > 0) err = give_pages(dev, req->func_id, req->npages, 1); if (err) mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? 
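/* a negative page count means the firmware wants pages back */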
"reclaim" : "give", err); kfree(req); } void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages) { struct mlx5_pages_req *req; req = kzalloc(sizeof(*req), GFP_ATOMIC); if (!req) { mlx5_core_warn(dev, "failed to allocate pages request\n"); return; } req->dev = dev; req->func_id = func_id; req->npages = npages; INIT_WORK(&req->work, pages_work_handler); if (!queue_work(dev->priv.pg_wq, &req->work)) mlx5_core_warn(dev, "failed to queue pages handler work\n"); } int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) { u16 uninitialized_var(func_id); s32 uninitialized_var(npages); int err; err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); if (err) return err; mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", npages, boot ? "boot" : "init", func_id); return give_pages(dev, func_id, npages, 0); } enum { MLX5_BLKS_FOR_RECLAIM_PAGES = 12 }; s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev) { int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); s64 prevpages = 0; s64 npages = 0; while (!time_after(jiffies, end)) { /* exclude own function, VFs only */ npages = dev->priv.fw_pages - dev->priv.pages_per_func[0]; if (!npages) break; if (npages != prevpages) end = end + msecs_to_jiffies(100); prevpages = npages; msleep(1); } if (npages) mlx5_core_warn(dev, "FW did not return all VFs pages, will cause to memory leak\n"); return -npages; } static int optimal_reclaimed_pages(void) { struct mlx5_cmd_prot_block *block; struct mlx5_cmd_layout *lay; int ret; ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - sizeof(struct mlx5_manage_pages_outbox)) / FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); return ret; } int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) { int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); struct mlx5_fw_page *fwp; struct rb_node *p; int nclaimed = 0; int err; do { p = rb_first(&dev->priv.page_root); if (p) { fwp = rb_entry(p, struct mlx5_fw_page, rb_node); if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { --dev->priv.fw_pages; free_4k(dev, fwp->dma_addr); nclaimed = 1; } else { err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), &nclaimed); if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); return err; } } if (nclaimed) end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); } if (time_after(jiffies, end)) { mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); break; } } while (p); return 0; } void mlx5_pagealloc_init(struct mlx5_core_dev *dev) { dev->priv.page_root = RB_ROOT; } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { /* nothing */ } int mlx5_pagealloc_start(struct mlx5_core_dev *dev) { dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); if (!dev->priv.pg_wq) return -ENOMEM; return 0; } void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) { destroy_workqueue(dev->priv.pg_wq); } Index: projects/make-check-sandbox/sys/ofed/drivers/infiniband/core/addr.c =================================================================== --- projects/make-check-sandbox/sys/ofed/drivers/infiniband/core/addr.c (revision 321993) +++ projects/make-check-sandbox/sys/ofed/drivers/infiniband/core/addr.c (revision 321994) @@ -1,686 +1,686 @@ /* * Copyright (c) 2005 Voltaire Inc. All rights reserved. * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. 
All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("IB Address Translation"); MODULE_LICENSE("Dual BSD/GPL"); struct addr_req { struct list_head list; struct sockaddr_storage src_addr; struct sockaddr_storage dst_addr; struct rdma_dev_addr *addr; struct rdma_addr_client *client; void *context; void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context); unsigned long timeout; int status; }; static void process_req(struct work_struct *work); static DEFINE_MUTEX(lock); static LIST_HEAD(req_list); static struct delayed_work work; static struct workqueue_struct *addr_wq; static struct rdma_addr_client self; void rdma_addr_register_client(struct rdma_addr_client *client) { atomic_set(&client->refcount, 1); init_completion(&client->comp); } EXPORT_SYMBOL(rdma_addr_register_client); static inline void put_client(struct rdma_addr_client *client) { if (atomic_dec_and_test(&client->refcount)) complete(&client->comp); } void rdma_addr_unregister_client(struct rdma_addr_client *client) { put_client(client); wait_for_completion(&client->comp); } EXPORT_SYMBOL(rdma_addr_unregister_client); int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct ifnet *dev, const unsigned char *dst_dev_addr) { if (dev->if_type == IFT_INFINIBAND) dev_addr->dev_type = ARPHRD_INFINIBAND; else if (dev->if_type == IFT_ETHER) dev_addr->dev_type = ARPHRD_ETHER; else dev_addr->dev_type = 0; memcpy(dev_addr->src_dev_addr, IF_LLADDR(dev), dev->if_addrlen); memcpy(dev_addr->broadcast, __DECONST(char *, dev->if_broadcastaddr), dev->if_addrlen); if (dst_dev_addr) memcpy(dev_addr->dst_dev_addr, dst_dev_addr, dev->if_addrlen); dev_addr->bound_dev_if = dev->if_index; return 0; } EXPORT_SYMBOL(rdma_copy_addr); #define SCOPE_ID_CACHE(_scope_id, _addr6) do { \ (_addr6)->sin6_addr.s6_addr[3] = (_scope_id); \ (_addr6)->sin6_scope_id = 0; } while (0) #define SCOPE_ID_RESTORE(_scope_id, _addr6) do { \ (_addr6)->sin6_scope_id = (_scope_id); \ (_addr6)->sin6_addr.s6_addr[3] = 0; } while (0) int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr, u16 *vlan_id) { struct net_device *dev; int ret = -EADDRNOTAVAIL; if 
(dev_addr->bound_dev_if) { dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); if (!dev) return -ENODEV; ret = rdma_copy_addr(dev_addr, dev, NULL); dev_put(dev); return ret; } switch (addr->sa_family) { case AF_INET: dev = ip_dev_find(&init_net, ((struct sockaddr_in *) addr)->sin_addr.s_addr); if (!dev) return ret; ret = rdma_copy_addr(dev_addr, dev, NULL); if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); dev_put(dev); break; #if defined(INET6) case AF_INET6: { struct sockaddr_in6 *sin6; struct ifaddr *ifa; in_port_t port; uint32_t scope_id; sin6 = (struct sockaddr_in6 *)addr; port = sin6->sin6_port; sin6->sin6_port = 0; scope_id = sin6->sin6_scope_id; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) SCOPE_ID_CACHE(scope_id, sin6); CURVNET_SET_QUIET(&init_net); ifa = ifa_ifwithaddr(addr); CURVNET_RESTORE(); sin6->sin6_port = port; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) SCOPE_ID_RESTORE(scope_id, sin6); if (ifa == NULL) { ret = -ENODEV; break; } ret = rdma_copy_addr(dev_addr, ifa->ifa_ifp, NULL); if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(ifa->ifa_ifp); ifa_free(ifa); break; } #endif default: break; } return ret; } EXPORT_SYMBOL(rdma_translate_ip); static void set_timeout(unsigned long time) { - unsigned long delay; + int delay; /* under FreeBSD ticks are 32-bit */ delay = time - jiffies; - if ((long)delay <= 0) + if (delay <= 0) delay = 1; mod_delayed_work(addr_wq, &work, delay); } static void queue_req(struct addr_req *req) { struct addr_req *temp_req; mutex_lock(&lock); list_for_each_entry_reverse(temp_req, &req_list, list) { if (time_after_eq(req->timeout, temp_req->timeout)) break; } list_add(&req->list, &temp_req->list); if (req_list.next == &req->list) set_timeout(req->timeout); mutex_unlock(&lock); } static int addr_resolve(struct sockaddr *src_in, struct sockaddr *dst_in, struct rdma_dev_addr *addr) { struct sockaddr_in *sin; struct sockaddr_in6 *sin6; struct ifaddr *ifa; struct ifnet *ifp; struct rtentry *rte; #if defined(INET) || defined(INET6) in_port_t port; #endif #ifdef INET6 uint32_t scope_id; #endif u_char edst[MAX_ADDR_LEN]; int multi; int bcast; int is_gw = 0; int error = 0; CURVNET_SET_QUIET(&init_net); /* * Determine whether the address is unicast, multicast, or broadcast * and whether the source interface is valid. */ multi = 0; bcast = 0; sin = NULL; sin6 = NULL; ifp = NULL; rte = NULL; ifa = NULL; ifp = NULL; memset(edst, 0, sizeof(edst)); #ifdef INET6 scope_id = -1U; #endif switch (dst_in->sa_family) { #ifdef INET case AF_INET: sin = (struct sockaddr_in *)dst_in; if (sin->sin_addr.s_addr == INADDR_BROADCAST) bcast = 1; if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) multi = 1; sin = (struct sockaddr_in *)src_in; if (sin->sin_addr.s_addr != INADDR_ANY) { /* * Address comparison fails if the port is set * cache it here to be restored later. 
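* The code below saves sin_port, zeroes it (and sin_zero) so the
* ifa_ifwithaddr() lookup matches on the address alone, and restores
* the saved port once the interface has been found.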
*/ port = sin->sin_port; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); /* * If we have a source address to use look it * up first and verify that it is a local * interface: */ CURVNET_SET_QUIET(&init_net); ifa = ifa_ifwithaddr(src_in); CURVNET_RESTORE(); sin->sin_port = port; if (ifa == NULL) { error = ENETUNREACH; goto done; } ifp = ifa->ifa_ifp; ifa_free(ifa); if (bcast || multi) goto mcast; } break; #endif #ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)dst_in; if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) multi = 1; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { /* * The IB address comparison fails if the * scope ID is set and not part of the addr: */ scope_id = sin6->sin6_scope_id; if (scope_id < 256) SCOPE_ID_CACHE(scope_id, sin6); } sin6 = (struct sockaddr_in6 *)src_in; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { port = sin6->sin6_port; sin6->sin6_port = 0; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { if (scope_id < 256) SCOPE_ID_CACHE(scope_id, sin6); } /* * If we have a source address to use look it * up first and verify that it is a local * interface: */ CURVNET_SET_QUIET(&init_net); ifa = ifa_ifwithaddr(src_in); CURVNET_RESTORE(); sin6->sin6_port = port; if (ifa == NULL) { error = ENETUNREACH; goto done; } ifp = ifa->ifa_ifp; ifa_free(ifa); if (bcast || multi) goto mcast; } break; #endif default: error = EINVAL; goto done; } /* * Make sure the route exists and has a valid link. */ rte = rtalloc1(dst_in, 1, 0); if (rte == NULL || rte->rt_ifp == NULL || !RT_LINK_IS_UP(rte->rt_ifp)) { if (rte) RTFREE_LOCKED(rte); error = EHOSTUNREACH; goto done; } if (rte->rt_flags & RTF_GATEWAY) is_gw = 1; /* * If it's not multicast or broadcast and the route doesn't match the * requested interface return unreachable. Otherwise fetch the * correct interface pointer and unlock the route. */ if (multi || bcast) { if (ifp == NULL) { ifp = rte->rt_ifp; /* rt_ifa holds the route answer source address */ ifa = rte->rt_ifa; } RTFREE_LOCKED(rte); } else if (ifp && ifp != rte->rt_ifp) { RTFREE_LOCKED(rte); error = ENETUNREACH; goto done; } else { if (ifp == NULL) { ifp = rte->rt_ifp; ifa = rte->rt_ifa; } RT_UNLOCK(rte); } #if defined(INET) || defined(INET6) mcast: #endif if (bcast) { memcpy(edst, ifp->if_broadcastaddr, ifp->if_addrlen); goto done; } else if (multi) { struct sockaddr *llsa; struct sockaddr_dl sdl; sdl.sdl_len = sizeof(sdl); llsa = (struct sockaddr *)&sdl; if (ifp->if_resolvemulti == NULL) { error = EOPNOTSUPP; goto done; } error = ifp->if_resolvemulti(ifp, &llsa, dst_in); if (error == 0) { memcpy(edst, LLADDR((struct sockaddr_dl *)llsa), ifp->if_addrlen); } goto done; } /* * Resolve the link local address. */ switch (dst_in->sa_family) { #ifdef INET case AF_INET: error = arpresolve(ifp, is_gw, NULL, is_gw ? rte->rt_gateway : dst_in, edst, NULL, NULL); break; #endif #ifdef INET6 case AF_INET6: error = nd6_resolve(ifp, is_gw, NULL, is_gw ? 
rte->rt_gateway : dst_in, edst, NULL, NULL); break; #endif default: KASSERT(0, ("rdma_addr_resolve: Unreachable")); error = EINVAL; break; } RTFREE(rte); done: if (error == 0) error = -rdma_copy_addr(addr, ifp, edst); if (error == 0) memcpy(src_in, ifa->ifa_addr, ip_addr_size(ifa->ifa_addr)); #ifdef INET6 if (scope_id < 256) { sin6 = (struct sockaddr_in6 *)src_in; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) SCOPE_ID_RESTORE(scope_id, sin6); sin6 = (struct sockaddr_in6 *)dst_in; SCOPE_ID_RESTORE(scope_id, sin6); } #endif if (error == EWOULDBLOCK) error = ENODATA; CURVNET_RESTORE(); return -error; } static void process_req(struct work_struct *work) { struct addr_req *req, *temp_req; struct sockaddr *src_in, *dst_in; struct list_head done_list; INIT_LIST_HEAD(&done_list); mutex_lock(&lock); list_for_each_entry_safe(req, temp_req, &req_list, list) { if (req->status == -ENODATA) { src_in = (struct sockaddr *) &req->src_addr; dst_in = (struct sockaddr *) &req->dst_addr; req->status = addr_resolve(src_in, dst_in, req->addr); if (req->status && time_after_eq(jiffies, req->timeout)) req->status = -ETIMEDOUT; else if (req->status == -ENODATA) continue; } list_move_tail(&req->list, &done_list); } if (!list_empty(&req_list)) { req = list_entry(req_list.next, struct addr_req, list); set_timeout(req->timeout); } mutex_unlock(&lock); list_for_each_entry_safe(req, temp_req, &done_list, list) { list_del(&req->list); req->callback(req->status, (struct sockaddr *) &req->src_addr, req->addr, req->context); put_client(req->client); kfree(req); } } int rdma_resolve_ip(struct rdma_addr_client *client, struct sockaddr *src_addr, struct sockaddr *dst_addr, struct rdma_dev_addr *addr, int timeout_ms, void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context), void *context) { struct sockaddr *src_in, *dst_in; struct addr_req *req; int ret = 0; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; src_in = (struct sockaddr *) &req->src_addr; dst_in = (struct sockaddr *) &req->dst_addr; if (src_addr) { if (src_addr->sa_family != dst_addr->sa_family) { ret = -EINVAL; goto err; } memcpy(src_in, src_addr, ip_addr_size(src_addr)); } else { src_in->sa_family = dst_addr->sa_family; } memcpy(dst_in, dst_addr, ip_addr_size(dst_addr)); req->addr = addr; req->callback = callback; req->context = context; req->client = client; atomic_inc(&client->refcount); req->status = addr_resolve(src_in, dst_in, addr); switch (req->status) { case 0: req->timeout = jiffies; queue_req(req); break; case -ENODATA: req->timeout = msecs_to_jiffies(timeout_ms) + jiffies; queue_req(req); break; default: ret = req->status; atomic_dec(&client->refcount); goto err; } return ret; err: kfree(req); return ret; } EXPORT_SYMBOL(rdma_resolve_ip); void rdma_addr_cancel(struct rdma_dev_addr *addr) { struct addr_req *req, *temp_req; mutex_lock(&lock); list_for_each_entry_safe(req, temp_req, &req_list, list) { if (req->addr == addr) { req->status = -ECANCELED; req->timeout = jiffies; list_move(&req->list, &req_list); set_timeout(req->timeout); break; } } mutex_unlock(&lock); } EXPORT_SYMBOL(rdma_addr_cancel); struct resolve_cb_context { struct rdma_dev_addr *addr; struct completion comp; }; static void resolve_cb(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context) { memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct rdma_dev_addr)); complete(&((struct resolve_cb_context *)context)->comp); } int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid 
*dgid, u8 *dmac, u16 *vlan_id, u32 scope_id) { int ret = 0; struct rdma_dev_addr dev_addr; struct resolve_cb_context ctx; struct net_device *dev; union { struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } sgid_addr, dgid_addr; ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid, scope_id); if (ret) return ret; ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid, scope_id); if (ret) return ret; memset(&dev_addr, 0, sizeof(dev_addr)); ctx.addr = &dev_addr; init_completion(&ctx.comp); ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr, &dev_addr, 1000, resolve_cb, &ctx); if (ret) return ret; wait_for_completion(&ctx.comp); memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); if (!dev) return -ENODEV; if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); dev_put(dev); return ret; } EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh); u32 rdma_get_ipv6_scope_id(struct ib_device *ib, u8 port_num) { #ifdef INET6 struct ifnet *ifp; if (ib->get_netdev == NULL) return (-1U); ifp = ib->get_netdev(ib, port_num); if (ifp == NULL) return (-1U); return (in6_getscopezone(ifp, IPV6_ADDR_SCOPE_LINKLOCAL)); #else return (-1U); #endif } int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id, u32 scope_id) { int ret = 0; struct rdma_dev_addr dev_addr; union { struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } gid_addr; ret = rdma_gid2ip(&gid_addr._sockaddr, sgid, scope_id); if (ret) return ret; memset(&dev_addr, 0, sizeof(dev_addr)); ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id); if (ret) return ret; memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN); return ret; } EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid); static int netevent_callback(struct notifier_block *self, unsigned long event, void *ctx) { if (event == NETEVENT_NEIGH_UPDATE) { set_timeout(jiffies); } return 0; } static struct notifier_block nb = { .notifier_call = netevent_callback }; static int __init addr_init(void) { INIT_DELAYED_WORK(&work, process_req); addr_wq = create_singlethread_workqueue("ib_addr"); if (!addr_wq) return -ENOMEM; register_netevent_notifier(&nb); rdma_addr_register_client(&self); return 0; } static void __exit addr_cleanup(void) { rdma_addr_unregister_client(&self); unregister_netevent_notifier(&nb); destroy_workqueue(addr_wq); } module_init(addr_init); module_exit(addr_cleanup); Index: projects/make-check-sandbox/sys/ofed/drivers/infiniband/core/cm.c =================================================================== --- projects/make-check-sandbox/sys/ofed/drivers/infiniband/core/cm.c (revision 321993) +++ projects/make-check-sandbox/sys/ofed/drivers/infiniband/core/cm.c (revision 321994) @@ -1,3986 +1,3986 @@ /* * Copyright (c) 2004-2007 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cm_msgs.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("InfiniBand CM"); MODULE_LICENSE("Dual BSD/GPL"); #ifdef pr_fmt #undef pr_fmt #endif #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ static void cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device); static struct ib_client cm_client = { .name = "cm", .add = cm_add_one, .remove = cm_remove_one }; static struct ib_cm { spinlock_t lock; struct list_head device_list; rwlock_t device_lock; struct rb_root listen_service_table; u64 listen_service_id; /* struct rb_root peer_service_table; todo: fix peer to peer */ struct rb_root remote_qp_table; struct rb_root remote_id_table; struct rb_root remote_sidr_table; struct idr local_id_table; __be32 random_id_operand; struct list_head timewait_list; struct workqueue_struct *wq; } cm; /* Counter indexes ordered by attribute ID */ enum { CM_REQ_COUNTER, CM_MRA_COUNTER, CM_REJ_COUNTER, CM_REP_COUNTER, CM_RTU_COUNTER, CM_DREQ_COUNTER, CM_DREP_COUNTER, CM_SIDR_REQ_COUNTER, CM_SIDR_REP_COUNTER, CM_LAP_COUNTER, CM_APR_COUNTER, CM_ATTR_COUNT, CM_ATTR_ID_OFFSET = 0x0010, }; enum { CM_XMIT, CM_XMIT_RETRIES, CM_RECV, CM_RECV_DUPLICATES, CM_COUNTER_GROUPS }; static char const counter_group_names[CM_COUNTER_GROUPS] [sizeof("cm_rx_duplicates")] = { "cm_tx_msgs", "cm_tx_retries", "cm_rx_msgs", "cm_rx_duplicates" }; struct cm_counter_group { struct kobject obj; atomic_long_t counter[CM_ATTR_COUNT]; }; struct cm_counter_attribute { struct attribute attr; int index; }; #define CM_COUNTER_ATTR(_name, _index) \ struct cm_counter_attribute cm_##_name##_counter_attr = { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .index = _index \ } static CM_COUNTER_ATTR(req, CM_REQ_COUNTER); static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER); static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER); static CM_COUNTER_ATTR(rep, CM_REP_COUNTER); static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER); static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER); static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER); static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER); static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER); static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER); 
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER); static struct attribute *cm_counter_default_attrs[] = { &cm_req_counter_attr.attr, &cm_mra_counter_attr.attr, &cm_rej_counter_attr.attr, &cm_rep_counter_attr.attr, &cm_rtu_counter_attr.attr, &cm_dreq_counter_attr.attr, &cm_drep_counter_attr.attr, &cm_sidr_req_counter_attr.attr, &cm_sidr_rep_counter_attr.attr, &cm_lap_counter_attr.attr, &cm_apr_counter_attr.attr, NULL }; struct cm_port { struct cm_device *cm_dev; struct ib_mad_agent *mad_agent; struct kobject port_obj; u8 port_num; struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; }; struct cm_device { struct list_head list; struct ib_device *ib_device; struct device *device; u8 ack_delay; struct cm_port *port[0]; }; struct cm_av { struct cm_port *port; union ib_gid dgid; struct ib_ah_attr ah_attr; u16 pkey_index; u8 timeout; u8 valid; u8 smac[ETH_ALEN]; }; struct cm_work { struct delayed_work work; struct list_head list; struct cm_port *port; struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ __be32 local_id; /* Established / timewait */ __be32 remote_id; struct ib_cm_event cm_event; struct ib_sa_path_rec path[0]; }; struct cm_timewait_info { struct cm_work work; /* Must be first. */ struct list_head list; struct rb_node remote_qp_node; struct rb_node remote_id_node; __be64 remote_ca_guid; __be32 remote_qpn; u8 inserted_remote_qp; u8 inserted_remote_id; }; struct cm_id_private { struct ib_cm_id id; struct rb_node service_node; struct rb_node sidr_id_node; spinlock_t lock; /* Do not acquire inside cm.lock */ struct completion comp; atomic_t refcount; struct ib_mad_send_buf *msg; struct cm_timewait_info *timewait_info; /* todo: use alternate port on send failure */ struct cm_av av; struct cm_av alt_av; struct ib_cm_compare_data *compare_data; void *private_data; __be64 tid; __be32 local_qpn; __be32 remote_qpn; enum ib_qp_type qp_type; __be32 sq_psn; __be32 rq_psn; int timeout_ms; enum ib_mtu path_mtu; __be16 pkey; u8 private_data_len; u8 max_cm_retries; u8 peer_to_peer; u8 responder_resources; u8 initiator_depth; u8 retry_count; u8 rnr_retry_count; u8 service_timeout; u8 target_ack_delay; struct list_head work_list; atomic_t work_count; }; static void cm_work_handler(struct work_struct *work); static inline void cm_deref_id(struct cm_id_private *cm_id_priv) { if (atomic_dec_and_test(&cm_id_priv->refcount)) complete(&cm_id_priv->comp); } static int cm_alloc_msg(struct cm_id_private *cm_id_priv, struct ib_mad_send_buf **msg) { struct ib_mad_agent *mad_agent; struct ib_mad_send_buf *m; struct ib_ah *ah; mad_agent = cm_id_priv->av.port->mad_agent; ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); if (IS_ERR(ah)) return PTR_ERR(ah); m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, cm_id_priv->av.pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); if (IS_ERR(m)) { ib_destroy_ah(ah); return PTR_ERR(m); } /* Timeout set by caller if response is expected. 
*/ m->ah = ah; m->retries = cm_id_priv->max_cm_retries; atomic_inc(&cm_id_priv->refcount); m->context[0] = cm_id_priv; *msg = m; return 0; } static int cm_alloc_response_msg(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, struct ib_mad_send_buf **msg) { struct ib_mad_send_buf *m; struct ib_ah *ah; ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, port->port_num); if (IS_ERR(ah)) return PTR_ERR(ah); m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); if (IS_ERR(m)) { ib_destroy_ah(ah); return PTR_ERR(m); } m->ah = ah; *msg = m; return 0; } static void cm_free_msg(struct ib_mad_send_buf *msg) { ib_destroy_ah(msg->ah); if (msg->context[0]) cm_deref_id(msg->context[0]); ib_free_send_mad(msg); } static void * cm_copy_private_data(const void *private_data, u8 private_data_len) { void *data; if (!private_data || !private_data_len) return NULL; data = kmemdup(private_data, private_data_len, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); return data; } static void cm_set_private_data(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len) { if (cm_id_priv->private_data && cm_id_priv->private_data_len) kfree(cm_id_priv->private_data); cm_id_priv->private_data = private_data; cm_id_priv->private_data_len = private_data_len; } static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, struct ib_grh *grh, struct cm_av *av) { av->port = port; av->pkey_index = wc->pkey_index; ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc, grh, &av->ah_attr); } int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac) { struct cm_id_private *cm_id_priv; cm_id_priv = container_of(id, struct cm_id_private, id); if (smac != NULL) memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac)); if (alt_smac != NULL) memcpy(cm_id_priv->alt_av.smac, alt_smac, sizeof(cm_id_priv->alt_av.smac)); return 0; } EXPORT_SYMBOL(ib_update_cm_av); static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) { struct cm_device *cm_dev; struct cm_port *port = NULL; unsigned long flags; int ret; u8 p; read_lock_irqsave(&cm.device_lock, flags); list_for_each_entry(cm_dev, &cm.device_list, list) { if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid, &p, NULL)) { port = cm_dev->port[p-1]; break; } } read_unlock_irqrestore(&cm.device_lock, flags); if (!port) return -EINVAL; ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num, be16_to_cpu(path->pkey), &av->pkey_index); if (ret) return ret; av->port = port; ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path, &av->ah_attr); av->timeout = path->packet_life_time + 1; memcpy(av->smac, path->smac, sizeof(av->smac)); av->valid = 1; return 0; } static int cm_alloc_id(struct cm_id_private *cm_id_priv) { unsigned long flags; int ret, id; static int next_id; do { spin_lock_irqsave(&cm.lock, flags); ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id, &id); if (!ret) next_id = ((unsigned) id + 1) & MAX_IDR_MASK; spin_unlock_irqrestore(&cm.lock, flags); } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; return ret; } static void cm_free_id(__be32 local_id) { spin_lock_irq(&cm.lock); idr_remove(&cm.local_id_table, (__force int) (local_id ^ cm.random_id_operand)); spin_unlock_irq(&cm.lock); } static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) { struct 
cm_id_private *cm_id_priv; cm_id_priv = idr_find(&cm.local_id_table, (__force int) (local_id ^ cm.random_id_operand)); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); else cm_id_priv = NULL; } return cm_id_priv; } static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; spin_lock_irq(&cm.lock); cm_id_priv = cm_get_id(local_id, remote_id); spin_unlock_irq(&cm.lock); return cm_id_priv; } static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask) { int i; for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++) ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] & ((unsigned long *) mask)[i]; } static int cm_compare_data(struct ib_cm_compare_data *src_data, struct ib_cm_compare_data *dst_data) { u8 src[IB_CM_COMPARE_SIZE]; u8 dst[IB_CM_COMPARE_SIZE]; if (!src_data || !dst_data) return 0; cm_mask_copy(src, src_data->data, dst_data->mask); cm_mask_copy(dst, dst_data->data, src_data->mask); return memcmp(src, dst, IB_CM_COMPARE_SIZE); } static int cm_compare_private_data(u8 *private_data, struct ib_cm_compare_data *dst_data) { u8 src[IB_CM_COMPARE_SIZE]; if (!dst_data) return 0; cm_mask_copy(src, private_data, dst_data->mask); return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE); } /* * Trivial helpers to strip endian annotation and compare; the * endianness doesn't actually matter since we just need a stable * order for the RB tree. */ static int be32_lt(__be32 a, __be32 b) { return (__force u32) a < (__force u32) b; } static int be32_gt(__be32 a, __be32 b) { return (__force u32) a > (__force u32) b; } static int be64_lt(__be64 a, __be64 b) { return (__force u64) a < (__force u64) b; } static int be64_gt(__be64 a, __be64 b) { return (__force u64) a > (__force u64) b; } static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.listen_service_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; __be64 service_id = cm_id_priv->id.service_id; __be64 service_mask = cm_id_priv->id.service_mask; int data_cmp; while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, service_node); data_cmp = cm_compare_data(cm_id_priv->compare_data, cur_cm_id_priv->compare_data); if ((cur_cm_id_priv->id.service_mask & service_id) == (service_mask & cur_cm_id_priv->id.service_id) && (cm_id_priv->id.device == cur_cm_id_priv->id.device) && !data_cmp) return cur_cm_id_priv; if (cm_id_priv->id.device < cur_cm_id_priv->id.device) link = &(*link)->rb_left; else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) link = &(*link)->rb_right; else if (be64_lt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_left; else if (be64_gt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_right; else if (data_cmp < 0) link = &(*link)->rb_left; else link = &(*link)->rb_right; } rb_link_node(&cm_id_priv->service_node, parent, link); rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); return NULL; } static struct cm_id_private * cm_find_listen(struct ib_device *device, __be64 service_id, u8 *private_data) { struct rb_node *node = cm.listen_service_table.rb_node; struct cm_id_private *cm_id_priv; int data_cmp; while (node) { cm_id_priv = rb_entry(node, struct cm_id_private, service_node); data_cmp = cm_compare_private_data(private_data, cm_id_priv->compare_data); if ((cm_id_priv->id.service_mask & service_id) == cm_id_priv->id.service_id && (cm_id_priv->id.device == 
device) && !data_cmp) return cm_id_priv; if (device < cm_id_priv->id.device) node = node->rb_left; else if (device > cm_id_priv->id.device) node = node->rb_right; else if (be64_lt(service_id, cm_id_priv->id.service_id)) node = node->rb_left; else if (be64_gt(service_id, cm_id_priv->id.service_id)) node = node->rb_right; else if (data_cmp < 0) node = node->rb_left; else node = node->rb_right; } return NULL; } static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_id_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_id = timewait_info->work.remote_id; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_id = 1; rb_link_node(&timewait_info->remote_id_node, parent, link); rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); return NULL; } static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid, __be32 remote_id) { struct rb_node *node = cm.remote_id_table.rb_node; struct cm_timewait_info *timewait_info; while (node) { timewait_info = rb_entry(node, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, timewait_info->work.remote_id)) node = node->rb_left; else if (be32_gt(remote_id, timewait_info->work.remote_id)) node = node->rb_right; else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_left; else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_right; else return timewait_info; } return NULL; } static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_qp_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_qpn = timewait_info->remote_qpn; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_qp_node); if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_left; else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_qp = 1; rb_link_node(&timewait_info->remote_qp_node, parent, link); rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); return NULL; } static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.remote_sidr_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; union ib_gid *port_gid = &cm_id_priv->av.dgid; __be32 remote_id = cm_id_priv->id.remote_id; while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, sidr_id_node); if (be32_lt(remote_id, 
cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_right; else { int cmp; cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid, sizeof *port_gid); if (cmp < 0) link = &(*link)->rb_left; else if (cmp > 0) link = &(*link)->rb_right; else return cur_cm_id_priv; } } rb_link_node(&cm_id_priv->sidr_id_node, parent, link); rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); return NULL; } static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv, enum ib_cm_sidr_status status) { struct ib_cm_sidr_rep_param param; memset(¶m, 0, sizeof param); param.status = status; ib_send_cm_sidr_rep(&cm_id_priv->id, ¶m); } struct ib_cm_id *ib_create_cm_id(struct ib_device *device, ib_cm_handler cm_handler, void *context) { struct cm_id_private *cm_id_priv; int ret; cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); if (!cm_id_priv) return ERR_PTR(-ENOMEM); cm_id_priv->id.state = IB_CM_IDLE; cm_id_priv->id.device = device; cm_id_priv->id.cm_handler = cm_handler; cm_id_priv->id.context = context; cm_id_priv->id.remote_cm_qpn = 1; ret = cm_alloc_id(cm_id_priv); if (ret) goto error; spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); INIT_LIST_HEAD(&cm_id_priv->work_list); atomic_set(&cm_id_priv->work_count, -1); atomic_set(&cm_id_priv->refcount, 1); return &cm_id_priv->id; error: kfree(cm_id_priv); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(ib_create_cm_id); static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv) { struct cm_work *work; if (list_empty(&cm_id_priv->work_list)) return NULL; work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); list_del(&work->list); return work; } static void cm_free_work(struct cm_work *work) { if (work->mad_recv_wc) ib_free_recv_mad(work->mad_recv_wc); kfree(work); } static inline int cm_convert_to_ms(int iba_time) { /* approximate conversion to ms from 4.096us x 2^iba_time */ return 1 << max(iba_time - 8, 0); } /* * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time * Because of how ack_timeout is stored, adding one doubles the timeout. * To avoid large timeouts, select the max(ack_delay, life_time + 1), and * increment it (round up) only if the other is within 50%. 
*/ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time) { int ack_timeout = packet_life_time + 1; if (ack_timeout >= ca_ack_delay) ack_timeout += (ca_ack_delay >= (ack_timeout - 1)); else ack_timeout = ca_ack_delay + (ack_timeout >= (ca_ack_delay - 1)); return min(31, ack_timeout); } static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info) { if (timewait_info->inserted_remote_id) { rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); timewait_info->inserted_remote_id = 0; } if (timewait_info->inserted_remote_qp) { rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); timewait_info->inserted_remote_qp = 0; } } static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id, gfp_t flags) { struct cm_timewait_info *timewait_info; timewait_info = kzalloc(sizeof *timewait_info, flags); if (!timewait_info) return ERR_PTR(-ENOMEM); timewait_info->work.local_id = local_id; INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; return timewait_info; } static void cm_enter_timewait(struct cm_id_private *cm_id_priv) { int wait_time; unsigned long flags; spin_lock_irqsave(&cm.lock, flags); cm_cleanup_timewait(cm_id_priv->timewait_info); list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list); spin_unlock_irqrestore(&cm.lock, flags); /* * The cm_id could be destroyed by the user before we exit timewait. * To protect against this, we search for the cm_id after exiting * timewait before notifying the user that we've exited timewait. */ cm_id_priv->id.state = IB_CM_TIMEWAIT; wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, msecs_to_jiffies(wait_time)); cm_id_priv->timewait_info = NULL; } static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) { unsigned long flags; cm_id_priv->id.state = IB_CM_IDLE; if (cm_id_priv->timewait_info) { spin_lock_irqsave(&cm.lock, flags); cm_cleanup_timewait(cm_id_priv->timewait_info); spin_unlock_irqrestore(&cm.lock, flags); kfree(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; } } static void cm_destroy_id(struct ib_cm_id *cm_id, int err) { struct cm_id_private *cm_id_priv; struct cm_work *work; cm_id_priv = container_of(cm_id, struct cm_id_private, id); retest: spin_lock_irq(&cm_id_priv->lock); switch (cm_id->state) { case IB_CM_LISTEN: cm_id->state = IB_CM_IDLE; spin_unlock_irq(&cm_id_priv->lock); spin_lock_irq(&cm.lock); rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); spin_unlock_irq(&cm.lock); break; case IB_CM_SIDR_REQ_SENT: cm_id->state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_SIDR_REQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); break; case IB_CM_REQ_SENT: + case IB_CM_MRA_REQ_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, &cm_id_priv->id.device->node_guid, sizeof cm_id_priv->id.device->node_guid, NULL, 0); break; case IB_CM_REQ_RCVD: if (err == -ENOMEM) { /* Do not reject to allow future retries. 
*/ cm_reset_to_idle(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); } else { spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); } break; - case IB_CM_MRA_REQ_RCVD: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); /* Fall through */ case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); break; case IB_CM_ESTABLISHED: spin_unlock_irq(&cm_id_priv->lock); if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) break; ib_send_cm_dreq(cm_id, NULL, 0); goto retest; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_enter_timewait(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_DREQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_drep(cm_id, NULL, 0); break; default: spin_unlock_irq(&cm_id_priv->lock); break; } cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); wait_for_completion(&cm_id_priv->comp); while ((work = cm_dequeue_work(cm_id_priv)) != NULL) cm_free_work(work); kfree(cm_id_priv->compare_data); kfree(cm_id_priv->private_data); kfree(cm_id_priv); } void ib_destroy_cm_id(struct ib_cm_id *cm_id) { cm_destroy_id(cm_id, 0); } EXPORT_SYMBOL(ib_destroy_cm_id); int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, struct ib_cm_compare_data *compare_data) { struct cm_id_private *cm_id_priv, *cur_cm_id_priv; unsigned long flags; int ret = 0; service_mask = service_mask ? service_mask : ~cpu_to_be64(0); service_id &= service_mask; if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && (service_id != IB_CM_ASSIGN_SERVICE_ID)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); if (cm_id->state != IB_CM_IDLE) return -EINVAL; if (compare_data) { cm_id_priv->compare_data = kzalloc(sizeof *compare_data, GFP_KERNEL); if (!cm_id_priv->compare_data) return -ENOMEM; cm_mask_copy(cm_id_priv->compare_data->data, compare_data->data, compare_data->mask); memcpy(cm_id_priv->compare_data->mask, compare_data->mask, IB_CM_COMPARE_SIZE); } cm_id->state = IB_CM_LISTEN; spin_lock_irqsave(&cm.lock, flags); if (service_id == IB_CM_ASSIGN_SERVICE_ID) { cm_id->service_id = cpu_to_be64(cm.listen_service_id++); cm_id->service_mask = ~cpu_to_be64(0); } else { cm_id->service_id = service_id; cm_id->service_mask = service_mask; } cur_cm_id_priv = cm_insert_listen(cm_id_priv); spin_unlock_irqrestore(&cm.lock, flags); if (cur_cm_id_priv) { cm_id->state = IB_CM_IDLE; kfree(cm_id_priv->compare_data); cm_id_priv->compare_data = NULL; ret = -EBUSY; } return ret; } EXPORT_SYMBOL(ib_cm_listen); static __be64 cm_form_tid(struct cm_id_private *cm_id_priv, enum cm_msg_sequence msg_seq) { u64 hi_tid, low_tid; hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; low_tid = (u64) ((__force u32)cm_id_priv->id.local_id | (msg_seq << 30)); return cpu_to_be64(hi_tid | low_tid); } static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, __be16 attr_id, __be64 tid) { hdr->base_version = IB_MGMT_BASE_VERSION; hdr->mgmt_class = IB_MGMT_CLASS_CM; hdr->class_version = IB_CM_CLASS_VERSION; hdr->method = IB_MGMT_METHOD_SEND; hdr->attr_id = attr_id; hdr->tid = tid; } static void cm_format_req(struct cm_req_msg *req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_req_param *param) { struct ib_sa_path_rec *pri_path = param->primary_path; struct ib_sa_path_rec *alt_path = param->alternate_path; 
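	/*
	 * Aside on the transaction ID (illustrative sketch, not part of the
	 * original source): cm_form_tid() above builds the 64-bit MAD TID by
	 * OR-ing three fields together:
	 *
	 *   bits 63..32  mad_agent->hi_tid (per-agent prefix)
	 *   bits 31..30  the 2-bit CM_MSG_SEQUENCE_* discriminator
	 *   bits 31..0   the (randomized) local_id, OR'ed underneath
	 *
	 * Assuming CM_MSG_SEQUENCE_REQ == 0, hi_tid == 0x2 and a local_id of
	 * 0x5, the REQ formatted below would carry
	 * cpu_to_be64(0x0000000200000005).
	 */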
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ)); req_msg->local_comm_id = cm_id_priv->id.local_id; req_msg->service_id = param->service_id; req_msg->local_ca_guid = cm_id_priv->id.device->node_guid; cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); cm_req_set_init_depth(req_msg, param->initiator_depth); cm_req_set_remote_resp_timeout(req_msg, param->remote_cm_response_timeout); cm_req_set_qp_type(req_msg, param->qp_type); cm_req_set_flow_ctrl(req_msg, param->flow_control); cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn)); cm_req_set_local_resp_timeout(req_msg, param->local_cm_response_timeout); req_msg->pkey = param->primary_path->pkey; cm_req_set_path_mtu(req_msg, param->primary_path->mtu); cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); if (param->qp_type != IB_QPT_XRC_INI) { cm_req_set_resp_res(req_msg, param->responder_resources); cm_req_set_retry_count(req_msg, param->retry_count); cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count); cm_req_set_srq(req_msg, param->srq); } if (pri_path->hop_limit <= 1) { req_msg->primary_local_lid = pri_path->slid; req_msg->primary_remote_lid = pri_path->dlid; } else { /* Work-around until there's a way to obtain remote LID info */ req_msg->primary_local_lid = IB_LID_PERMISSIVE; req_msg->primary_remote_lid = IB_LID_PERMISSIVE; } req_msg->primary_local_gid = pri_path->sgid; req_msg->primary_remote_gid = pri_path->dgid; cm_req_set_primary_flow_label(req_msg, pri_path->flow_label); cm_req_set_primary_packet_rate(req_msg, pri_path->rate); req_msg->primary_traffic_class = pri_path->traffic_class; req_msg->primary_hop_limit = pri_path->hop_limit; cm_req_set_primary_sl(req_msg, pri_path->sl); cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1)); cm_req_set_primary_local_ack_timeout(req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, pri_path->packet_life_time)); if (alt_path) { if (alt_path->hop_limit <= 1) { req_msg->alt_local_lid = alt_path->slid; req_msg->alt_remote_lid = alt_path->dlid; } else { req_msg->alt_local_lid = IB_LID_PERMISSIVE; req_msg->alt_remote_lid = IB_LID_PERMISSIVE; } req_msg->alt_local_gid = alt_path->sgid; req_msg->alt_remote_gid = alt_path->dgid; cm_req_set_alt_flow_label(req_msg, alt_path->flow_label); cm_req_set_alt_packet_rate(req_msg, alt_path->rate); req_msg->alt_traffic_class = alt_path->traffic_class; req_msg->alt_hop_limit = alt_path->hop_limit; cm_req_set_alt_sl(req_msg, alt_path->sl); cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1)); cm_req_set_alt_local_ack_timeout(req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, alt_path->packet_life_time)); } if (param->private_data && param->private_data_len) memcpy(req_msg->private_data, param->private_data, param->private_data_len); } static int cm_validate_req_param(struct ib_cm_req_param *param) { /* peer-to-peer not supported */ if (param->peer_to_peer) return -EINVAL; if (!param->primary_path) return -EINVAL; if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC && param->qp_type != IB_QPT_XRC_INI) return -EINVAL; if (param->private_data && param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) return -EINVAL; if (param->alternate_path && (param->alternate_path->pkey != param->primary_path->pkey || param->alternate_path->mtu != param->primary_path->mtu)) return -EINVAL; return 0; } int ib_send_cm_req(struct ib_cm_id *cm_id, struct ib_cm_req_param *param) { struct cm_id_private *cm_id_priv; struct cm_req_msg *req_msg; 
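	/*
	 * Flow summary (comment added for orientation): ib_send_cm_req()
	 * validates the parameters, allocates timewait state while the id is
	 * still IB_CM_IDLE, resolves address vectors for the primary and the
	 * optional alternate path, formats and posts the REQ MAD, and only
	 * then moves the id to IB_CM_REQ_SENT under the lock; the error1 and
	 * error2 labels unwind the timewait info and the MAD buffer.
	 *
	 * A minimal, hypothetical caller might look like the guarded sketch
	 * below (my_cm_id, my_path_rec, my_service_id and my_qp are invented
	 * names; the path record would normally come from an SA path query):
	 */
#if 0
	struct ib_cm_req_param p;

	memset(&p, 0, sizeof p);
	p.primary_path = &my_path_rec;
	p.service_id = my_service_id;
	p.qp_num = my_qp->qp_num;
	p.qp_type = IB_QPT_RC;
	p.responder_resources = 4;
	p.initiator_depth = 4;
	p.remote_cm_response_timeout = 20;
	p.local_cm_response_timeout = 20;
	p.retry_count = 7;
	p.rnr_retry_count = 7;
	p.max_cm_retries = 15;
	ret = ib_send_cm_req(my_cm_id, &p);
#endif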
unsigned long flags; int ret; ret = cm_validate_req_param(param); if (ret) return ret; /* Verify that we're not in timewait. */ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_IDLE) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); return -EINVAL; } cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> id.local_id, GFP_ATOMIC); if (IS_ERR(cm_id_priv->timewait_info)) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); return (PTR_ERR(cm_id_priv->timewait_info)); } ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); if (!ret && param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); } if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); goto error1; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); cm_id_priv->timeout_ms = cm_convert_to_ms( param->primary_path->packet_life_time) * 2 + cm_convert_to_ms( param->remote_cm_response_timeout); cm_id_priv->max_cm_retries = param->max_cm_retries; cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->retry_count = param->retry_count; cm_id_priv->path_mtu = param->primary_path->mtu; cm_id_priv->pkey = param->primary_path->pkey; cm_id_priv->qp_type = param->qp_type; ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); if (ret) goto error1; req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; cm_format_req(req_msg, cm_id_priv, param); cm_id_priv->tid = req_msg->hdr.tid; cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg); spin_lock_irqsave(&cm_id_priv->lock, flags); ret = ib_post_send_mad(cm_id_priv->msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); goto error2; } BUG_ON(cm_id->state != IB_CM_IDLE); cm_id->state = IB_CM_REQ_SENT; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error2: cm_free_msg(cm_id_priv->msg); error1: kfree(cm_id_priv->timewait_info); return ret; } EXPORT_SYMBOL(ib_send_cm_req); static int cm_issue_rej(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, enum ib_cm_rej_reason reason, enum cm_msg_response msg_rejected, void *ari, u8 ari_length) { struct ib_mad_send_buf *msg = NULL; struct cm_rej_msg *rej_msg, *rcv_msg; int ret; ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); if (ret) return ret; /* We just need common CM header information. Cast to any message. 
*/ rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; rej_msg = (struct cm_rej_msg *) msg->mad; cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); rej_msg->remote_comm_id = rcv_msg->local_comm_id; rej_msg->local_comm_id = rcv_msg->remote_comm_id; cm_rej_set_msg_rejected(rej_msg, msg_rejected); rej_msg->reason = cpu_to_be16(reason); if (ari && ari_length) { cm_rej_set_reject_info_len(rej_msg, ari_length); memcpy(rej_msg->ari, ari, ari_length); } ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); return ret; } static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid, __be32 local_qpn, __be32 remote_qpn) { return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || ((local_ca_guid == remote_ca_guid) && (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn)))); } static void cm_format_paths_from_req(struct cm_req_msg *req_msg, struct ib_sa_path_rec *primary_path, struct ib_sa_path_rec *alt_path) { memset(primary_path, 0, sizeof *primary_path); primary_path->dgid = req_msg->primary_local_gid; primary_path->sgid = req_msg->primary_remote_gid; primary_path->dlid = req_msg->primary_local_lid; primary_path->slid = req_msg->primary_remote_lid; primary_path->flow_label = cm_req_get_primary_flow_label(req_msg); primary_path->hop_limit = req_msg->primary_hop_limit; primary_path->traffic_class = req_msg->primary_traffic_class; primary_path->reversible = 1; primary_path->pkey = req_msg->pkey; primary_path->sl = cm_req_get_primary_sl(req_msg); primary_path->mtu_selector = IB_SA_EQ; primary_path->mtu = cm_req_get_path_mtu(req_msg); primary_path->rate_selector = IB_SA_EQ; primary_path->rate = cm_req_get_primary_packet_rate(req_msg); primary_path->packet_life_time_selector = IB_SA_EQ; primary_path->packet_life_time = cm_req_get_primary_local_ack_timeout(req_msg); primary_path->packet_life_time -= (primary_path->packet_life_time > 0); if (req_msg->alt_local_lid) { memset(alt_path, 0, sizeof *alt_path); alt_path->dgid = req_msg->alt_local_gid; alt_path->sgid = req_msg->alt_remote_gid; alt_path->dlid = req_msg->alt_local_lid; alt_path->slid = req_msg->alt_remote_lid; alt_path->flow_label = cm_req_get_alt_flow_label(req_msg); alt_path->hop_limit = req_msg->alt_hop_limit; alt_path->traffic_class = req_msg->alt_traffic_class; alt_path->reversible = 1; alt_path->pkey = req_msg->pkey; alt_path->sl = cm_req_get_alt_sl(req_msg); alt_path->mtu_selector = IB_SA_EQ; alt_path->mtu = cm_req_get_path_mtu(req_msg); alt_path->rate_selector = IB_SA_EQ; alt_path->rate = cm_req_get_alt_packet_rate(req_msg); alt_path->packet_life_time_selector = IB_SA_EQ; alt_path->packet_life_time = cm_req_get_alt_local_ack_timeout(req_msg); alt_path->packet_life_time -= (alt_path->packet_life_time > 0); } } static void cm_format_req_event(struct cm_work *work, struct cm_id_private *cm_id_priv, struct ib_cm_id *listen_id) { struct cm_req_msg *req_msg; struct ib_cm_req_event_param *param; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.req_rcvd; param->listen_id = listen_id; param->port = cm_id_priv->av.port->port_num; param->primary_path = &work->path[0]; if (req_msg->alt_local_lid) param->alternate_path = &work->path[1]; else param->alternate_path = NULL; param->remote_ca_guid = req_msg->local_ca_guid; param->remote_qkey = be32_to_cpu(req_msg->local_qkey); param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg)); param->qp_type = cm_req_get_qp_type(req_msg); param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg)); 
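	/*
	 * Note the deliberate cross-over on the next two assignments: the
	 * event is reported from the passive side's point of view, so the
	 * initiator depth requested by the active peer becomes this side's
	 * responder_resources requirement, and vice versa.
	 */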
param->responder_resources = cm_req_get_init_depth(req_msg); param->initiator_depth = cm_req_get_resp_res(req_msg); param->local_cm_response_timeout = cm_req_get_remote_resp_timeout(req_msg); param->flow_control = cm_req_get_flow_ctrl(req_msg); param->remote_cm_response_timeout = cm_req_get_local_resp_timeout(req_msg); param->retry_count = cm_req_get_retry_count(req_msg); param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); param->srq = cm_req_get_srq(req_msg); work->cm_event.private_data = &req_msg->private_data; } static void cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work) { int ret; /* We will typically only have the current event to report. */ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { spin_lock_irq(&cm_id_priv->lock); work = cm_dequeue_work(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); BUG_ON(!work); ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); } cm_deref_id(cm_id_priv); if (ret) cm_destroy_id(&cm_id_priv->id, ret); } static void cm_format_mra(struct cm_mra_msg *mra_msg, struct cm_id_private *cm_id_priv, enum cm_msg_response msg_mraed, u8 service_timeout, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); cm_mra_set_msg_mraed(mra_msg, msg_mraed); mra_msg->local_comm_id = cm_id_priv->id.local_id; mra_msg->remote_comm_id = cm_id_priv->id.remote_id; cm_mra_set_service_timeout(mra_msg, service_timeout); if (private_data && private_data_len) memcpy(mra_msg->private_data, private_data, private_data_len); } static void cm_format_rej(struct cm_rej_msg *rej_msg, struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); rej_msg->remote_comm_id = cm_id_priv->id.remote_id; switch(cm_id_priv->id.state) { case IB_CM_REQ_RCVD: rej_msg->local_comm_id = 0; cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); break; case IB_CM_MRA_REQ_SENT: rej_msg->local_comm_id = cm_id_priv->id.local_id; cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); break; case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: rej_msg->local_comm_id = cm_id_priv->id.local_id; cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP); break; default: rej_msg->local_comm_id = cm_id_priv->id.local_id; cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER); break; } rej_msg->reason = cpu_to_be16(reason); if (ari && ari_length) { cm_rej_set_reject_info_len(rej_msg, ari_length); memcpy(rej_msg->ari, ari, ari_length); } if (private_data && private_data_len) memcpy(rej_msg->private_data, private_data, private_data_len); } static void cm_dup_req_handler(struct cm_work *work, struct cm_id_private *cm_id_priv) { struct ib_mad_send_buf *msg = NULL; int ret; atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. 
*/ if (cm_id_priv->id.state == IB_CM_REQ_RCVD) return; ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) return; spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_MRA_REQ_SENT: cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); break; case IB_CM_TIMEWAIT: cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0); break; default: goto unlock; } spin_unlock_irq(&cm_id_priv->lock); ret = ib_post_send_mad(msg, NULL); if (ret) goto free; return; unlock: spin_unlock_irq(&cm_id_priv->lock); free: cm_free_msg(msg); } static struct cm_id_private * cm_match_req(struct cm_work *work, struct cm_id_private *cm_id_priv) { struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; struct cm_timewait_info *timewait_info; struct cm_req_msg *req_msg; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; /* Check for possible duplicate REQ. */ spin_lock_irq(&cm.lock); timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info); if (timewait_info) { cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, timewait_info->work.remote_id); spin_unlock_irq(&cm.lock); if (cur_cm_id_priv) { cm_dup_req_handler(work, cur_cm_id_priv); cm_deref_id(cur_cm_id_priv); } return NULL; } /* Check for stale connections. */ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); if (timewait_info) { cm_cleanup_timewait(cm_id_priv->timewait_info); spin_unlock_irq(&cm.lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ, NULL, 0); return NULL; } /* Find matching listen request. */ listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device, req_msg->service_id, req_msg->private_data); if (!listen_cm_id_priv) { cm_cleanup_timewait(cm_id_priv->timewait_info); spin_unlock_irq(&cm.lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ, NULL, 0); goto out; } atomic_inc(&listen_cm_id_priv->refcount); atomic_inc(&cm_id_priv->refcount); cm_id_priv->id.state = IB_CM_REQ_RCVD; atomic_inc(&cm_id_priv->work_count); spin_unlock_irq(&cm.lock); out: return listen_cm_id_priv; } /* * Work-around for inter-subnet connections. If the LIDs are permissive, * we need to override the LID/SL data in the REQ with the LID information * in the work completion. 
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}
		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}
		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->id.local_id,
							    GFP_KERNEL);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	/* Workaround: path in req_msg doesn't contain MAC, take it from wc */
	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, 6);
	work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work,
cm_id_priv, &listen_cm_id_priv->id); cm_process_work(cm_id_priv, work); cm_deref_id(listen_cm_id_priv); return 0; rejected: atomic_dec(&cm_id_priv->refcount); cm_deref_id(listen_cm_id_priv); destroy: ib_destroy_cm_id(cm_id); return ret; } static void cm_format_rep(struct cm_rep_msg *rep_msg, struct cm_id_private *cm_id_priv, struct ib_cm_rep_param *param) { cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid); rep_msg->local_comm_id = cm_id_priv->id.local_id; rep_msg->remote_comm_id = cm_id_priv->id.remote_id; cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn)); rep_msg->resp_resources = param->responder_resources; cm_rep_set_target_ack_delay(rep_msg, cm_id_priv->av.port->cm_dev->ack_delay); cm_rep_set_failover(rep_msg, param->failover_accepted); cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count); rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid; if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) { rep_msg->initiator_depth = param->initiator_depth; cm_rep_set_flow_ctrl(rep_msg, param->flow_control); cm_rep_set_srq(rep_msg, param->srq); cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num)); } else { cm_rep_set_srq(rep_msg, 1); cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num)); } if (param->private_data && param->private_data_len) memcpy(rep_msg->private_data, param->private_data, param->private_data_len); } int ib_send_cm_rep(struct ib_cm_id *cm_id, struct ib_cm_rep_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; struct cm_rep_msg *rep_msg; unsigned long flags; int ret; if (param->private_data && param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_REQ_RCVD && cm_id->state != IB_CM_MRA_REQ_SENT) { pr_debug("cm_id->state: %d\n", cm_id->state); ret = -EINVAL; goto out; } ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto out; rep_msg = (struct cm_rep_msg *) msg->mad; cm_format_rep(rep_msg, cm_id_priv, param); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } cm_id->state = IB_CM_REP_SENT; cm_id_priv->msg = msg; cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg); cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_rep); static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); rtu_msg->local_comm_id = cm_id_priv->id.local_id; rtu_msg->remote_comm_id = cm_id_priv->id.remote_id; if (private_data && private_data_len) memcpy(rtu_msg->private_data, private_data, private_data_len); } int ib_send_cm_rtu(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; void *data; int ret; if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) return -EINVAL; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); cm_id_priv = container_of(cm_id, struct cm_id_private, id); 
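	/*
	 * The private data was duplicated above with cm_copy_private_data()
	 * (a GFP_KERNEL allocation that may sleep) before the spinlock is
	 * taken; the state check, the MAD post and the transition to
	 * IB_CM_ESTABLISHED below all happen under cm_id_priv->lock, and the
	 * copy is freed on every error path.  A typical active-side consumer
	 * calls ib_send_cm_rtu(cm_id, NULL, 0) from its cm_handler once the
	 * REP has been received.
	 */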
spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_REP_RCVD && cm_id->state != IB_CM_MRA_REP_SENT) { pr_debug("cm_id->state: %d\n", cm_id->state); ret = -EINVAL; goto error; } ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto error; cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, private_data, private_data_len); ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); kfree(data); return ret; } cm_id->state = IB_CM_ESTABLISHED; cm_set_private_data(cm_id_priv, data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); kfree(data); return ret; } EXPORT_SYMBOL(ib_send_cm_rtu); static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) { struct cm_rep_msg *rep_msg; struct ib_cm_rep_event_param *param; rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.rep_rcvd; param->remote_ca_guid = rep_msg->local_ca_guid; param->remote_qkey = be32_to_cpu(rep_msg->local_qkey); param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type)); param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg)); param->responder_resources = rep_msg->initiator_depth; param->initiator_depth = rep_msg->resp_resources; param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); param->failover_accepted = cm_rep_get_failover(rep_msg); param->flow_control = cm_rep_get_flow_ctrl(rep_msg); param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); param->srq = cm_rep_get_srq(rep_msg); work->cm_event.private_data = &rep_msg->private_data; } static void cm_dup_rep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rep_msg *rep_msg; struct ib_mad_send_buf *msg = NULL; int ret; rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, rep_msg->local_comm_id); if (!cm_id_priv) return; atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_REP_COUNTER]); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) goto deref; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state == IB_CM_ESTABLISHED) cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, cm_id_priv->private_data, cm_id_priv->private_data_len); else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); else goto unlock; spin_unlock_irq(&cm_id_priv->lock); ret = ib_post_send_mad(msg, NULL); if (ret) goto free; goto deref; unlock: spin_unlock_irq(&cm_id_priv->lock); free: cm_free_msg(msg); deref: cm_deref_id(cm_id_priv); } static int cm_rep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rep_msg *rep_msg; int ret; rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0); if (!cm_id_priv) { cm_dup_rep_handler(work); pr_debug("no cm_id_priv\n"); return -EINVAL; } cm_format_rep_event(work, cm_id_priv->qp_type); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: break; default: spin_unlock_irq(&cm_id_priv->lock); ret = -EINVAL; pr_debug("cm_id_priv->id.state: %d\n", cm_id_priv->id.state); goto error; } cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id; cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid; cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); spin_lock(&cm.lock); /* Check for duplicate REP. */ if (cm_insert_remote_id(cm_id_priv->timewait_info)) { spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); ret = -EINVAL; pr_debug("Failed to insert remote id\n"); goto error; } /* Check for a stale connection. */ if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) { rb_erase(&cm_id_priv->timewait_info->remote_id_node, &cm.remote_id_table); cm_id_priv->timewait_info->inserted_remote_id = 0; spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, NULL, 0); ret = -EINVAL; pr_debug("Stale connection.\n"); goto error; } spin_unlock(&cm.lock); cm_id_priv->id.state = IB_CM_REP_RCVD; cm_id_priv->id.remote_id = rep_msg->local_comm_id; cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); cm_id_priv->initiator_depth = rep_msg->resp_resources; cm_id_priv->responder_resources = rep_msg->initiator_depth; cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg); cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg); cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg); cm_id_priv->av.timeout = cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->av.timeout - 1); cm_id_priv->alt_av.timeout = cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->alt_av.timeout - 1); /* todo: handle peer_to_peer */ ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; error: cm_deref_id(cm_id_priv); return ret; } static int cm_establish_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; int ret; /* See comment in cm_establish about lookup. 
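The work item deliberately carries only local_id/remote_id rather than a cm_id reference, so the lookup happens here in the worker thread.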
*/ cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); if (!cm_id_priv) return -EINVAL; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { spin_unlock_irq(&cm_id_priv->lock); goto out; } ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_rtu_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rtu_msg *rtu_msg; int ret; rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id, rtu_msg->local_comm_id); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = &rtu_msg->private_data; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_REP_SENT && cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { spin_unlock_irq(&cm_id_priv->lock); atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_RTU_COUNTER]); goto out; } cm_id_priv->id.state = IB_CM_ESTABLISHED; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ)); dreq_msg->local_comm_id = cm_id_priv->id.local_id; dreq_msg->remote_comm_id = cm_id_priv->id.remote_id; cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn); if (private_data && private_data_len) memcpy(dreq_msg->private_data, private_data, private_data_len); } int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_ESTABLISHED) { pr_debug("cm_id->state: %d\n", cm_id->state); ret = -EINVAL; goto out; } if (cm_id->lap_state == IB_CM_LAP_SENT || cm_id->lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) { cm_enter_timewait(cm_id_priv); goto out; } cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, private_data, private_data_len); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; ret = ib_post_send_mad(msg, NULL); if (ret) { cm_enter_timewait(cm_id_priv); spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } cm_id->state = IB_CM_DREQ_SENT; cm_id_priv->msg = msg; out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_dreq); static void cm_format_drep(struct cm_drep_msg *drep_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); drep_msg->local_comm_id = 
cm_id_priv->id.local_id; drep_msg->remote_comm_id = cm_id_priv->id.remote_id; if (private_data && private_data_len) memcpy(drep_msg->private_data, private_data, private_data_len); } int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; void *data; int ret; if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) return -EINVAL; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_DREQ_RCVD) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); kfree(data); pr_debug("cm_id->state(%d) != IB_CM_DREQ_RCVD\n", cm_id->state); return -EINVAL; } cm_set_private_data(cm_id_priv, data, private_data_len); cm_enter_timewait(cm_id_priv); ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto out; cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, private_data, private_data_len); ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_drep); static int cm_issue_drep(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_buf *msg = NULL; struct cm_dreq_msg *dreq_msg; struct cm_drep_msg *drep_msg; int ret; ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); if (ret) return ret; dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; drep_msg = (struct cm_drep_msg *) msg->mad; cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid); drep_msg->remote_comm_id = dreq_msg->local_comm_id; drep_msg->local_comm_id = dreq_msg->remote_comm_id; ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); return ret; } static int cm_dreq_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_dreq_msg *dreq_msg; struct ib_mad_send_buf *msg = NULL; int ret; dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, dreq_msg->local_comm_id); if (!cm_id_priv) { atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); cm_issue_drep(work->port, work->mad_recv_wc); pr_debug("no cm_id_priv\n"); return -EINVAL; } work->cm_event.private_data = &dreq_msg->private_data; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg)) goto unlock; switch (cm_id_priv->id.state) { case IB_CM_REP_SENT: case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); break; case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); break; case IB_CM_MRA_REP_RCVD: break; case IB_CM_TIMEWAIT: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, cm_id_priv->private_data, cm_id_priv->private_data_len); spin_unlock_irq(&cm_id_priv->lock); if (ib_post_send_mad(msg, NULL)) cm_free_msg(msg); goto deref; case IB_CM_DREQ_RCVD: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_DREQ_COUNTER]); goto unlock; default: pr_debug("cm_id_priv->id.state: %d\n", cm_id_priv->id.state); goto unlock; } cm_id_priv->id.state = IB_CM_DREQ_RCVD; cm_id_priv->tid = dreq_msg->hdr.tid; ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; unlock: spin_unlock_irq(&cm_id_priv->lock); deref: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_drep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_drep_msg *drep_msg; int ret; drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id, drep_msg->local_comm_id); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = &drep_msg->private_data; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_DREQ_SENT && cm_id_priv->id.state != IB_CM_DREQ_RCVD) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_enter_timewait(cm_id_priv); ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id->state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: ret = cm_alloc_msg(cm_id_priv, &msg); if (!ret) cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, reason, ari, ari_length, private_data, private_data_len); cm_reset_to_idle(cm_id_priv); break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ret = cm_alloc_msg(cm_id_priv, &msg); if (!ret) cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, reason, ari, ari_length, private_data, private_data_len); cm_enter_timewait(cm_id_priv); break; default: pr_debug("cm_id->state: 0x%x\n", cm_id->state); ret = -EINVAL; goto out; } if (ret) goto out; ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_rej); static void cm_format_rej_event(struct cm_work *work) { struct cm_rej_msg *rej_msg; struct ib_cm_rej_event_param *param; rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.rej_rcvd; param->ari = rej_msg->ari; param->ari_length = cm_rej_get_reject_info_len(rej_msg); param->reason = __be16_to_cpu(rej_msg->reason); work->cm_event.private_data = &rej_msg->private_data; } static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) { struct cm_timewait_info *timewait_info; struct cm_id_private *cm_id_priv; __be32 remote_id; remote_id = rej_msg->local_comm_id; if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { spin_lock_irq(&cm.lock); timewait_info = cm_find_remote_id( *((__be64 *) 
rej_msg->ari), remote_id); if (!timewait_info) { spin_unlock_irq(&cm.lock); return NULL; } cm_id_priv = idr_find(&cm.local_id_table, (__force int) (timewait_info->work.local_id ^ cm.random_id_operand)); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); else cm_id_priv = NULL; } spin_unlock_irq(&cm.lock); } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); else cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); return cm_id_priv; } static int cm_rej_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rej_msg *rej_msg; int ret; rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_rejected_id(rej_msg); if (!cm_id_priv) return -EINVAL; cm_format_rej_event(work); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); /* fall through */ case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN) cm_enter_timewait(cm_id_priv); else cm_reset_to_idle(cm_id_priv); break; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); /* fall through */ case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_enter_timewait(cm_id_priv); break; case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT || cm_id_priv->id.lap_state == IB_CM_LAP_SENT) { if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT) ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_enter_timewait(cm_id_priv); break; } /* fall through */ default: spin_unlock_irq(&cm_id_priv->lock); pr_debug("cm_id_priv->id.state: 0x%x\n", cm_id_priv->id.state); ret = -EINVAL; goto out; } ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } int ib_send_cm_mra(struct ib_cm_id *cm_id, u8 service_timeout, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; enum ib_cm_state cm_state; enum ib_cm_lap_state lap_state; enum cm_msg_response msg_response; void *data; unsigned long flags; int ret; if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) return -EINVAL; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch(cm_id_priv->id.state) { case IB_CM_REQ_RCVD: cm_state = IB_CM_MRA_REQ_SENT; lap_state = cm_id->lap_state; msg_response = CM_MSG_RESPONSE_REQ; break; case IB_CM_REP_RCVD: cm_state = IB_CM_MRA_REP_SENT; lap_state = cm_id->lap_state; msg_response = CM_MSG_RESPONSE_REP; break; case IB_CM_ESTABLISHED: if (cm_id->lap_state == IB_CM_LAP_RCVD) { cm_state = cm_id->state; lap_state = IB_CM_MRA_LAP_SENT; msg_response = CM_MSG_RESPONSE_OTHER; break; } default: pr_debug("cm_id_priv->id.state: 0x%x\n", cm_id_priv->id.state); ret = -EINVAL; goto error1; } if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) { ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto error1; cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, msg_response, service_timeout, private_data, 
private_data_len); ret = ib_post_send_mad(msg, NULL); if (ret) goto error2; } cm_id->state = cm_state; cm_id->lap_state = lap_state; cm_id_priv->service_timeout = service_timeout; cm_set_private_data(cm_id_priv, data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags); kfree(data); return ret; error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags); kfree(data); cm_free_msg(msg); return ret; } EXPORT_SYMBOL(ib_send_cm_mra); static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) { switch (cm_mra_get_msg_mraed(mra_msg)) { case CM_MSG_RESPONSE_REQ: return cm_acquire_id(mra_msg->remote_comm_id, 0); case CM_MSG_RESPONSE_REP: case CM_MSG_RESPONSE_OTHER: return cm_acquire_id(mra_msg->remote_comm_id, mra_msg->local_comm_id); default: return NULL; } } static int cm_mra_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_mra_msg *mra_msg; int timeout, ret; mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_mraed_id(mra_msg); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = &mra_msg->private_data; work->cm_event.param.mra_rcvd.service_timeout = cm_mra_get_service_timeout(mra_msg); timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) + cm_convert_to_ms(cm_id_priv->av.timeout); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout)) goto out; cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; break; case IB_CM_REP_SENT: if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout)) goto out; cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; break; case IB_CM_ESTABLISHED: if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || cm_id_priv->id.lap_state != IB_CM_LAP_SENT || ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout)) { if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) atomic_long_inc(&work->port-> counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); goto out; } cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; break; case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REP_RCVD: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_MRA_COUNTER]); /* fall through */ default: pr_debug("cm_id_priv->id.state: 0x%x\n", cm_id_priv->id.state); goto out; } cm_id_priv->msg->context[1] = (void *) (unsigned long) cm_id_priv->id.state; ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: spin_unlock_irq(&cm_id_priv->lock); cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_lap(struct cm_lap_msg *lap_msg, struct cm_id_private *cm_id_priv, struct ib_sa_path_rec *alternate_path, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID, cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP)); lap_msg->local_comm_id = cm_id_priv->id.local_id; lap_msg->remote_comm_id = cm_id_priv->id.remote_id; cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn); /* todo: need remote CM response timeout */ cm_lap_set_remote_resp_timeout(lap_msg, 0x1F); lap_msg->alt_local_lid = alternate_path->slid; lap_msg->alt_remote_lid = alternate_path->dlid; lap_msg->alt_local_gid = alternate_path->sgid; lap_msg->alt_remote_gid = alternate_path->dgid; cm_lap_set_flow_label(lap_msg, alternate_path->flow_label); cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class); lap_msg->alt_hop_limit = alternate_path->hop_limit; cm_lap_set_packet_rate(lap_msg, alternate_path->rate); cm_lap_set_sl(lap_msg, alternate_path->sl); cm_lap_set_subnet_local(lap_msg, 1); /* local only... */ cm_lap_set_local_ack_timeout(lap_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, alternate_path->packet_life_time)); if (private_data && private_data_len) memcpy(lap_msg->private_data, private_data, private_data_len); } int ib_send_cm_lap(struct ib_cm_id *cm_id, struct ib_sa_path_rec *alternate_path, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_ESTABLISHED || (cm_id->lap_state != IB_CM_LAP_UNINIT && cm_id->lap_state != IB_CM_LAP_IDLE)) { ret = -EINVAL; goto out; } ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); if (ret) goto out; cm_id_priv->alt_av.timeout = cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->alt_av.timeout - 1); ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto out; cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, alternate_path, private_data, private_data_len); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } cm_id->lap_state = IB_CM_LAP_SENT; cm_id_priv->msg = msg; out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_lap); static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, struct ib_sa_path_rec *path, struct cm_lap_msg *lap_msg) { memset(path, 0, sizeof *path); path->dgid = lap_msg->alt_local_gid; path->sgid = lap_msg->alt_remote_gid; path->dlid = lap_msg->alt_local_lid; path->slid = lap_msg->alt_remote_lid; path->flow_label = cm_lap_get_flow_label(lap_msg); path->hop_limit = lap_msg->alt_hop_limit; path->traffic_class = 
cm_lap_get_traffic_class(lap_msg); path->reversible = 1; path->pkey = cm_id_priv->pkey; path->sl = cm_lap_get_sl(lap_msg); path->mtu_selector = IB_SA_EQ; path->mtu = cm_id_priv->path_mtu; path->rate_selector = IB_SA_EQ; path->rate = cm_lap_get_packet_rate(lap_msg); path->packet_life_time_selector = IB_SA_EQ; path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); path->packet_life_time -= (path->packet_life_time > 0); } static int cm_lap_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_lap_msg *lap_msg; struct ib_cm_lap_event_param *param; struct ib_mad_send_buf *msg = NULL; int ret; /* todo: verify LAP request and send reject APR if invalid. */ lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, lap_msg->local_comm_id); if (!cm_id_priv) return -EINVAL; param = &work->cm_event.param.lap_rcvd; param->alternate_path = &work->path[0]; cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); work->cm_event.private_data = &lap_msg->private_data; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) goto unlock; switch (cm_id_priv->id.lap_state) { case IB_CM_LAP_UNINIT: case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_OTHER, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); spin_unlock_irq(&cm_id_priv->lock); if (ib_post_send_mad(msg, NULL)) cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_LAP_COUNTER]); goto unlock; default: goto unlock; } cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; cm_id_priv->tid = lap_msg->hdr.tid; cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); if (cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av)) goto unlock; ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; unlock: spin_unlock_irq(&cm_id_priv->lock); deref: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_apr(struct cm_apr_msg *apr_msg, struct cm_id_private *cm_id_priv, enum ib_cm_apr_status status, void *info, u8 info_length, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); apr_msg->local_comm_id = cm_id_priv->id.local_id; apr_msg->remote_comm_id = cm_id_priv->id.remote_id; apr_msg->ap_status = (u8) status; if (info && info_length) { apr_msg->info_length = info_length; memcpy(apr_msg->info, info, info_length); } if (private_data && private_data_len) memcpy(apr_msg->private_data, private_data, private_data_len); } int ib_send_cm_apr(struct ib_cm_id *cm_id, enum ib_cm_apr_status status, void *info, u8 info_length, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || (info && info_length > IB_CM_APR_INFO_LENGTH)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_ESTABLISHED || (cm_id->lap_state != IB_CM_LAP_RCVD && cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { ret = -EINVAL; goto out; } ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto out; cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, info, info_length, private_data, private_data_len); ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } cm_id->lap_state = IB_CM_LAP_IDLE; out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_apr); static int cm_apr_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_apr_msg *apr_msg; int ret; apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, apr_msg->local_comm_id); if (!cm_id_priv) return -EINVAL; /* Unmatched reply. 
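No connection matches the APR's communication IDs, so the message is dropped.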
*/ work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; work->cm_event.private_data = &apr_msg->private_data; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED || (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_id_priv->msg = NULL; ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_timewait_handler(struct cm_work *work) { struct cm_timewait_info *timewait_info; struct cm_id_private *cm_id_priv; int ret; timewait_info = (struct cm_timewait_info *)work; spin_lock_irq(&cm.lock); list_del(&timewait_info->list); spin_unlock_irq(&cm.lock); cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); if (!cm_id_priv) return -EINVAL; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_TIMEWAIT || cm_id_priv->remote_qpn != timewait_info->remote_qpn) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.state = IB_CM_IDLE; ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_sidr_req_param *param) { cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); sidr_req_msg->request_id = cm_id_priv->id.local_id; sidr_req_msg->pkey = param->path->pkey; sidr_req_msg->service_id = param->service_id; if (param->private_data && param->private_data_len) memcpy(sidr_req_msg->private_data, param->private_data, param->private_data_len); } int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, struct ib_cm_sidr_req_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if (!param->path || (param->private_data && param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); ret = cm_init_av_by_path(param->path, &cm_id_priv->av); if (ret) goto out; cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); cm_id_priv->timeout_ms = param->timeout_ms; cm_id_priv->max_cm_retries = param->max_cm_retries; ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto out; cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, param); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; if (cm_id->state == IB_CM_IDLE) ret = ib_post_send_mad(msg, NULL); else ret = -EINVAL; if (ret) { cm_free_msg(msg); goto out; } cm_id->state = IB_CM_SIDR_REQ_SENT; cm_id_priv->msg = msg; out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_req); static void cm_format_sidr_req_event(struct cm_work *work, struct 
ib_cm_id *listen_id) { struct cm_sidr_req_msg *sidr_req_msg; struct ib_cm_sidr_req_event_param *param; sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_req_rcvd; param->pkey = __be16_to_cpu(sidr_req_msg->pkey); param->listen_id = listen_id; param->port = work->port->port_num; work->cm_event.private_data = &sidr_req_msg->private_data; } static int cm_sidr_req_handler(struct cm_work *work) { struct ib_cm_id *cm_id; struct cm_id_private *cm_id_priv, *cur_cm_id_priv; struct cm_sidr_req_msg *sidr_req_msg; struct ib_wc *wc; cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); cm_id_priv = container_of(cm_id, struct cm_id_private, id); /* Record SGID/SLID and request ID for lookup. */ sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; wc = work->mad_recv_wc->wc; cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); cm_id_priv->av.dgid.global.interface_id = 0; cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); cm_id_priv->id.remote_id = sidr_req_msg->request_id; cm_id_priv->tid = sidr_req_msg->hdr.tid; atomic_inc(&cm_id_priv->work_count); spin_lock_irq(&cm.lock); cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (cur_cm_id_priv) { spin_unlock_irq(&cm.lock); atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. */ } cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; cur_cm_id_priv = cm_find_listen(cm_id->device, sidr_req_msg->service_id, sidr_req_msg->private_data); if (!cur_cm_id_priv) { spin_unlock_irq(&cm.lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED); goto out; /* No match. 
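No listener is registered for this service ID; cm_reject_sidr_req() above rejects the request with status IB_SIDR_UNSUPPORTED.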
*/ } atomic_inc(&cur_cm_id_priv->refcount); atomic_inc(&cm_id_priv->refcount); spin_unlock_irq(&cm.lock); cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; cm_id_priv->id.context = cur_cm_id_priv->id.context; cm_id_priv->id.service_id = sidr_req_msg->service_id; cm_id_priv->id.service_mask = ~cpu_to_be64(0); cm_format_sidr_req_event(work, &cur_cm_id_priv->id); cm_process_work(cm_id_priv, work); cm_deref_id(cur_cm_id_priv); return 0; out: ib_destroy_cm_id(&cm_id_priv->id); return -EINVAL; } static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param) { cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, cm_id_priv->tid); sidr_rep_msg->request_id = cm_id_priv->id.remote_id; sidr_rep_msg->status = param->status; cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); sidr_rep_msg->service_id = cm_id_priv->id.service_id; sidr_rep_msg->qkey = cpu_to_be32(param->qkey); if (param->info && param->info_length) memcpy(sidr_rep_msg->info, param->info, param->info_length); if (param->private_data && param->private_data_len) memcpy(sidr_rep_msg->private_data, param->private_data, param->private_data_len); } int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, struct ib_cm_sidr_rep_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || (param->private_data && param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { ret = -EINVAL; goto error; } ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto error; cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, param); ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } cm_id->state = IB_CM_IDLE; spin_unlock_irqrestore(&cm_id_priv->lock, flags); spin_lock_irqsave(&cm.lock, flags); rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); spin_unlock_irqrestore(&cm.lock, flags); return 0; error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_rep); static void cm_format_sidr_rep_event(struct cm_work *work) { struct cm_sidr_rep_msg *sidr_rep_msg; struct ib_cm_sidr_rep_event_param *param; sidr_rep_msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_rep_rcvd; param->status = sidr_rep_msg->status; param->qkey = be32_to_cpu(sidr_rep_msg->qkey); param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); param->info = &sidr_rep_msg->info; param->info_len = sidr_rep_msg->info_length; work->cm_event.private_data = &sidr_rep_msg->private_data; } static int cm_sidr_rep_handler(struct cm_work *work) { struct cm_sidr_rep_msg *sidr_rep_msg; struct cm_id_private *cm_id_priv; sidr_rep_msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); if (!cm_id_priv) return -EINVAL; /* Unmatched reply. 
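The request_id does not correspond to an outstanding SIDR REQ on this node.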
*/ spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); cm_format_sidr_rep_event(work); cm_process_work(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_process_send_error(struct ib_mad_send_buf *msg, enum ib_wc_status wc_status) { struct cm_id_private *cm_id_priv; struct ib_cm_event cm_event; enum ib_cm_state state; int ret; memset(&cm_event, 0, sizeof cm_event); cm_id_priv = msg->context[0]; /* Discard old sends or ones without a response. */ spin_lock_irq(&cm_id_priv->lock); state = (enum ib_cm_state) (unsigned long) msg->context[1]; if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) goto discard; switch (state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: cm_reset_to_idle(cm_id_priv); cm_event.event = IB_CM_REQ_ERROR; break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_reset_to_idle(cm_id_priv); cm_event.event = IB_CM_REP_ERROR; break; case IB_CM_DREQ_SENT: cm_enter_timewait(cm_id_priv); cm_event.event = IB_CM_DREQ_ERROR; break; case IB_CM_SIDR_REQ_SENT: cm_id_priv->id.state = IB_CM_IDLE; cm_event.event = IB_CM_SIDR_REQ_ERROR; break; default: goto discard; } spin_unlock_irq(&cm_id_priv->lock); cm_event.param.send_status = wc_status; /* No other events can occur on the cm_id at this point. */ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); cm_free_msg(msg); if (ret) ib_destroy_cm_id(&cm_id_priv->id); return; discard: spin_unlock_irq(&cm_id_priv->lock); cm_free_msg(msg); } static void cm_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { struct ib_mad_send_buf *msg = mad_send_wc->send_buf; struct cm_port *port; u16 attr_index; port = mad_agent->context; attr_index = be16_to_cpu(((struct ib_mad_hdr *) msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; /* * If the send was in response to a received message (context[0] is not * set to a cm_id), and is not a REJ, then it is a send that was * manually retried. */ if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) msg->retries = 1; atomic_long_add(1 + msg->retries, &port->counter_group[CM_XMIT].counter[attr_index]); if (msg->retries) atomic_long_add(msg->retries, &port->counter_group[CM_XMIT_RETRIES]. 
counter[attr_index]); switch (mad_send_wc->status) { case IB_WC_SUCCESS: case IB_WC_WR_FLUSH_ERR: cm_free_msg(msg); break; default: if (msg->context[0] && msg->context[1]) cm_process_send_error(msg, mad_send_wc->status); else cm_free_msg(msg); break; } } static void cm_work_handler(struct work_struct *_work) { struct cm_work *work = container_of(_work, struct cm_work, work.work); int ret; switch (work->cm_event.event) { case IB_CM_REQ_RECEIVED: ret = cm_req_handler(work); break; case IB_CM_MRA_RECEIVED: ret = cm_mra_handler(work); break; case IB_CM_REJ_RECEIVED: ret = cm_rej_handler(work); break; case IB_CM_REP_RECEIVED: ret = cm_rep_handler(work); break; case IB_CM_RTU_RECEIVED: ret = cm_rtu_handler(work); break; case IB_CM_USER_ESTABLISHED: ret = cm_establish_handler(work); break; case IB_CM_DREQ_RECEIVED: ret = cm_dreq_handler(work); break; case IB_CM_DREP_RECEIVED: ret = cm_drep_handler(work); break; case IB_CM_SIDR_REQ_RECEIVED: ret = cm_sidr_req_handler(work); break; case IB_CM_SIDR_REP_RECEIVED: ret = cm_sidr_rep_handler(work); break; case IB_CM_LAP_RECEIVED: ret = cm_lap_handler(work); break; case IB_CM_APR_RECEIVED: ret = cm_apr_handler(work); break; case IB_CM_TIMEWAIT_EXIT: ret = cm_timewait_handler(work); break; default: pr_debug("work->cm_event.event: 0x%x\n", work->cm_event.event); ret = -EINVAL; break; } if (ret) cm_free_work(work); } static int cm_establish(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; struct cm_work *work; unsigned long flags; int ret = 0; work = kmalloc(sizeof *work, GFP_ATOMIC); if (!work) return -ENOMEM; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id->state) { case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_id->state = IB_CM_ESTABLISHED; break; case IB_CM_ESTABLISHED: ret = -EISCONN; break; default: pr_debug("cm_id->state: 0x%x\n", cm_id->state); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (ret) { kfree(work); goto out; } /* * The CM worker thread may try to destroy the cm_id before it * can execute this work item. To prevent potential deadlock, * we need to find the cm_id once we're in the context of the * worker thread, rather than holding a reference on it. 
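* Only the local_id/remote_id pair is recorded below; cm_establish_handler looks the cm_id up again when the work item runs.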
*/ INIT_DELAYED_WORK(&work->work, cm_work_handler); work->local_id = cm_id->local_id; work->remote_id = cm_id->remote_id; work->mad_recv_wc = NULL; work->cm_event.event = IB_CM_USER_ESTABLISHED; queue_delayed_work(cm.wq, &work->work, 0); out: return ret; } static int cm_migrate(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; unsigned long flags; int ret = 0; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state == IB_CM_ESTABLISHED && (cm_id->lap_state == IB_CM_LAP_UNINIT || cm_id->lap_state == IB_CM_LAP_IDLE)) { cm_id->lap_state = IB_CM_LAP_IDLE; cm_id_priv->av = cm_id_priv->alt_av; } else ret = -EINVAL; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) { int ret; switch (event) { case IB_EVENT_COMM_EST: ret = cm_establish(cm_id); break; case IB_EVENT_PATH_MIG: ret = cm_migrate(cm_id); break; default: ret = -EINVAL; } return ret; } EXPORT_SYMBOL(ib_cm_notify); static void cm_recv_handler(struct ib_mad_agent *mad_agent, struct ib_mad_recv_wc *mad_recv_wc) { struct cm_port *port = mad_agent->context; struct cm_work *work; enum ib_cm_event_type event; u16 attr_id; int paths = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> alt_local_lid != 0); event = IB_CM_REQ_RECEIVED; break; case CM_MRA_ATTR_ID: event = IB_CM_MRA_RECEIVED; break; case CM_REJ_ATTR_ID: event = IB_CM_REJ_RECEIVED; break; case CM_REP_ATTR_ID: event = IB_CM_REP_RECEIVED; break; case CM_RTU_ATTR_ID: event = IB_CM_RTU_RECEIVED; break; case CM_DREQ_ATTR_ID: event = IB_CM_DREQ_RECEIVED; break; case CM_DREP_ATTR_ID: event = IB_CM_DREP_RECEIVED; break; case CM_SIDR_REQ_ATTR_ID: event = IB_CM_SIDR_REQ_RECEIVED; break; case CM_SIDR_REP_ATTR_ID: event = IB_CM_SIDR_REP_RECEIVED; break; case CM_LAP_ATTR_ID: paths = 1; event = IB_CM_LAP_RECEIVED; break; case CM_APR_ATTR_ID: event = IB_CM_APR_RECEIVED; break; default: ib_free_recv_mad(mad_recv_wc); return; } attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); atomic_long_inc(&port->counter_group[CM_RECV]. 
counter[attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, GFP_KERNEL); if (!work) { ib_free_recv_mad(mad_recv_wc); return; } INIT_DELAYED_WORK(&work->work, cm_work_handler); work->cm_event.event = event; work->mad_recv_wc = mad_recv_wc; work->port = port; queue_delayed_work(cm.wq, &work->work, 0); } static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; if (cm_id_priv->responder_resources) qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_ATOMIC; qp_attr->pkey_index = cm_id_priv->av.pkey_index; qp_attr->port_num = cm_id_priv->av.port->port_num; ret = 0; break; default: pr_debug("cm_id_priv->id.state: 0x%x\n", cm_id_priv->id.state); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN; qp_attr->ah_attr = cm_id_priv->av.ah_attr; if (!cm_id_priv->av.valid) return -EINVAL; if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) { qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id; *qp_attr_mask |= IB_QP_VID; } if (!is_zero_ether_addr(cm_id_priv->av.smac)) { memcpy(qp_attr->smac, cm_id_priv->av.smac, sizeof(qp_attr->smac)); *qp_attr_mask |= IB_QP_SMAC; } if (cm_id_priv->alt_av.valid) { if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) { qp_attr->alt_vlan_id = cm_id_priv->alt_av.ah_attr.vlan_id; *qp_attr_mask |= IB_QP_ALT_VID; } if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) { memcpy(qp_attr->alt_smac, cm_id_priv->alt_av.smac, sizeof(qp_attr->alt_smac)); *qp_attr_mask |= IB_QP_ALT_SMAC; } } qp_attr->path_mtu = cm_id_priv->path_mtu; qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); if (cm_id_priv->qp_type == IB_QPT_RC || cm_id_priv->qp_type == IB_QPT_XRC_TGT) { *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources; qp_attr->min_rnr_timer = 0; } if (cm_id_priv->alt_av.ah_attr.dlid) { *qp_attr_mask |= IB_QP_ALT_PATH; qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; } ret = 0; break; default: pr_debug("cm_id_priv->id.state: 0x%x\n", cm_id_priv->id.state); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch 
(cm_id_priv->id.state) { /* Allow transition to RTS before sending REP */ case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); switch (cm_id_priv->qp_type) { case IB_QPT_RC: case IB_QPT_XRC_INI: *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC; qp_attr->retry_cnt = cm_id_priv->retry_count; qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; /* fall through */ case IB_QPT_XRC_TGT: *qp_attr_mask |= IB_QP_TIMEOUT; qp_attr->timeout = cm_id_priv->av.timeout; break; default: break; } if (cm_id_priv->alt_av.ah_attr.dlid) { *qp_attr_mask |= IB_QP_PATH_MIG_STATE; qp_attr->path_mig_state = IB_MIG_REARM; } } else { *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE; qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; qp_attr->path_mig_state = IB_MIG_REARM; } ret = 0; break; default: pr_debug("cm_id_priv->id.state: 0x%x\n", cm_id_priv->id.state); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct cm_id_private *cm_id_priv; int ret; cm_id_priv = container_of(cm_id, struct cm_id_private, id); switch (qp_attr->qp_state) { case IB_QPS_INIT: ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTR: ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTS: ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); break; default: pr_debug("qp_attr->qp_state: 0x%x\n", qp_attr->qp_state); ret = -EINVAL; break; } return ret; } EXPORT_SYMBOL(ib_cm_init_qp_attr); static void cm_get_ack_delay(struct cm_device *cm_dev) { struct ib_device_attr attr; if (ib_query_device(cm_dev->ib_device, &attr)) cm_dev->ack_delay = 0; /* acks will rely on packet life time */ else cm_dev->ack_delay = attr.local_ca_ack_delay; } static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, char *buf) { struct cm_counter_group *group; struct cm_counter_attribute *cm_attr; group = container_of(obj, struct cm_counter_group, obj); cm_attr = container_of(attr, struct cm_counter_attribute, attr); return sprintf(buf, "%ld\n", atomic_long_read(&group->counter[cm_attr->index])); } static const struct sysfs_ops cm_counter_ops = { .show = cm_show_counter }; static struct kobj_type cm_counter_obj_type = { .sysfs_ops = &cm_counter_ops, .default_attrs = cm_counter_default_attrs }; static void cm_release_port_obj(struct kobject *obj) { struct cm_port *cm_port; cm_port = container_of(obj, struct cm_port, port_obj); kfree(cm_port); } static struct kobj_type cm_port_obj_type = { .release = cm_release_port_obj }; static char *cm_devnode(struct device *dev, umode_t *mode) { if (mode) *mode = 0666; return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); } struct class cm_class = { .owner = THIS_MODULE, .name = "infiniband_cm", .devnode = cm_devnode, }; EXPORT_SYMBOL(cm_class); static int cm_create_port_fs(struct cm_port *port) { int i, ret; ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type, &port->cm_dev->device->kobj, "%d", 
port->port_num); if (ret) { kfree(port); return ret; } for (i = 0; i < CM_COUNTER_GROUPS; i++) { ret = kobject_init_and_add(&port->counter_group[i].obj, &cm_counter_obj_type, &port->port_obj, "%s", counter_group_names[i]); if (ret) goto error; } return 0; error: while (i--) kobject_put(&port->counter_group[i].obj); kobject_put(&port->port_obj); return ret; } static void cm_remove_port_fs(struct cm_port *port) { int i; for (i = 0; i < CM_COUNTER_GROUPS; i++) kobject_put(&port->counter_group[i].obj); kobject_put(&port->port_obj); } static void cm_add_one(struct ib_device *ib_device) { struct cm_device *cm_dev; struct cm_port *port; struct ib_mad_reg_req reg_req = { .mgmt_class = IB_MGMT_CLASS_CM, .mgmt_class_version = IB_CM_CLASS_VERSION }; struct ib_port_modify port_modify = { .set_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; int ret; u8 i; if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB) return; cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) * ib_device->phys_port_cnt, GFP_KERNEL); if (!cm_dev) return; cm_dev->ib_device = ib_device; cm_get_ack_delay(cm_dev); cm_dev->device = device_create(&cm_class, &ib_device->dev, MKDEV(0, 0), NULL, "%s", ib_device->name); if (IS_ERR(cm_dev->device)) { kfree(cm_dev); return; } set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); for (i = 1; i <= ib_device->phys_port_cnt; i++) { port = kzalloc(sizeof *port, GFP_KERNEL); if (!port) goto error1; cm_dev->port[i-1] = port; port->cm_dev = cm_dev; port->port_num = i; ret = cm_create_port_fs(port); if (ret) goto error1; port->mad_agent = ib_register_mad_agent(ib_device, i, IB_QPT_GSI, &reg_req, 0, cm_send_handler, cm_recv_handler, port); if (IS_ERR(port->mad_agent)) goto error2; ret = ib_modify_port(ib_device, i, 0, &port_modify); if (ret) goto error3; } ib_set_client_data(ib_device, &cm_client, cm_dev); write_lock_irqsave(&cm.device_lock, flags); list_add_tail(&cm_dev->list, &cm.device_list); write_unlock_irqrestore(&cm.device_lock, flags); return; error3: ib_unregister_mad_agent(port->mad_agent); error2: cm_remove_port_fs(port); error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); cm_remove_port_fs(port); } device_unregister(cm_dev->device); kfree(cm_dev); } static void cm_remove_one(struct ib_device *ib_device) { struct cm_device *cm_dev; struct cm_port *port; struct ib_port_modify port_modify = { .clr_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; int i; cm_dev = ib_get_client_data(ib_device, &cm_client); if (!cm_dev) return; write_lock_irqsave(&cm.device_lock, flags); list_del(&cm_dev->list); write_unlock_irqrestore(&cm.device_lock, flags); for (i = 1; i <= ib_device->phys_port_cnt; i++) { port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); flush_workqueue(cm.wq); cm_remove_port_fs(port); } device_unregister(cm_dev->device); kfree(cm_dev); } static int __init ib_cm_init(void) { int ret; memset(&cm, 0, sizeof cm); INIT_LIST_HEAD(&cm.device_list); rwlock_init(&cm.device_lock); spin_lock_init(&cm.lock); cm.listen_service_table = RB_ROOT; cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); cm.remote_id_table = RB_ROOT; cm.remote_qp_table = RB_ROOT; cm.remote_sidr_table = RB_ROOT; idr_init(&cm.local_id_table); get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); if (!idr_pre_get(&cm.local_id_table,
GFP_KERNEL)) return -ENOMEM; INIT_LIST_HEAD(&cm.timewait_list); ret = class_register(&cm_class); if (ret) { ret = -ENOMEM; goto error1; } cm.wq = create_workqueue("ib_cm"); if (!cm.wq) { ret = -ENOMEM; goto error2; } ret = ib_register_client(&cm_client); if (ret) goto error3; return 0; error3: destroy_workqueue(cm.wq); error2: class_unregister(&cm_class); error1: idr_destroy(&cm.local_id_table); return ret; } static void __exit ib_cm_cleanup(void) { struct cm_timewait_info *timewait_info, *tmp; spin_lock_irq(&cm.lock); list_for_each_entry(timewait_info, &cm.timewait_list, list) cancel_delayed_work(&timewait_info->work.work); spin_unlock_irq(&cm.lock); ib_unregister_client(&cm_client); destroy_workqueue(cm.wq); list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { list_del(&timewait_info->list); kfree(timewait_info); } class_unregister(&cm_class); idr_destroy(&cm.local_id_table); } module_init_order(ib_cm_init, SI_ORDER_SECOND); module_exit_order(ib_cm_cleanup, SI_ORDER_FIRST); Index: projects/make-check-sandbox/usr.bin/calendar/calendars/calendar.freebsd =================================================================== --- projects/make-check-sandbox/usr.bin/calendar/calendars/calendar.freebsd (revision 321993) +++ projects/make-check-sandbox/usr.bin/calendar/calendars/calendar.freebsd (revision 321994) @@ -1,435 +1,438 @@ /* * FreeBSD * * $FreeBSD$ */ #ifndef _calendar_freebsd_ #define _calendar_freebsd_ 01/01 Dimitry Andric born in Utrecht, the Netherlands, 1969 01/01 Lev Serebryakov born in Leningrad, USSR, 1979 01/01 Alexander Langer born in Duesseldorf, Nordrhein-Westfalen, Germany, 1981 01/02 Ion-Mihai "IOnut" Tetcu born in Bucharest, Romania, 1980 01/02 Patrick Li born in Beijing, People's Republic of China, 1985 01/03 Tetsurou Okazaki born in Mobara, Chiba, Japan, 1972 01/04 Hiroyuki Hanai born in Kagawa pre., Japan, 1969 01/06 Philippe Audeoud born in Bretigny-Sur-Orge, France, 1980 01/08 Michael L. Hostbaek born in Copenhagen, Denmark, 1977 01/10 Jean-Yves Lefort born in Charleroi, Belgium, 1980 01/12 Yen-Ming Lee born in Taipei, Taiwan, Republic of China, 1977 01/12 Ying-Chieh Liao born in Taipei, Taiwan, Republic of China, 1979 01/12 Kristof Provost born in Aalst, Belgium, 1983 01/13 Ruslan Bukin born in Dudinka, Russian Federation, 1985 01/14 Yi-Jheng Lin born in Taichung, Taiwan, Republic of China, 1985 01/15 Anne Dickison born in Madison, Indiana, United States, 1976 01/16 Ariff Abdullah born in Kuala Lumpur, Malaysia, 1978 01/16 Dmitry Sivachenko born in Moscow, USSR, 1978 01/16 Vanilla I. Shu born in Taipei, Taiwan, Republic of China, 1978 01/17 Raphael Kubo da Costa born in Sao Paulo, Sao Paulo, Brazil, 1989 01/18 Dejan Lesjak born in Ljubljana, Slovenia, Yugoslavia, 1977 01/19 Marshall Kirk McKusick born in Wilmington, Delaware, United States, 1954 01/19 Ruslan Ermilov born in Simferopol, USSR, 1974 01/19 Marcelo S. 
01/20	Poul-Henning Kamp born in Korsoer, Denmark, 1966
01/21	Mahdi Mokhtari born in Tehran, Iran, 1995
01/22	Johann Visagie born in Cape Town, South Africa, 1970
01/23	Hideyuki KURASHINA born in Niigata, Japan, 1982
01/24	Fabien Thomas born in Avignon, France, 1971
01/24	Matteo Riondato born in Padova, Italy, 1986
01/25	Nick Hibma born in Groningen, the Netherlands, 1972
01/25	Bernd Walter born in Moers, Nordrhein-Westfalen, Germany, 1974
01/26	Andrew Gallatin born in Buffalo, New York, United States, 1970
01/27	Nick Sayer born in San Diego, California, United States, 1968
01/27	Jacques Anthony Vidrine born in Baton Rouge, Louisiana, United States, 1971
01/27	Ngie Cooper born in Seattle, Washington, United States, 1984
01/31	Hidetoshi Shimokawa born in Yokohama, Kanagawa, Japan, 1970
02/01	Doug Rabson born in London, England, 1966
02/01	Nicola Vitale born in Busto Arsizio, Varese, Italy, 1976
02/01	Paul Saab born in Champaign-Urbana, Illinois, United States, 1978
02/01	Martin Wilke born in Ludwigsfelde, Brandenburg, Germany, 1980
02/01	Christian Brueffer born in Gronau, Nordrhein-Westfalen, Germany, 1982
02/01	Steven Kreuzer born in Oceanside, New York, United States, 1982
02/01	Juli Mallett born in Washington, Pennsylvania, United States, 1985
02/02	Diomidis D. Spinellis born in Athens, Greece, 1967
02/02	Michael W Lucas born in Detroit, Michigan, United States, 1967
02/02	Dmitry Chagin born in Stalingrad, USSR, 1976
02/02	Yoichi Nakayama born in Tsu, Mie, Japan, 1976
02/02	Yoshihiro Takahashi born in Yokohama, Kanagawa, Japan, 1976
02/03	Jason Helfman born in Royal Oak, Michigan, United States, 1972
02/04	Eitan Adler born in West Hempstead, New York, United States, 1991
02/05	Frank Laszlo born in Howell, Michigan, United States, 1983
02/06	Julien Charbon born in Saint Etienne, Loire, France, 1978
02/10	David Greenman born in Portland, Oregon, United States, 1968
02/10	Paul Richards born in Ammanford, Carmarthenshire, United Kingdom, 1968
02/10	Simon Barner born in Rosenheim, Bayern, Germany, 1980
02/10	Jason E. Hale born in Pittsburgh, Pennsylvania, United States, 1982
02/13	Jesper Skriver born in Aarhus, Denmark, 1975
02/13	Steve Wills born in Lynchburg, Virginia, United States, 1975
02/13	Andrey Slusar born in Odessa, USSR, 1979
02/13	David W. Chapman Jr. born in Bethel, Connecticut, United States, 1981
02/14	Manolis Kiagias born in Chania, Greece, 1970
02/14	Erwin Lansing born in 's-Hertogenbosch, the Netherlands, 1975
02/14	Martin Blapp born in Olten, Switzerland, 1976
02/15	Hiren Panchasara born in Ahmedabad, Gujarat, India, 1984
02/16	Justin Hibbits born in Toledo, Ohio, United States, 1983
02/16	Tobias Christian Berner born in Bern, Switzerland, 1985
02/19	Murray Stokely born in Jacksonville, Florida, United States, 1979
02/20	Anders Nordby born in Oslo, Norway, 1976
02/21	Alexey Zelkin born in Simferopol, Ukraine, 1978
02/22	Brooks Davis born in Longview, Washington, United States, 1976
02/22	Jake Burkholder born in Maynooth, Ontario, Canada, 1979
02/23	Peter Wemm born in Perth, Western Australia, Australia, 1971
02/23	Mathieu Arnold born in Champigny sur Marne, Val de Marne, France, 1978
02/24	Johan Karlsson born in Mariannelund, Sweden, 1974
02/24	Colin Percival born in Burnaby, Canada, 1981
02/26	Pietro Cerutti born in Faido, Switzerland, 1984
02/28	Daichi GOTO born in Shimizu Suntou, Shizuoka, Japan, 1980
02/28	Ruslan Makhmatkhanov born in Rostov-on-Don, USSR, 1984
03/01	Hye-Shik Chang born in Daejeon, Republic of Korea, 1980
03/02	Cy Schubert born in Edmonton, Alberta, Canada, 1956
03/03	Sergey Matveychuk born in Moscow, Russian Federation, 1973
03/03	Doug White born in Eugene, Oregon, United States, 1977
03/03	Gordon Tetlow born in Reno, Nevada, United States, 1978
03/04	Oleksandr Tymoshenko born in Chernihiv, Ukraine, 1980
03/05	Baptiste Daroussin born in Beauvais, France, 1980
03/05	Philip Paeps born in Leuven, Belgium, 1983
03/05	Ulf Lilleengen born in Hamar, Norway, 1985
03/06	Christopher Piazza born in Kamloops, British Columbia, Canada, 1981
03/07	Michael P. Pritchard born in Los Angeles, California, United States, 1964
03/07	Giorgos Keramidas born in Athens, Greece, 1976
03/10	Andreas Klemm born in Duesseldorf, Nordrhein-Westfalen, Germany, 1963
+03/10	Luiz Otavio O Souza born in Bauru, Sao Paulo, Brazil, 1978
03/10	Nikolai Lifanov born in Moscow, Russian Federation, 1989
03/11	Soeren Straarup born in Andst, Denmark, 1978
03/12	Greg Lewis born in Adelaide, South Australia, Australia, 1969
03/13	Alexander Leidinger born in Neunkirchen, Saarland, Germany, 1976
03/13	Will Andrews born in Pontiac, Michigan, United States, 1982
03/14	Bernhard Froehlich born in Graz, Styria, Austria, 1985
03/15	Paolo Pisati born in Lodi, Italy, 1977
03/15	Brian Fundakowski Feldman born in Alexandria, Virginia, United States, 1983
03/17	Michael Smith born in Bankstown, New South Wales, Australia, 1971
03/17	Alexander Motin born in Simferopol, Ukraine, 1979
03/18	Koop Mast born in Dokkum, the Netherlands, 1981
03/19	Mikhail Teterin born in Kyiv, Ukraine, 1972
03/20	Joseph S. Atkinson born in Batesville, Arkansas, United States, 1977
03/20	Henrik Brix Andersen born in Aarhus, Denmark, 1978
03/20	MANTANI Nobutaka born in Hiroshima, Japan, 1978
03/20	Cameron Grant died in Hemel Hempstead, United Kingdom, 2005
03/22	Brad Davis born in Farmington, New Mexico, United States, 1983
03/23	Daniel C. Sobral born in Brasilia, Distrito Federal, Brazil, 1971
03/23	Benno Rice born in Adelaide, South Australia, Australia, 1977
03/24	Marcel Moolenaar born in Hilversum, the Netherlands, 1968
03/24	Emanuel Haupt born in Zurich, Switzerland, 1979
03/25	Andrew R. Reiter born in Springfield, Massachusetts, United States, 1980
03/27	Josef El-Rayes born in Linz, Austria, 1982
03/28	Sean C. Farley born in Indianapolis, Indiana, United States, 1970
03/29	Thierry Thomas born in Luxeuil les Bains, France, 1961
03/30	Po-Chuan Hsieh born in Taipei, Taiwan, Republic of China, 1978
03/31	First quarter status reports are due on 04/15
04/01	Matthew Jacob born in San Francisco, California, United States, 1958
04/01	Bill Fenner born in Bellefonte, Pennsylvania, United States, 1971
04/01	Peter Edwards born in Dublin, Ireland, 1973
04/03	Hellmuth Michaelis born in Kiel, Schleswig-Holstein, Germany, 1958
04/03	Tong Liu born in Beijing, People's Republic of China, 1981
04/03	Gabor Pali born in Kunhegyes, Hungary, 1982
04/04	Jason Unovitch born in Scranton, Pennsylvania, United States, 1986
04/05	Stacey Son born in Burley, Idaho, United States, 1967
04/06	Peter Jeremy born in Sydney, New South Wales, Australia, 1961
04/07	Edward Tomasz Napierala born in Wolsztyn, Poland, 1981
04/08	Jordan K. Hubbard born in Honolulu, Hawaii, United States, 1963
04/09	Ceri Davies born in Haverfordwest, Pembrokeshire, United Kingdom, 1976
04/11	Bruce A. Mah born in Fresno, California, United States, 1969
04/12	Patrick Gardella born in Columbus, Ohio, United States, 1967
04/12	Ed Schouten born in Oss, the Netherlands, 1986
04/13	Oliver Braun born in Nuremberg, Bavaria, Germany, 1972
04/14	Crist J. Clark born in Milwaukee, Wisconsin, United States, 1970
04/14	Glen J. Barber born in Wilkes-Barre, Pennsylvania, United States, 1981
04/15	David Malone born in Dublin, Ireland, 1973
04/17	Alexey Degtyarev born in Ahtubinsk, Russian Federation, 1984
04/17	Dryice Liu born in Jinan, Shandong, China, 1975
04/22	Joerg Wunsch born in Dresden, Sachsen, Germany, 1962
04/22	Jun Kuriyama born in Matsue, Shimane, Japan, 1973
04/22	Jakub Klama born in Blachownia, Silesia, Poland, 1989
04/25	Richard Gallamore born in Kissimmee, Florida, United States, 1987
04/26	Rene Ladan born in Geldrop, the Netherlands, 1980
04/29	Adam Weinberger born in Berkeley, California, United States, 1980
04/29	Eric Anholt born in Portland, Oregon, United States, 1983
05/01	Randall Stewart born in Spokane, Washington, United States, 1959
05/02	Danilo G. Baio born in Maringa, Parana, Brazil, 1986
05/02	Wojciech A. Koszek born in Czestochowa, Poland, 1987
05/03	Brian Dean born in Elkins, West Virginia, United States, 1966
05/03	Patrick Kelsey born in Freehold, New Jersey, United States, 1976
05/03	Robert Nicholas Maxwell Watson born in Harrow, Middlesex, United Kingdom, 1977
05/04	Denis Peplin born in Nizhniy Novgorod, Russian Federation, 1977
05/08	Kirill Ponomarew born in Volgograd, Russian Federation, 1977
05/08	Sean Kelly born in Walnut Creek, California, United States, 1982
05/09	Daniel Eischen born in Syracuse, New York, United States, 1963
05/09	Aaron Dalton born in Boise, Idaho, United States, 1973
05/09	Jase Thew born in Abergavenny, Gwent, United Kingdom, 1974
05/10	Markus Brueffer born in Gronau, Nordrhein-Westfalen, Germany, 1977
05/11	Kurt Lidl born in Rockville, Maryland, United States, 1968
05/11	Jesus Rodriguez born in Barcelona, Spain, 1972
05/11	Marcin Wojtas born in Krakow, Poland, 1986
05/11	Roman Kurakin born in Moscow, USSR, 1979
05/11	Ulrich Spoerlein born in Schesslitz, Bayern, Germany, 1981
05/13	Pete Fritchman born in Lansdale, Pennsylvania, United States, 1983
05/14	Tatsumi Hosokawa born in Tokyo, Japan, 1968
05/14	Shigeyuku Fukushima born in Osaka, Japan, 1974
05/14	Bruce Cran born in Cambridge, United Kingdom, 1981
+05/15	Hans Petter Selasky born in Flekkefjord, Norway, 1982
05/16	Johann Kois born in Wolfsberg, Austria, 1975
05/16	Marcus Alves Grando born in Florianopolis, Santa Catarina, Brazil, 1979
05/17	Thomas Abthorpe born in Port Arthur, Ontario, Canada, 1968
05/19	Philippe Charnier born in Fontainebleau, France, 1966
05/19	Ian Dowse born in Dublin, Ireland, 1975
05/19	Sofian Brabez born in Toulouse, France, 1984
05/20	Dan Moschuk died in Burlington, Ontario, Canada, 2010
05/21	Kris Kennaway born in Winnipeg, Manitoba, Canada, 1978
05/22	James Gritton born in San Francisco, California, United States, 1967
05/22	Clive Tong-I Lin born in Changhua, Taiwan, Republic of China, 1978
05/22	Michael Bushkov born in Rostov-on-Don, Russian Federation, 1985
05/22	Rui Paulo born in Evora, Portugal, 1986
05/22	David Naylor born in Johannesburg, South Africa, 1988
05/23	Munechika Sumikawa born in Osaka, Osaka, Japan, 1972
05/24	Duncan McLennan Barclay born in London, Middlesex, United Kingdom, 1970
05/24	Oliver Lehmann born in Karlsburg, Germany, 1981
05/25	Pawel Pekala born in Swidnica, Poland, 1980
05/25	Tom Rhodes born in Ellwood City, Pennsylvania, United States, 1981
05/25	Roman Divacky born in Brno, Czech Republic, 1983
05/26	Jim Pirzyk born in Chicago, Illinois, United States, 1968
05/26	Florian Smeets born in Schwerte, Nordrhein-Westfalen, Germany, 1982
05/27	Ollivier Robert born in Paris, France, 1967
05/29	Wilko Bulte born in Arnhem, the Netherlands, 1965
05/29	Seigo Tanimura born in Kitakyushu, Fukuoka, Japan, 1976
05/30	Wen Heping born in Xiangxiang, Hunan, China, 1970
05/31	Ville Skytta born in Helsinki, Finland, 1974
06/02	Jean-Marc Zucconi born in Pontarlier, France, 1954
06/02	Alexander Botero-Lowry born in Austin, Texas, United States, 1986
06/03	CHOI Junho born in Seoul, Korea, 1974
06/03	Wesley Shields born in Binghamton, New York, United States, 1981
06/04	Julian Elischer born in Perth, Australia, 1959
06/04	Justin Gibbs born in San Pedro, California, United States, 1973
06/04	Jason Evans born in Greeley, Colorado, United States, 1973
06/04	Thomas Moestl born in Braunschweig, Niedersachsen, Germany, 1980
06/04	Devin Teske born in Arcadia, California, United States, 1982
06/04	Zack Kirsch born in Memphis, Tennessee, United States, 1982
06/04	Johannes Jost Meixner born in Wiesbaden, Germany, 1987
06/06	Sergei Kolobov born in Karpinsk, Russian Federation, 1972
06/06	Alan Eldridge died in Denver, Colorado, 2003
06/07	Jimmy Olgeni born in Milano, Italy, 1976
06/07	Benjamin Close born in Adelaide, Australia, 1978
06/08	Ravi Pokala born in Royal Oak, Michigan, United States, 1980
06/09	Stanislav Galabov born in Sofia, Bulgaria 1978
06/11	Alonso Cardenas Marquez born in Arequipa, Peru, 1979
06/14	Josh Paetzel born in Minneapolis, Minnesota, United States, 1973
06/17	Tilman Linneweh born in Weinheim, Baden-Wuerttemberg, Germany, 1978
06/18	Li-Wen Hsu born in Taipei, Taiwan, Republic of China, 1984
06/18	Roman Bogorodskiy born in Saratov, Russian Federation, 1986
06/19	Charlie Root born in Portland, Oregon, United States, 1993
06/21	Ganbold Tsagaankhuu born in Ulaanbaatar, Mongolia, 1971
06/21	Niels Heinen born in Markelo, the Netherlands, 1978
06/22	Andreas Tobler born in Heiden, Switzerland, 1968
06/24	Chris Faulhaber born in Springfield, Illinois, United States, 1971
06/26	Brian Somers born in Dundrum, Dublin, Ireland, 1967
06/28	Mark Santcroos born in Rotterdam, the Netherlands, 1979
06/28	Xin Li born in Beijing, People's Republic of China, 1982
06/29	Wilfredo Sanchez Vega born in Majaguez, Puerto Rico, United States, 1972
06/29	Daniel Harris born in Lubbock, Texas, United States, 1985
06/29	Andrew Pantyukhin born in Moscow, Russian Federation, 1985
06/30	Guido van Rooij born in Best, Noord-Brabant, the Netherlands, 1965
06/30	Second quarter status reports are due on 07/15
07/01	Matthew Dillon born in San Francisco, California, United States, 1966
07/01	Mateusz Guzik born in Nowy Targ, Poland, 1986
07/02	Mark Christopher Ovens born in Preston, Lancashire, United Kingdom, 1958
07/02	Vasil Venelinov Dimov born in Shumen, Bulgaria, 1982
07/04	Motoyuki Konno born in Musashino, Tokyo, Japan, 1969
07/04	Florent Thoumie born in Montmorency, Val d'Oise, France, 1982
07/05	Olivier Cochard-Labbe born in Brest, France, 1977
07/05	Sergey Kandaurov born in Gubkin, Russian Federation, 1985
07/07	Andrew Thompson born in Lower Hutt, Wellington, New Zealand, 1979
07/07	Maxime Henrion born in Metz, France, 1981
07/07	George Reid born in Frimley, Hampshire, United Kingdom, 1983
07/10	Jung-uk Kim born in Seoul, Korea, 1971
07/10	Justin Seger born in Harvard, Massachusetts, United States, 1981
07/10	David Schultz born in Oakland, California, United States, 1982
07/10	Ben Woods born in Perth, Western Australia, Australia, 1984
07/11	Jesus R. Camou born in Hermosillo, Sonora, Mexico, 1983
07/15	Gary Jennejohn born in Rochester, New York, United States, 1950
07/16	Suleiman Souhlal born in Roma, Italy, 1983
07/16	Davide Italiano born in Milazzo, Italy, 1989
07/17	Michael Chin-Yuan Wu born in Taipei, Taiwan, Republic of China, 1980
07/19	Masafumi NAKANE born in Okazaki, Aichi, Japan, 1972
07/19	Simon L. Nielsen born in Copenhagen, Denmark, 1980
07/19	Gleb Smirnoff born in Kharkov, USSR, 1981
07/20	Dru Lavigne born in Kingston, Ontario, Canada, 1965
07/20	Andrey V. Elsukov born in Kotelnich, Russian Federation, 1981
07/22	James Housley born in Chicago, Illinois, United States, 1965
07/22	Jens Schweikhardt born in Waiblingen, Baden-Wuerttemberg, Germany, 1967
07/22	Lukas Ertl born in Weissenbach/Enns, Steiermark, Austria, 1976
07/23	Sergey A. Osokin born in Krasnogorsky, Stepnogorsk, Akmolinskaya region, Kazakhstan, 1972
07/23	Andrey Zonov born in Kirov, Russian Federation, 1985
07/24	Alexander Nedotsukov born in Ulyanovsk, Russian Federation, 1974
07/24	Alberto Villa born in Vercelli, Italy, 1987
07/27	Andriy Gapon born in Kyrykivka, Sumy region, Ukraine, 1976
07/28	Jim Mock born in Bethlehem, Pennsylvania, United States, 1974
07/28	Tom Hukins born in Manchester, United Kingdom, 1976
07/29	Dirk Meyer born in Kassel, Hessen, Germany, 1965
07/29	Felippe M. Motta born in Maceio, Alagoas, Brazil, 1988
08/02	Gabor Kovesdan born in Budapest, Hungary, 1987
08/03	Peter Holm born in Copenhagen, Denmark, 1955
08/05	Alfred Perlstein born in Brooklyn, New York, United States, 1978
08/06	Anton Berezin born in Dnepropetrovsk, Ukraine, 1970
08/06	John-Mark Gurney born in Detroit, Michigan, United States, 1978
08/06	Damjan Marion born in Rijeka, Croatia, 1978
08/07	Jonathan Mini born in San Mateo, California, United States, 1979
08/08	Mikolaj Golub born in Kharkov, USSR, 1977
08/08	Juergen Lock died in Bremen, Germany, 2015
08/09	Stefan Farfeleder died in Wien, Austria, 2015
08/10	Julio Merino born in Barcelona, Spain, 1984
08/10	Peter Pentchev born in Sofia, Bulgaria, 1977
08/12	Joe Marcus Clarke born in Lakeland, Florida, United States, 1976
08/12	Max Brazhnikov born in Leningradskaya, Russian Federation, 1979
08/14	Stefan Esser born in Cologne, Nordrhein-Westfalen, Germany, 1961
08/17	Olivier Houchard born in Nancy, France, 1980
08/19	Chin-San Huang born in Yi-Lan, Taiwan, Republic of China, 1979
08/19	Pav Lucistnik born in Kutna Hora, Czech Republic, 1980
08/20	Michael Heffner born in Cleona, Pennsylvania, United States, 1981
08/21	Jason A. Harmening born in Fort Wayne, Indiana, United States, 1981
08/24	Mark Linimon born in Houston, Texas, United States, 1955
08/24	Alexander Botero-Lowry died in San Francisco, California, United States, 2012
08/25	Beech Rintoul born in Oakland, California, United States, 1952
08/25	Jean Milanez Melo born in Divinopolis, Minas Gerais, Brazil, 1982
08/26	Scott Long born in Chicago, Illinois, United States, 1974
08/26	Dima Ruban born in Nalchik, USSR, 1970
08/26	Marc Fonvieille born in Avignon, France, 1972
08/26	Herve Quiroz born in Aix-en-Provence, France, 1977
08/27	Andrey Chernov born in Moscow, USSR, 1966
08/27	Tony Finch born in London, United Kingdom, 1974
08/27	Michael Johnson born in Morganton, North Carolina, United States, 1980
08/28	Norikatsu Shigemura born in Fujisawa, Kanagawa, Japan, 1974
08/29	Thomas Gellekum born in Moenchengladbach, Nordrhein-Westfalen, Germany, 1967
08/29	Max Laier born in Karlsruhe, Germany, 1981
09/01	Pyun YongHyeon born in Kimcheon, Korea, 1968
09/01	William Grzybowski born in Parana, Brazil, 1988
09/03	Max Khon born in Novosibirsk, USSR, 1976
09/03	Allan Jude born in Hamilton, Ontario, Canada, 1984
09/03	Cheng-Lung Sung born in Taipei, Taiwan, Republic of China, 1977
09/05	Mark Robert Vaughan Murray born in Harare, Mashonaland, Zimbabwe, 1961
09/05	Adrian Harold Chadd born in Perth, Western Australia, Australia, 1979
09/05	Rodrigo Osorio born in Montevideo, Uruguay, 1975
09/06	Eric Joyner born in Fairfax, Virginia, United States, 1991
09/07	Tim Bishop born in Cornwall, United Kingdom, 1978
09/07	Chris Rees born in Kettering, United Kingdom, 1987
09/08	Boris Samorodov born in Krasnodar, Russian Federation, 1963
09/09	Yoshio Mita born in Hiroshima, Japan, 1972
09/10	Wesley R. Peters born in Hartford, Alabama, United States, 1961
09/12	Weongyo Jeong born in Haman, Korea, 1980
09/12	Benedict Christopher Reuschling born in Darmstadt, Germany, 1981
09/12	William C. Fumerola II born in Detroit, Michigan, United States, 1981
09/14	Matthew Seaman born in Bristol, United Kingdom, 1965
09/15	Aleksandr Rybalko born in Odessa, Ukraine, 1977
09/15	Dima Panov born in Khabarovsk, Russian Federation, 1978
09/16	Maksim Yevmenkin born in Taganrog, USSR, 1974
09/17	Maxim Bolotin born in Rostov-on-Don, Russian Federation, 1976
09/18	Matthew Fleming born in Cleveland, Ohio, United States, 1975
09/20	Kevin Lo born in Taipei, Taiwan, Republic of China, 1972
+09/21	Alex Kozlov born in Bila Tserkva, Ukraine, 1970
09/21	Gleb Kurtsou born in Minsk, Belarus, 1984
09/22	Alan Somers born in San Antonio, Texas, United States, 1982
09/22	Bryan Drewery born in San Diego, California, United States, 1984
09/23	Martin Matuska born in Bratislava, Slovakia, 1979
09/24	Larry Rosenman born in Queens, New York, United States, 1957
09/27	Neil Blakey-Milner born in Port Elizabeth, South Africa, 1978
09/27	Renato Botelho born in Araras, Sao Paulo, Brazil, 1979
09/28	Greg Lehey born in Melbourne, Victoria, Australia, 1948
09/28	Alex Dupre born in Milano, Italy, 1980
09/29	Matthew Hunt born in Johnstown, Pennsylvania, United States, 1976
09/30	Mark Felder born in Prairie du Chien, Wisconsin, United States, 1985
09/30	Hiten M. Pandya born in Dar-es-Salaam, Tanzania, East Africa, 1986
09/30	Third quarter status reports are due on 10/15
10/02	Beat Gaetzi born in Zurich, Switzerland, 1980
10/02	Grzegorz Blach born in Starachowice, Poland, 1985
10/05	Hiroki Sato born in Yamagata, Japan, 1977
10/05	Chris Costello born in Houston, Texas, United States, 1985
10/09	Stefan Walter born in Werne, Nordrhein-Westfalen, Germany, 1978
10/11	Rick Macklem born in Ontario, Canada, 1955
10/12	Pawel Jakub Dawidek born in Radzyn Podlaski, Poland, 1980
10/15	Maxim Konovalov born in Khabarovsk, USSR, 1973
10/15	Eugene Grosbein born in Novokuznetsk, Russian Republic, USSR, 1976
10/16	Remko Lodder born in Rotterdam, the Netherlands, 1983
10/17	Maho NAKATA born in Osaka, Japan, 1974
10/18	Sheldon Hearn born in Cape Town, Western Cape, South Africa, 1974
10/18	Vladimir Kondratyev born in Ryazan, USSR, 1975
10/19	Nicholas Souchu born in Suresnes, Hauts-de-Seine, France, 1972
10/19	Nick Barkas born in Longview, Washington, United States, 1981
10/19	Pedro Giffuni born in Bogotá, Colombia, 1968
10/20	Joel Dahl born in Bitterna, Skaraborg, Sweden, 1983
10/20	Dmitry Marakasov born in Moscow, Russian Federation, 1984
10/21	Ben Smithurst born in Sheffield, South Yorkshire, United Kingdom, 1981
10/22	Jean-Sebastien Pedron born in Redon, Ille-et-Vilaine, France, 1980
10/23	Mario Sergio Fujikawa Ferreira born in Brasilia, Distrito Federal, Brazil, 1976
10/23	Romain Tartière born in Clermont-Ferrand, France, 1984
10/25	Eric Melville born in Los Gatos, California, United States, 1980
10/25	Julien Laffaye born in Toulouse, France, 1988
10/25	Ashish SHUKLA born in Kanpur, India, 1985
10/25	Toomas Soome born Estonia, 1971
10/26	Matthew Ahrens born in United States, 1979
10/26	Philip M. Gollucci born in Silver Spring, Maryland, United States, 1979
10/27	Takanori Watanabe born in Numazu, Shizuoka, Japan, 1972
10/31	Taras Korenko born in Cherkasy region, Ukraine, 1980
11/03	Ryan Stone born in Ottawa, Ontario, Canada, 1985
11/05	M. Warner Losh born in Kansas City, Kansas, United States, 1966
11/06	Michael Zhilin born in Stary Oskol, USSR, 1985
11/08	Joseph R. Mingrone born in Charlottetown, Prince Edward Island, Canada, 1976
11/09	Coleman Kane born in Cincinnati, Ohio, United States, 1980
11/09	Antoine Brodin born in Bagnolet, France, 1981
11/10	Gregory Neil Shapiro born in Providence, Rhode Island, United States, 1970
11/11	Danilo E. Gondolfo born in Lobato, Parana, Brazil, 1987
11/13	John Baldwin born in Stuart, Virginia, United States, 1977
11/14	Jeremie Le Hen born in Nancy, France, 1980
11/15	Lars Engels born in Hilden, Nordrhein-Westfalen, Germany, 1980
11/15	Tijl Coosemans born in Duffel, Belgium, 1983
11/16	Jose Maria Alcaide Salinas born in Madrid, Spain, 1962
11/16	Matt Joras born in Evanston, Illinois, United States, 1992
11/17	Ralf S. Engelschall born in Dachau, Bavaria, Germany, 1972
11/18	Thomas Quinot born in Paris, France, 1977
11/19	Konstantin Belousov born in Kiev, USSR, 1972
11/20	Dmitry Morozovsky born in Moscow, USSR, 1968
11/20	Gavin Atkinson born in Middlesbrough, United Kingdom, 1979
11/21	Mark Johnston born in Toronto, Ontario, Canada, 1989
11/22	Frederic Culot born in Saint-Germain-En-Laye, France, 1976
11/23	Josef Lawrence Karthauser born in Pembury, Kent, United Kingdom, 1972
11/23	Sepherosa Ziehau born in Shanghai, China, 1980
11/24	Andrey Zakhvatov born in Chelyabinsk, Russian Federation, 1974
11/24	Daniel Gerzo born in Bratislava, Slovakia, 1986
11/28	Nik Clayton born in Peterborough, United Kingdom, 1973
11/28	Stanislav Sedov born in Chelyabinsk, USSR, 1985
12/01	Hajimu Umemoto born in Nara, Japan, 1961
12/01	Alexey Dokuchaev born in Magadan, USSR, 1980
12/02	Ermal Luçi born in Tirane, Albania, 1980
12/03	Diane Bruce born in Ottawa, Ontario, Canada, 1952
12/04	Mariusz Zaborski born in Skierniewice, Poland, 1990
12/05	Ivan Voras born in Slavonski Brod, Croatia, 1981
12/06	Stefan Farfeleder born in Wien, Austria, 1980
12/08	Michael Tuexen born in Oldenburg, Germany, 1966
12/11	Ganael Laplanche born in Reims, France, 1980
12/15	James FitzGibbon born in Amersham, Buckinghamshire, United Kingdom, 1974
12/15	Timur I. Bakeyev born in Kazan, Republic of Tatarstan, USSR, 1974
12/18	Chris Timmons born in Ellensburg, Washington, United States, 1964
12/18	Dag-Erling Smorgrav born in Brussels, Belgium, 1977
12/18	Muhammad Moinur Rahman born in Dhaka, Bangladesh, 1983
12/18	Semen Ustimenko born in Novosibirsk, Russian Federation, 1979
12/19	Stephen Hurd born in Estevan, Saskatchewan, Canada, 1975
12/19	Emmanuel Vadot born in Decines-Charpieu, France, 1983
12/21	Rong-En Fan born in Taipei, Taiwan, Republic of China, 1982
12/22	Maxim Sobolev born in Dnepropetrovsk, Ukraine, 1976
12/23	Sean Chittenden born in Seattle, Washington, United States, 1979
12/23	Alejandro Pulver born in Buenos Aires, Argentina, 1989
12/24	Jochen Neumeister born in Heidenheim, Germany, 1975
12/24	Guido Falsi born in Firenze, Italy, 1978
12/25	Niclas Zeising born in Stockholm, Sweden, 1986
12/28	Soren Schmidt born in Maribo, Denmark, 1960
12/28	Ade Lovett born in London, England, 1969
12/28	Marius Strobl born in Cham, Bavaria, Germany, 1978
12/31	Edwin Groothuis born in Geldrop, the Netherlands, 1970
12/31	Fourth quarter status reports are due on 01/15

#endif /* !_calendar_freebsd_ */
Index: projects/make-check-sandbox
===================================================================
--- projects/make-check-sandbox	(revision 321993)
+++ projects/make-check-sandbox	(revision 321994)

Property changes on: projects/make-check-sandbox
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r321970-321993